All downloads are free. The search and download functionalities use the official Maven repository.

com.graphhopper.reader.osm.pbf.PbfDecoder Maven / Gradle / Ivy

Go to download

GraphHopper is a fast and memory efficient Java road routing engine working seamlessly with OpenStreetMap data.

There is a newer version: 10.0
Show newest version
// This software is released into the Public Domain.  See copying.txt for details.
package com.graphhopper.reader.osm.pbf;

import com.graphhopper.reader.ReaderElement;
import com.graphhopper.reader.osm.SkipOptions;

import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

/**
 * Decodes all blocks from a PBF stream using worker threads, and passes the results to the
 * downstream sink.
 * 

* * @author Brett Henderson */ public class PbfDecoder { private final PbfStreamSplitter streamSplitter; private final ExecutorService executorService; private final int maxPendingBlobs; private final Sink sink; private final Lock lock; private final Condition dataWaitCondition; private final Queue blobResults; private final SkipOptions skipOptions; /** * Creates a new instance. *

* * @param streamSplitter The PBF stream splitter providing the source of blobs to be decoded. * @param executorService The executor service managing the thread pool. * @param maxPendingBlobs The maximum number of blobs to have in progress at any point in time. * @param sink The sink to send all decoded entities to. */ public PbfDecoder(PbfStreamSplitter streamSplitter, ExecutorService executorService, int maxPendingBlobs, Sink sink, SkipOptions skipOptions) { this.streamSplitter = streamSplitter; this.executorService = executorService; this.maxPendingBlobs = maxPendingBlobs; this.sink = sink; this.skipOptions = skipOptions; // Create the thread synchronisation primitives. lock = new ReentrantLock(); dataWaitCondition = lock.newCondition(); // Create the queue of blobs being decoded. blobResults = new LinkedList<>(); } /** * Any thread can call this method when they wish to wait until an update has been performed by * another thread. */ private void waitForUpdate() { try { dataWaitCondition.await(); } catch (InterruptedException e) { throw new RuntimeException("Thread was interrupted.", e); } } /** * Any thread can call this method when they wish to signal another thread that an update has * occurred. */ private void signalUpdate() { dataWaitCondition.signal(); } private void sendResultsToSink(int targetQueueSize) { while (blobResults.size() > targetQueueSize) { // Get the next result from the queue and wait for it to complete. PbfBlobResult blobResult = blobResults.remove(); while (!blobResult.isComplete()) { // The thread hasn't finished processing yet so wait for an // update from another thread before checking again. waitForUpdate(); } if (!blobResult.isSuccess()) { throw new RuntimeException("A PBF decoding worker thread failed, aborting.", blobResult.getException()); } // Send the processed entities to the sink. We can release the lock // for the duration of processing to allow worker threads to post // their results. 
lock.unlock(); try { for (ReaderElement entity : blobResult.getEntities()) { sink.process(entity); } } finally { lock.lock(); } } } private void processBlobs() { // Process until the PBF stream is exhausted. while (streamSplitter.hasNext()) { // Obtain the next raw blob from the PBF stream. PbfRawBlob rawBlob = streamSplitter.next(); // Create the result object to capture the results of the decoded // blob and add it to the blob results queue. final PbfBlobResult blobResult = new PbfBlobResult(); blobResults.add(blobResult); // Create the listener object that will update the blob results // based on an event fired by the blob decoder. PbfBlobDecoderListener decoderListener = new PbfBlobDecoderListener() { @Override public void error(Exception ex) { lock.lock(); try { // System.out.println("ERROR: " + new Date()); blobResult.storeFailureResult(ex); signalUpdate(); } finally { lock.unlock(); } } @Override public void complete(List decodedEntities) { lock.lock(); try { blobResult.storeSuccessResult(decodedEntities); signalUpdate(); } finally { lock.unlock(); } } }; // Create the blob decoder itself and execute it on a worker thread. PbfBlobDecoder blobDecoder = new PbfBlobDecoder(rawBlob.getType(), rawBlob.getData(), decoderListener, skipOptions); executorService.execute(blobDecoder); // If the number of pending blobs has reached capacity we must begin // sending results to the sink. This method will block until blob // decoding is complete. sendResultsToSink(maxPendingBlobs - 1); } // There are no more entities available in the PBF stream, so send all remaining data to the sink. sendResultsToSink(0); } public void run() { lock.lock(); try { processBlobs(); } finally { lock.unlock(); } } }





© 2015 - 2024 Weber Informatics LLC | Privacy Policy