bt.torrent.data.DefaultDataWorker

package bt.torrent.data;

import bt.data.ChunkDescriptor;
import bt.data.ChunkVerifier;
import bt.data.DataDescriptor;
import bt.net.Peer;
import bt.service.IRuntimeLifecycleBinder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

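/**
 * Default implementation of {@link DataWorker}.
 *
 * Block read and write requests are executed asynchronously on a single dedicated I/O thread.
 * New requests are rejected immediately when the number of pending tasks reaches the configured limit.
 */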
class DefaultDataWorker implements DataWorker {

    private static final Logger LOGGER = LoggerFactory.getLogger(DefaultDataWorker.class);

    private final DataDescriptor data;
    private final ChunkVerifier verifier;

    private final ExecutorService executor;
    private final int maxPendingTasks;
    private final AtomicInteger pendingTasksCount;

    public DefaultDataWorker(IRuntimeLifecycleBinder lifecycleBinder,
                             DataDescriptor data,
                             ChunkVerifier verifier,
                             int maxQueueLength) {

        this.data = data;
        this.verifier = verifier;
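        // all read/write tasks are executed sequentially on a single dedicated I/O thread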
        this.executor = Executors.newSingleThreadExecutor(new ThreadFactory() {

            private AtomicInteger i = new AtomicInteger();

            @Override
            public Thread newThread(Runnable r) {
                return new Thread(r, "bt.torrent.data.worker-" + i.incrementAndGet());
            }
        });
        this.maxPendingTasks = maxQueueLength;
        this.pendingTasksCount = new AtomicInteger();

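        // stop the I/O thread when the runtime shuts down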
        lifecycleBinder.onShutdown("Shutdown data worker for descriptor: " + data, this.executor::shutdownNow);
    }

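    /**
     * Schedules an asynchronous read of a block of data from the requested piece.
     * The request is rejected right away if the task queue is full.
     */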
    @Override
    public CompletableFuture<BlockRead> addBlockRequest(Peer peer, int pieceIndex, int offset, int length) {
        if (pendingTasksCount.get() >= maxPendingTasks) {
            LOGGER.warn("Can't accept read block request from peer (" + peer + ") -- queue is full");
            return CompletableFuture.completedFuture(BlockRead.rejected(peer, pieceIndex, offset));
        } else {
            pendingTasksCount.incrementAndGet();
            return CompletableFuture.supplyAsync(() -> {
                try {
                    ChunkDescriptor chunk = data.getChunkDescriptors().get(pieceIndex);
                    byte[] block = chunk.getData().getSubrange(offset, length).getBytes();
                    return BlockRead.complete(peer, pieceIndex, offset, block);
                } catch (Throwable e) {
                    return BlockRead.exceptional(peer, e, pieceIndex, offset);
                } finally {
                    pendingTasksCount.decrementAndGet();
                }
            }, executor);
        }
    }

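    /**
     * Schedules an asynchronous write of a received block.
     * The request is rejected if the task queue is full or the target piece has already been verified.
     */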
    @Override
    public CompletableFuture<BlockWrite> addBlock(Peer peer, int pieceIndex, int offset, byte[] block) {
        if (pendingTasksCount.get() >= maxPendingTasks) {
            LOGGER.warn("Can't accept write block request -- queue is full");
            return CompletableFuture.completedFuture(BlockWrite.rejected(peer, pieceIndex, offset, block));
        } else {
            pendingTasksCount.incrementAndGet();
            return CompletableFuture.supplyAsync(() -> {
                try {
                    if (data.getBitfield().isVerified(pieceIndex)) {
                        if (LOGGER.isTraceEnabled()) {
                            LOGGER.trace("Rejecting request to write block because the chunk is already complete and verified: " +
                                    "piece index {" + pieceIndex + "}, offset {" + offset + "}, length {" + block.length + "}");
                        }
                        return BlockWrite.rejected(peer, pieceIndex, offset, block);
                    }

                    ChunkDescriptor chunk = data.getChunkDescriptors().get(pieceIndex);
                    chunk.getData().getSubrange(offset).putBytes(block);
                    if (LOGGER.isTraceEnabled()) {
                        LOGGER.trace("Successfully processed block: " +
                                "piece index {" + pieceIndex + "}, offset {" + offset + "}, length {" + block.length + "}");
                    }

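                    // if this block completes the chunk, schedule hash verification on the same executor
                    // and expose its result to the caller via the returned BlockWrite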
                    CompletableFuture<Boolean> verificationFuture = null;
                    if (chunk.isComplete()) {
                        verificationFuture = CompletableFuture.supplyAsync(() -> {
                            boolean verified = verifier.verify(chunk);
                            if (verified) {
                                data.getBitfield().markVerified(pieceIndex);
                            }
                            return verified;
                        }, executor);
                    }

                    return BlockWrite.complete(peer, pieceIndex, offset, block, verificationFuture);
                } catch (Throwable e) {
                    return BlockWrite.exceptional(peer, e, pieceIndex, offset, block);
                } finally {
                    pendingTasksCount.decrementAndGet();
                }
            }, executor);
        }
    }
}