// Source: Maven repository artifact page for bt-core (BitTorrent Client Library (Core)),
// class bt.data.DefaultDataDescriptor — page navigation text ("Go to download",
// "Show all versions of bt-core", "The newest version!") commented out so the file compiles.
/*
* Copyright (c) 2016—2021 Andrei Tomashpolskiy and individual contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package bt.data;
import bt.metainfo.Torrent;
import bt.metainfo.TorrentFile;
import bt.torrent.callbacks.FileDownloadCompleteCallback;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
/**
* Note that this class is not a part of the public API and is a subject to change.
*/
class DefaultDataDescriptor implements DataDescriptor {
private static final Logger LOGGER = LoggerFactory.getLogger(DefaultDataDescriptor.class);
private final Torrent torrent;
private final Storage storage;
private final DataReader reader;
private final ChunkVerifier verifier;
private final FileDownloadCompleteCallback fileCompletionCallback;
private List chunkDescriptors;
private LocalBitfield bitfield;
private boolean startedAsSeed;
private List> filesForPieces;
private Collection storageUnits;
public DefaultDataDescriptor(Storage storage,
Torrent torrent,
FileDownloadCompleteCallback fileCompletionCallback,
ChunkVerifier verifier,
DataReaderFactory dataReaderFactory,
int transferBlockSize) {
this.storage = storage;
this.torrent = torrent;
this.fileCompletionCallback = fileCompletionCallback;
this.verifier = verifier;
init(transferBlockSize);
this.reader = dataReaderFactory.createReader(torrent, this);
}
private void init(long transferBlockSize) {
List files = torrent.getFiles();
long totalSize = torrent.getSize();
long chunkSize = torrent.getChunkSize();
transferBlockSize = Math.min(transferBlockSize, chunkSize);
int chunksTotal = PieceUtils.calculateNumberOfChunks(totalSize, chunkSize);
List> pieceNumToFile = new ArrayList<>(chunksTotal);
final Map storageUnitsToFilesMap = buildStorageUnitToFilesMap(files);
// filter out empty files (and create them at once)
List nonEmptyStorageUnits = handleEmptyStorageUnits(storageUnitsToFilesMap);
List chunks = PieceUtils
.buildChunkDescriptors(torrent, transferBlockSize, totalSize, chunkSize, chunksTotal, pieceNumToFile,
storageUnitsToFilesMap, nonEmptyStorageUnits);
List> countdownTorrentFiles =
createListOfCountdownFiles(torrent.getFiles(), pieceNumToFile);
this.bitfield = buildBitfield(chunks, countdownTorrentFiles);
this.chunkDescriptors = chunks;
this.storageUnits = nonEmptyStorageUnits;
this.filesForPieces = pieceNumToFile;
}
private Map buildStorageUnitToFilesMap(List files) {
final Map storageUnitsToFilesMap =
Maps.newLinkedHashMapWithExpectedSize(files.size());
files.forEach(f -> storageUnitsToFilesMap.put(storage.getUnit(torrent, f), f));
return Collections.unmodifiableMap(storageUnitsToFilesMap);
}
private List> createListOfCountdownFiles(List allFiles,
List> chunkToTorrentFile) {
Map torrentFileToPieceCount = chunkToTorrentFile.stream()
.flatMap(List::stream)
.collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));
Map tfToCountingTf = allFiles.stream()
.collect(Collectors.toMap(
Function.identity(),
tf -> new CompletableTorrentFile(tf, torrentFileToPieceCount.getOrDefault(tf, 0L), null)
));
return chunkToTorrentFile.stream()
.map(list -> list.stream()
.map(tfToCountingTf::get)
.collect(Collectors.toList()))
.collect(Collectors.toList());
}
/**
* Create empty files for each empty storage unit. Return the non-empty storage units in a list
*
* @param storageUnitsToFilesMap the storage units to process
* @return the list of non-empty storage units
*/
private List handleEmptyStorageUnits(Map storageUnitsToFilesMap) {
List nonEmptyStorageUnits = new ArrayList<>();
for (StorageUnit unit : storageUnitsToFilesMap.keySet()) {
if (unit.capacity() > 0) {
nonEmptyStorageUnits.add(unit);
} else {
if (!unit.createEmpty()) {
throw new IllegalStateException("Failed to initialize storage unit: " + unit);
}
// close these units after file creation - no reason to keep them open
try {
unit.close();
} catch (IOException ex) {
throw new UncheckedIOException("Failed to close storage unit after initialization: " + unit, ex);
}
}
}
return Collections.unmodifiableList(nonEmptyStorageUnits);
}
private LocalBitfield buildBitfield(List chunks,
List> chunkToCountdownFiles) {
LocalBitfield bitfield = new LocalBitfield(chunks.size(), chunkToCountdownFiles) {
@Override
protected void fileFinishedCallback(TorrentFile tf) {
if (fileCompletionCallback != null) {
fileCompletionCallback.fileDownloadCompleted(torrent, tf, storage);
}
}
};
this.startedAsSeed = verifier.verify(chunks, bitfield);
return bitfield;
}
@Override
public List getChunkDescriptors() {
return chunkDescriptors;
}
@Override
public LocalBitfield getBitfield() {
return bitfield;
}
@Override
public List getFilesForPiece(int pieceIndex) {
if (pieceIndex < 0 || pieceIndex >= bitfield.getPiecesTotal()) {
throw new IllegalArgumentException("Invalid piece index: " + pieceIndex +
", expected 0.." + bitfield.getPiecesTotal());
}
return filesForPieces.get(pieceIndex);
}
@Override
public BitSet getAllPiecesForFiles(Set files) {
return getPieces(pieceFiles -> pieceFiles.stream().anyMatch(files::contains));
}
@Override
public BitSet getPiecesWithOnlyFiles(Set files) {
return getPieces(files::containsAll);
}
private BitSet getPieces(Predicate> includePredicate) {
BitSet ret = new BitSet(bitfield.getPiecesTotal());
for (int i = 0; i < bitfield.getPiecesTotal(); i++) {
if (includePredicate.test(filesForPieces.get(i))) {
ret.set(i);
}
}
return ret;
}
@Override
public DataReader getReader() {
return reader;
}
@Override
public void waitForAllPieces() throws InterruptedException {
bitfield.waitForAllPieces();
}
@Override
public long getLeft() {
BitSet chunkStatus = this.bitfield.getBitmask();
int lastChunkIdx = chunkDescriptors.size() - 1;
// handle last piece which may be smaller than other pieces
final boolean lastBlockDone = chunkStatus.get(lastChunkIdx);
long dataCompleted = lastBlockDone ? chunkDescriptors.get(lastChunkIdx).length() : 0;
long numFullCompleteChunks = chunkStatus.cardinality() - (lastBlockDone ? 1 : 0);
dataCompleted += torrent.getChunkSize() * numFullCompleteChunks;
return torrent.getSize() - dataCompleted;
}
@Override
public boolean startedAsSeed() {
return this.startedAsSeed;
}
@Override
public void close() {
storageUnits.forEach(unit -> {
try {
unit.close();
} catch (Exception e) {
LOGGER.error("Failed to close storage unit: " + unit);
}
});
}
@Override
public String toString() {
return this.getClass().getName() + " <" + torrent.getName() + ">";
}
}