
bt.data.DefaultDataDescriptor (bt-core)
BitTorrent Client Library (Core)
/*
* Copyright (c) 2016—2017 Andrei Tomashpolskiy and individual contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package bt.data;

import bt.BtException;
import bt.data.range.BlockRange;
import bt.data.range.Ranges;
import bt.metainfo.Torrent;
import bt.metainfo.TorrentFile;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
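
/**
 * Descriptor of a torrent's data: maps the torrent's files onto storage units, splits the
 * resulting contiguous data range into chunk descriptors (one per piece), verifies any data
 * that is already present in storage, and keeps track of which files each piece spans.
 */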
class DefaultDataDescriptor implements DataDescriptor {

    private static final Logger LOGGER = LoggerFactory.getLogger(DefaultDataDescriptor.class);

    private Storage storage;
    private Torrent torrent;

    private List<ChunkDescriptor> chunkDescriptors;
    private Bitfield bitfield;
    private Map<Integer, List<TorrentFile>> filesForPieces;
    private Set<StorageUnit> storageUnits;

    private ChunkVerifier verifier;

    public DefaultDataDescriptor(Storage storage,
                                 Torrent torrent,
                                 ChunkVerifier verifier,
                                 int transferBlockSize) {
        this.storage = storage;
        this.torrent = torrent;
        this.verifier = verifier;

        init(transferBlockSize);
    }
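
    /**
     * Lays out all non-empty storage units as one contiguous data range, cuts it into
     * chunk-sized subranges (one per piece), builds a chunk descriptor for each of them
     * and records which files back each piece. Empty files are simply created on disk.
     *
     * @param transferBlockSize preferred block size; capped at the torrent's chunk size
     */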
    private void init(long transferBlockSize) {
        List<TorrentFile> files = torrent.getFiles();

        long totalSize = torrent.getSize();
        long chunkSize = torrent.getChunkSize();

        if (transferBlockSize > chunkSize) {
            transferBlockSize = chunkSize;
        }

        // use floating-point division so that a partial last chunk is counted
        int chunksTotal = (int) Math.ceil(((double) totalSize) / chunkSize);

        Map<Integer, List<TorrentFile>> filesForPieces = new HashMap<>((int)(chunksTotal / 0.75d) + 1);
        List<ChunkDescriptor> chunks = new ArrayList<>(chunksTotal + 1);

        Iterator<byte[]> chunkHashes = torrent.getChunkHashes().iterator();

        Map<StorageUnit, TorrentFile> storageUnitsToFilesMap = new LinkedHashMap<>((int)(files.size() / 0.75d) + 1);
        files.forEach(f -> storageUnitsToFilesMap.put(storage.getUnit(torrent, f), f));

        // filter out empty files (and create them at once)
        List<StorageUnit> nonEmptyStorageUnits = new ArrayList<>();
        for (StorageUnit unit : storageUnitsToFilesMap.keySet()) {
            if (unit.capacity() > 0) {
                nonEmptyStorageUnits.add(unit);
            } else {
                try {
                    // TODO: think about adding some explicit "initialization/creation" method
                    unit.writeBlock(new byte[0], 0);
                } catch (Exception e) {
                    LOGGER.warn("Failed to create empty storage unit: " + unit, e);
                }
            }
        }

        if (nonEmptyStorageUnits.size() > 0) {
            long limitInLastUnit = nonEmptyStorageUnits.get(nonEmptyStorageUnits.size() - 1).capacity();
            DataRange data = new ReadWriteDataRange(nonEmptyStorageUnits, 0, limitInLastUnit);

            long off, lim;
            long remaining = totalSize;
            while (remaining > 0) {
                off = chunks.size() * chunkSize;
                lim = Math.min(chunkSize, remaining);

                DataRange subrange = data.getSubrange(off, lim);

                if (!chunkHashes.hasNext()) {
                    throw new BtException("Wrong number of chunk hashes in the torrent: too few");
                }

                // remember which files this piece spans
                List<TorrentFile> chunkFiles = new ArrayList<>();
                subrange.visitUnits((unit, off1, lim1) -> chunkFiles.add(storageUnitsToFilesMap.get(unit)));
                filesForPieces.put(chunks.size(), chunkFiles);

                chunks.add(buildChunkDescriptor(subrange, transferBlockSize, chunkHashes.next()));

                remaining -= chunkSize;
            }
        }

        if (chunkHashes.hasNext()) {
            throw new BtException("Wrong number of chunk hashes in the torrent: too many");
        }

        this.bitfield = buildBitfield(chunks);
        this.chunkDescriptors = chunks;
        this.storageUnits = storageUnitsToFilesMap.keySet();
        this.filesForPieces = filesForPieces;
    }
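
    /**
     * Wraps a piece's data range into a block-aligned range with thread-safe views of its data
     * and block set, and pairs them with the expected checksum of the piece.
     */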
    private ChunkDescriptor buildChunkDescriptor(DataRange data, long blockSize, byte[] checksum) {
        BlockRange<DataRange> blockData = Ranges.blockRange(data, blockSize);
        DataRange synchronizedData = Ranges.synchronizedDataRange(blockData);
        BlockSet synchronizedBlockSet = Ranges.synchronizedBlockSet(blockData.getBlockSet());

        return new DefaultChunkDescriptor(synchronizedData, synchronizedBlockSet, checksum);
    }
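
    /**
     * Builds the torrent's bitfield by running the verifier over all chunk descriptors,
     * so that pieces already complete in storage are marked as verified.
     */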
    private Bitfield buildBitfield(List<ChunkDescriptor> chunks) {
        Bitfield bitfield = new Bitfield(chunks.size());
        verifier.verify(chunks, bitfield);
        return bitfield;
    }

    @Override
    public List<ChunkDescriptor> getChunkDescriptors() {
        return chunkDescriptors;
    }

    @Override
    public Bitfield getBitfield() {
        return bitfield;
    }

    @Override
    public List<TorrentFile> getFilesForPiece(int pieceIndex) {
        if (pieceIndex < 0 || pieceIndex >= bitfield.getPiecesTotal()) {
            throw new IllegalArgumentException("Invalid piece index: " + pieceIndex +
                    ", expected 0.." + (bitfield.getPiecesTotal() - 1));
        }
        return filesForPieces.get(pieceIndex);
    }
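
    // Closes all storage units opened for this torrent; a failure to close one unit
    // is logged and does not prevent the remaining units from being closed.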
    @Override
    public void close() {
        storageUnits.forEach(unit -> {
            try {
                unit.close();
            } catch (Exception e) {
                // include the exception so the failure cause is not silently dropped
                LOGGER.error("Failed to close storage unit: " + unit, e);
            }
        });
    }

    @Override
    public String toString() {
        return this.getClass().getName() + " <" + torrent.getName() + ">";
    }
}