dev.fitko.fitconnect.core.io.FileChunker
Library that provides client access to the FIT-Connect API endpoints for sending, subscribing, and routing.

package dev.fitko.fitconnect.core.io;
import dev.fitko.fitconnect.core.utils.Preconditions;
import dev.fitko.fitconnect.core.utils.StopWatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
import java.util.stream.Stream;
import static java.util.Comparator.comparing;
import static java.util.Objects.requireNonNull;
import static java.util.stream.Collectors.toList;
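/**
 * Splits files or input streams into fixed-size chunks and merges such
 * chunks back into a single file.
 *
 * <p>A minimal round-trip sketch (paths, file names, and chunk size are hypothetical):
 * <pre>{@code
 * FileChunker chunker = new FileChunker();
 * Path workDir = Path.of("/tmp/chunks");
 *
 * // split into 5 MiB chunks ...
 * List<File> chunks = chunker.chunkFile(Path.of("/tmp/report.pdf"), 5 * 1024 * 1024, workDir);
 *
 * // ... and merge them back; the chunk files are deleted afterwards
 * Optional<File> merged = chunker.concatChunks(workDir, "report.pdf");
 * }</pre>
 */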
public class FileChunker {
private static final Logger LOGGER = LoggerFactory.getLogger(FileChunker.class);
private static final String CHUNK_INDEX_FORMAT = "%06d"; // 6 digits -> max. 999,999 named chunks
private static final String CHUNK_FILE_ENDING = ".chunk";
private static final String PART_SUFFIX = "_part_";
public static final String FRAGMENT_PREFIX = "fragment_";
/**
* Splits a file into chunks and writes them into a given directory.
*
* @param filePath path to a file that should be chunked
* @param chunkSize maximum size of each chunk in bytes
* @param outputDir output directory the chunks will be written to
* @return list of chunked files
* @throws IOException if an error occurred whilst reading the file or writing the chunk
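*
* <p>Usage sketch (file, chunk size, and directory are hypothetical):
* <pre>{@code
* List<File> chunks = new FileChunker()
*         .chunkFile(Path.of("data.bin"), 1024 * 1024, Path.of("out"));
* // -> out/data.bin_part_000000.chunk, out/data.bin_part_000001.chunk, ...
* }</pre>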
*/
public List<File> chunkFile(final Path filePath, final int chunkSize, final Path outputDir) throws IOException {
try (InputStream is = Files.newInputStream(filePath)) {
return chunkStream(is, chunkSize, filePath.getFileName().toString(), outputDir);
}
}
/**
* Splits a stream into chunked files and writes them into a given directory.
*
* @param stream input-stream of the data
* @param chunkSize maximum size of each chunk in bytes
* @param fileName name of the original file
* @param outputDir output directory the chunks will be written to
* @return list of chunked files
* @throws IOException if an error occurred whilst reading the file or writing the chunk
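*
* <p>Usage sketch with an in-memory stream (all values are hypothetical):
* <pre>{@code
* byte[] payload = "example payload".getBytes();
* try (InputStream in = new ByteArrayInputStream(payload)) {
*     List<File> chunks = new FileChunker()
*             .chunkStream(in, 256 * 1024, "payload.bin", Path.of("out"));
* }
* }</pre>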
*/
public List<File> chunkStream(final InputStream stream, final int chunkSize, final String fileName, final Path outputDir) throws IOException {
Preconditions.checkArgumentAndThrow(chunkSize <= 0, "chunk size must be larger than zero, but was " + chunkSize);
final var buffer = new byte[chunkSize];
final var fileChunks = new ArrayList<File>();
final var name = fileName == null ? UUID.randomUUID().toString() : fileName;
int chunkCount = 0;
int readBytes = stream.read(buffer);
final var start = StopWatch.start();
while (readBytes > -1) {
final File fileChunk = writeFileChunk(generateFileName(name, chunkCount), buffer, readBytes, outputDir);
LOGGER.debug("Wrote {} bytes of chunk #{}", readBytes, ++chunkCount);
fileChunks.add(fileChunk);
readBytes = stream.read(buffer);
}
LOGGER.info("Chunking to {} files took {}", chunkCount, StopWatch.stop(start));
return fileChunks;
}
/**
* Concatenates all file parts of a given directory to a single file and deletes the file chunks afterward.
*
* @param path path that contains the file chunks with ".chunk" extension
* @param fileName filename the merged file is written to
* @return optional of the merged file, empty if no chunks were found
* @throws IOException if an error occurred whilst reading the directory path or merging the chunks
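*
* <p>Usage sketch (directory and target file name are hypothetical):
* <pre>{@code
* Optional<File> merged = new FileChunker().concatChunks(Path.of("out"), "data.bin");
* merged.ifPresent(file -> System.out.println("Merged into " + file.getPath()));
* }</pre>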
*/
public Optional<File> concatChunks(final Path path, final String fileName) throws IOException {
final List<File> chunkedFiles = Stream.of(requireNonNull(path.toFile().listFiles()))
.filter(file -> !file.isDirectory())
.filter(file -> file.getName().contains(CHUNK_FILE_ENDING))
.sorted(comparing(File::getName))
.collect(toList());
if (chunkedFiles.isEmpty()) {
return Optional.empty();
}
final File mergedFile = path.resolve(fileName).toFile();
LOGGER.info("Merging {} chunks into file {}", chunkedFiles.size(), mergedFile.getPath());
final var start = StopWatch.start();
try (FileOutputStream fos = new FileOutputStream(mergedFile)) {
for (final File chunk : chunkedFiles) {
try (InputStream is = Files.newInputStream(chunk.toPath())) {
is.transferTo(fos);
fos.flush();
chunk.delete();
}
}
}
LOGGER.info("Merging chunks took {}", StopWatch.stop(start));
return Optional.of(mergedFile);
}
/**
* Writes a file chunk with an index to the filesystem.
*
* @param data the chunk data that should be written
* @param chunkIndex index of the current chunk, used in the generated filename
* @param outputDir target directory the chunk is written to
* @return the file that was written
* @throws IOException if there was an error whilst writing the file
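*
* <p>Usage sketch (data and directory are hypothetical):
* <pre>{@code
* byte[] data = "chunk payload".getBytes();
* File chunk = new FileChunker().writeFileChunk(data, 0, Path.of("out"));
* // -> out/fragment_000000.chunk
* }</pre>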
*/
public File writeFileChunk(final byte[] data, final int chunkIndex, final Path outputDir) throws IOException {
final String fileName = generateFileName(chunkIndex);
return writeFileChunk(fileName, data, data.length, outputDir);
}
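// Writes the given number of bytes from the start of the buffer to <outputDir>/<fileName> and returns the created file.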
private File writeFileChunk(final String fileName, final byte[] buffer, final int length, final Path outputDir) throws IOException {
final File chunk = outputDir.resolve(fileName).toFile();
try (FileOutputStream fos = new FileOutputStream(chunk)) {
fos.write(buffer, 0, length);
}
return chunk;
}
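// Builds chunk names like "data.bin_part_000000.chunk"; the zero-padded index keeps lexicographic order equal to chunk order.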
private static String generateFileName(final String fileName, final int chunkCount) {
return fileName + PART_SUFFIX + String.format(CHUNK_INDEX_FORMAT, chunkCount) + CHUNK_FILE_ENDING;
}
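// Builds names like "fragment_000000.chunk" for chunks written without an original file name via writeFileChunk(byte[], int, Path).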
private static String generateFileName(final int chunkCount) {
return FRAGMENT_PREFIX + String.format(CHUNK_INDEX_FORMAT, chunkCount) + CHUNK_FILE_ENDING;
}
}