All downloads are FREE. Search and download functionality uses the official Maven repository.

com.ning.compress.lzf.LZFOutputStream Maven / Gradle / Ivy

Go to download

Full deployment of default TransiStore service, using basic storage types, packaged as "fat jar" with its dependencies.

There is a newer version: 0.9.8
Show newest version
package com.ning.compress.lzf;

import java.io.*;

import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.util.ChunkEncoderFactory;

/**
 * Decorator {@link OutputStream} implementation that will compress
 * output using LZF compression algorithm, given uncompressed input
 * to write. Its counterpart is {@link LZFInputStream}; although
 * in some ways {@link LZFCompressingInputStream} can be seen
 * as the opposite.
 * 
 * @author jon hartlaub
 * @author Tatu Saloranta
 * 
 * @see LZFInputStream
 * @see LZFCompressingInputStream
 */
public class LZFOutputStream extends OutputStream 
{
    private static final int OUTPUT_BUFFER_SIZE = LZFChunk.MAX_CHUNK_LEN;

    private final ChunkEncoder _encoder;
    private final BufferRecycler _recycler;
	
    protected final OutputStream _outputStream;
    protected byte[] _outputBuffer;
    protected int _position = 0;

    /**
     * Configuration setting that governs whether basic 'flush()' should
     * first complete a block or not.
     *

* Default value is 'true' */ protected boolean _cfgFinishBlockOnFlush = true; /** * Flag that indicates if we have already called '_outputStream.close()' * (to avoid calling it multiple times) */ protected boolean _outputStreamClosed; /* /////////////////////////////////////////////////////////////////////// // Construction, configuration /////////////////////////////////////////////////////////////////////// */ public LZFOutputStream(final OutputStream outputStream) { this(ChunkEncoderFactory.optimalInstance(OUTPUT_BUFFER_SIZE), outputStream); } public LZFOutputStream(final ChunkEncoder encoder, final OutputStream outputStream) { _encoder = encoder; _recycler = BufferRecycler.instance(); _outputStream = outputStream; _outputBuffer = _recycler.allocOutputBuffer(OUTPUT_BUFFER_SIZE); _outputStreamClosed = false; } /** * Method for defining whether call to {@link #flush} will also complete * current block (similar to calling {@link #finishBlock()}) or not. */ public LZFOutputStream setFinishBlockOnFlush(boolean b) { _cfgFinishBlockOnFlush = b; return this; } /* /////////////////////////////////////////////////////////////////////// // OutputStream impl /////////////////////////////////////////////////////////////////////// */ @Override public void write(final int singleByte) throws IOException { checkNotClosed(); if (_position >= _outputBuffer.length) { writeCompressedBlock(); } _outputBuffer[_position++] = (byte) singleByte; } @Override public void write(final byte[] buffer, int offset, int length) throws IOException { checkNotClosed(); final int BUFFER_LEN = _outputBuffer.length; // simple case first: buffering only (for trivially short writes) int free = BUFFER_LEN - _position; if (free >= length) { System.arraycopy(buffer, offset, _outputBuffer, _position, length); _position += length; return; } // otherwise, copy whatever we can, flush System.arraycopy(buffer, offset, _outputBuffer, _position, free); offset += free; length -= free; _position += free; 
writeCompressedBlock(); // then write intermediate full block, if any, without copying: while (length >= BUFFER_LEN) { _encoder.encodeAndWriteChunk(buffer, offset, BUFFER_LEN, _outputStream); offset += BUFFER_LEN; length -= BUFFER_LEN; } // and finally, copy leftovers in buffer, if any if (length > 0) { System.arraycopy(buffer, offset, _outputBuffer, 0, length); } _position = length; } @Override public void flush() throws IOException { checkNotClosed(); if (_cfgFinishBlockOnFlush && _position > 0) { writeCompressedBlock(); } _outputStream.flush(); } @Override public void close() throws IOException { if (!_outputStreamClosed) { if (_position > 0) { writeCompressedBlock(); } _outputStream.flush(); _encoder.close(); byte[] buf = _outputBuffer; if (buf != null) { _outputBuffer = null; _recycler.releaseOutputBuffer(buf); } _outputStreamClosed = true; _outputStream.close(); } } /* /////////////////////////////////////////////////////////////////////// // Additional public methods /////////////////////////////////////////////////////////////////////// */ /** * Method that can be used to find underlying {@link OutputStream} that * we write encoded LZF encoded data into, after compressing it. * Will never return null; although underlying stream may be closed * (if this stream has been closed). * * @since 0.8 */ public OutputStream getUnderlyingOutputStream() { return _outputStream; } /** * Accessor for checking whether call to "flush()" will first finish the * current block or not * * @since 0.8 */ public boolean getFinishBlockOnFlush() { return _cfgFinishBlockOnFlush; } /** * Method that can be used to force completion of the current block, * which means that all buffered data will be compressed into an * LZF block. This typically results in lower compression ratio * as larger blocks compress better; but may be necessary for * network connections to ensure timely sending of data. 
* * @since 0.8 */ public LZFOutputStream finishBlock() throws IOException { checkNotClosed(); if (_position > 0) { writeCompressedBlock(); } return this; } /* /////////////////////////////////////////////////////////////////////// // Internal methods /////////////////////////////////////////////////////////////////////// */ /** * Compress and write the current block to the OutputStream */ protected void writeCompressedBlock() throws IOException { int left = _position; _position = 0; int offset = 0; do { int chunkLen = Math.min(LZFChunk.MAX_CHUNK_LEN, left); _encoder.encodeAndWriteChunk(_outputBuffer, offset, chunkLen, _outputStream); offset += chunkLen; left -= chunkLen; } while (left > 0); } protected void checkNotClosed() throws IOException { if (_outputStreamClosed) { throw new IOException(getClass().getName()+" already closed"); } } }





© 2015 - 2024 Weber Informatics LLC | Privacy Policy