All downloads are free. The search and download functionality uses the official Maven repository.

org.eclipse.jetty.websocket.common.extensions.compress.CompressExtension Maven / Gradle / Ivy

There is a newer version: 11.0.0.beta1
Show newest version
//
//  ========================================================================
//  Copyright (c) 1995-2014 Mort Bay Consulting Pty. Ltd.
//  ------------------------------------------------------------------------
//  All rights reserved. This program and the accompanying materials
//  are made available under the terms of the Eclipse Public License v1.0
//  and Apache License v2.0 which accompanies this distribution.
//
//      The Eclipse Public License is available at
//      http://www.eclipse.org/legal/epl-v10.html
//
//      The Apache License v2.0 is available at
//      http://www.opensource.org/licenses/apache2.0.php
//
//  You may elect to redistribute this code under either of these licenses.
//  ========================================================================
//

package org.eclipse.jetty.websocket.common.extensions.compress;

import java.nio.ByteBuffer;
import java.util.Queue;
import java.util.zip.DataFormatException;
import java.util.zip.Deflater;
import java.util.zip.Inflater;
import java.util.zip.ZipException;

import org.eclipse.jetty.util.BufferUtil;
import org.eclipse.jetty.util.ConcurrentArrayQueue;
import org.eclipse.jetty.util.IteratingCallback;
import org.eclipse.jetty.util.log.Log;
import org.eclipse.jetty.util.log.Logger;
import org.eclipse.jetty.websocket.api.BadPayloadException;
import org.eclipse.jetty.websocket.api.BatchMode;
import org.eclipse.jetty.websocket.api.WriteCallback;
import org.eclipse.jetty.websocket.api.extensions.Frame;
import org.eclipse.jetty.websocket.common.OpCode;
import org.eclipse.jetty.websocket.common.extensions.AbstractExtension;
import org.eclipse.jetty.websocket.common.frames.DataFrame;

public abstract class CompressExtension extends AbstractExtension
{
    protected static final byte[] TAIL_BYTES = new byte[]{0x00, 0x00, (byte)0xFF, (byte)0xFF};
    private static final Logger LOG = Log.getLogger(CompressExtension.class);
    
    /** Never drop tail bytes 0000FFFF, from any frame type */
    protected static final int TAIL_DROP_NEVER = 0;
    /** Always drop tail bytes 0000FFFF, from all frame types */
    protected static final int TAIL_DROP_ALWAYS = 1;
    /** Only drop tail bytes 0000FFFF, from fin==true frames */
    protected static final int TAIL_DROP_FIN_ONLY = 2;

    /** Always set RSV flag, on all frame types */
    protected static final int RSV_USE_ALWAYS = 0;
    /** 
     * Only set RSV flag on first frame in multi-frame messages.
     * 

* Note: this automatically means no-continuation frames have * the RSV bit set */ protected static final int RSV_USE_ONLY_FIRST = 1; private final Queue entries = new ConcurrentArrayQueue<>(); private final IteratingCallback flusher = new Flusher(); private final Deflater compressor; private final Inflater decompressor; private int tailDrop = TAIL_DROP_NEVER; private int rsvUse = RSV_USE_ALWAYS; protected CompressExtension() { compressor = new Deflater(Deflater.BEST_COMPRESSION, true); decompressor = new Inflater(true); tailDrop = getTailDropMode(); rsvUse = getRsvUseMode(); } public Deflater getDeflater() { return compressor; } public Inflater getInflater() { return decompressor; } /** * Indicates use of RSV1 flag for indicating deflation is in use. */ @Override public boolean isRsv1User() { return true; } /** * Return the mode of operation for dropping (or keeping) tail bytes in frames generated by compress (outgoing) * * @return either {@link #TAIL_DROP_ALWAYS}, {@link #TAIL_DROP_FIN_ONLY}, or {@link #TAIL_DROP_NEVER} */ abstract int getTailDropMode(); /** * Return the mode of operation for RSV flag use in frames generate by compress (outgoing) * * @return either {@link #RSV_USE_ALWAYS} or {@link #RSV_USE_ONLY_FIRST} */ abstract int getRsvUseMode(); protected void forwardIncoming(Frame frame, ByteAccumulator accumulator) { DataFrame newFrame = new DataFrame(frame); // Unset RSV1 since it's not compressed anymore. newFrame.setRsv1(false); ByteBuffer buffer = getBufferPool().acquire(accumulator.getLength(), false); try { BufferUtil.flipToFill(buffer); accumulator.transferTo(buffer); newFrame.setPayload(buffer); nextIncomingFrame(newFrame); } finally { getBufferPool().release(buffer); } } protected ByteAccumulator decompress(byte[] input) { // Since we don't track text vs binary vs continuation state, just grab whatever is the greater value. 
int maxSize = Math.max(getPolicy().getMaxTextMessageSize(), getPolicy().getMaxBinaryMessageBufferSize()); ByteAccumulator accumulator = new ByteAccumulator(maxSize); decompressor.setInput(input, 0, input.length); if (LOG.isDebugEnabled()) LOG.debug("Decompressing {} bytes", input.length); try { // It is allowed to send DEFLATE blocks with BFINAL=1. // For such blocks, getRemaining() will be > 0 but finished() // will be true, so we need to check for both. // When BFINAL=0, finished() will always be false and we only // check the remaining bytes. while (decompressor.getRemaining() > 0 && !decompressor.finished()) { byte[] output = new byte[Math.min(input.length * 2, 32 * 1024)]; int decompressed = decompressor.inflate(output); if (decompressed == 0) { if (decompressor.needsInput()) { throw new BadPayloadException("Unable to inflate frame, not enough input on frame"); } if (decompressor.needsDictionary()) { throw new BadPayloadException("Unable to inflate frame, frame erroneously says it needs a dictionary"); } } else { accumulator.addChunk(output, 0, decompressed); } } if (LOG.isDebugEnabled()) LOG.debug("Decompressed {}->{} bytes", input.length, accumulator.getLength()); return accumulator; } catch (DataFormatException x) { throw new BadPayloadException(x); } } @Override public void outgoingFrame(Frame frame, WriteCallback callback, BatchMode batchMode) { // We use a queue and an IteratingCallback to handle concurrency. // We must compress and write atomically, otherwise the compression // context on the other end gets confused. 
if (flusher.isFailed()) { notifyCallbackFailure(callback, new ZipException()); return; } FrameEntry entry = new FrameEntry(frame, callback, batchMode); if (LOG.isDebugEnabled()) LOG.debug("Queuing {}", entry); entries.offer(entry); flusher.iterate(); } protected void notifyCallbackSuccess(WriteCallback callback) { try { if (callback != null) callback.writeSuccess(); } catch (Throwable x) { if (LOG.isDebugEnabled()) LOG.debug("Exception while notifying success of callback " + callback, x); } } protected void notifyCallbackFailure(WriteCallback callback, Throwable failure) { try { if (callback != null) callback.writeFailed(failure); } catch (Throwable x) { if (LOG.isDebugEnabled()) LOG.debug("Exception while notifying failure of callback " + callback, x); } } @Override public String toString() { return getClass().getSimpleName(); } private static class FrameEntry { private final Frame frame; private final WriteCallback callback; private final BatchMode batchMode; private FrameEntry(Frame frame, WriteCallback callback, BatchMode batchMode) { this.frame = frame; this.callback = callback; this.batchMode = batchMode; } @Override public String toString() { return frame.toString(); } } private class Flusher extends IteratingCallback implements WriteCallback { private static final int INPUT_BUFSIZE = 32 * 1024; private FrameEntry current; private ByteBuffer payload; private boolean finished = true; @Override protected Action process() throws Exception { if (finished) { current = entries.poll(); LOG.debug("Processing {}", current); if (current == null) return Action.IDLE; deflate(current); } else { compress(current, false); } return Action.SCHEDULED; } private void deflate(FrameEntry entry) { Frame frame = entry.frame; BatchMode batchMode = entry.batchMode; if (OpCode.isControlFrame(frame.getOpCode()) || !frame.hasPayload()) { nextOutgoingFrame(frame, this, batchMode); return; } compress(entry, true); } private void compress(FrameEntry entry, boolean first) { // Get a chunk 
of the payload to avoid to blow // the heap if the payload is a huge mapped file. Frame frame = entry.frame; ByteBuffer data = frame.getPayload(); int remaining = data.remaining(); int inputLength = Math.min(remaining, INPUT_BUFSIZE); if (LOG.isDebugEnabled()) LOG.debug("Compressing {}: {} bytes in {} bytes chunk", entry, remaining, inputLength); // Avoid to copy the bytes if the ByteBuffer // is backed by an array. int inputOffset; byte[] input; if (data.hasArray()) { input = data.array(); int position = data.position(); inputOffset = position + data.arrayOffset(); data.position(position + inputLength); } else { input = new byte[inputLength]; inputOffset = 0; data.get(input, 0, inputLength); } finished = inputLength == remaining; compressor.setInput(input, inputOffset, inputLength); // Use an additional space in case the content is not compressible. byte[] output = new byte[inputLength + 64]; int outputOffset = 0; int outputLength = 0; while (true) { int space = output.length - outputOffset; int compressed = compressor.deflate(output, outputOffset, space, Deflater.SYNC_FLUSH); outputLength += compressed; if (compressed < space) { // Everything was compressed. break; } else { // The compressed output is bigger than the uncompressed input. byte[] newOutput = new byte[output.length * 2]; System.arraycopy(output, 0, newOutput, 0, output.length); outputOffset += output.length; output = newOutput; } } boolean fin = frame.isFin() && finished; // Handle tail bytes generated by SYNC_FLUSH. 
if(tailDrop == TAIL_DROP_ALWAYS) { payload = ByteBuffer.wrap(output, 0, outputLength - TAIL_BYTES.length); } else if(tailDrop == TAIL_DROP_FIN_ONLY) { payload = ByteBuffer.wrap(output, 0, outputLength - (fin?TAIL_BYTES.length:0)); } else { // always include payload = ByteBuffer.wrap(output, 0, outputLength); } if (LOG.isDebugEnabled()) { LOG.debug("Compressed {}: {}->{} chunk bytes",entry,inputLength,outputLength); } boolean continuation = frame.getType().isContinuation() || !first; DataFrame chunk = new DataFrame(frame, continuation); if(rsvUse == RSV_USE_ONLY_FIRST) { chunk.setRsv1(!continuation); } else { // always set chunk.setRsv1(true); } chunk.setPayload(payload); chunk.setFin(fin); nextOutgoingFrame(chunk, this, entry.batchMode); } @Override protected void onCompleteSuccess() { // This IteratingCallback never completes. } @Override protected void onCompleteFailure(Throwable x) { // Fail all the frames in the queue. FrameEntry entry; while ((entry = entries.poll()) != null) notifyCallbackFailure(entry.callback, x); } @Override public void writeSuccess() { if (finished) notifyCallbackSuccess(current.callback); succeeded(); } @Override public void writeFailed(Throwable x) { notifyCallbackFailure(current.callback, x); // If something went wrong, very likely the compression context // will be invalid, so we need to fail this IteratingCallback. failed(x); } } }





© 2015 - 2024 Weber Informatics LLC | Privacy Policy