com.arangodb.shaded.netty.handler.codec.compression.SnappyFrameDecoder Maven / Gradle / Ivy

/*
 * Copyright 2012 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package com.arangodb.shaded.netty.handler.codec.compression;

import com.arangodb.shaded.netty.buffer.ByteBuf;
import com.arangodb.shaded.netty.channel.ChannelHandlerContext;
import com.arangodb.shaded.netty.handler.codec.ByteToMessageDecoder;

import java.util.List;

import static com.arangodb.shaded.netty.handler.codec.compression.Snappy.validateChecksum;

/**
 * Uncompresses a {@link ByteBuf} encoded with the Snappy framing format.
 *
 * See the Snappy framing format: https://github.com/google/snappy/blob/1.1.9/framing_format.txt
 *
 * Note that by default, validation of the checksum header in each chunk is
 * DISABLED for performance improvements. If performance is less of an issue,
 * or if you would prefer the safety that checksum validation brings, please
 * use the {@link #SnappyFrameDecoder(boolean)} constructor with the argument
 * set to {@code true}.
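 *
 * <p>A minimal usage sketch (the pipeline setup and handler name are illustrative
 * and not part of this class):
 * <pre>{@code
 * ChannelPipeline pipeline = channel.pipeline();
 * // pass true to enable per-chunk checksum validation
 * pipeline.addLast("snappyFrameDecoder", new SnappyFrameDecoder(true));
 * }</pre>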
 */
public class SnappyFrameDecoder extends ByteToMessageDecoder {

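    // Chunk types defined by the Snappy framing format. The one-byte chunk tag is mapped to these
    // values in mapChunkType(byte): 0x00 = COMPRESSED_DATA, 0x01 = UNCOMPRESSED_DATA,
    // 0xff = STREAM_IDENTIFIER, 0x80-0xfe = RESERVED_SKIPPABLE, everything else = RESERVED_UNSKIPPABLE.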
    private enum ChunkType {
        STREAM_IDENTIFIER,
        COMPRESSED_DATA,
        UNCOMPRESSED_DATA,
        RESERVED_UNSKIPPABLE,
        RESERVED_SKIPPABLE
    }

    private static final int SNAPPY_IDENTIFIER_LEN = 6;
    // See https://github.com/google/snappy/blob/1.1.9/framing_format.txt#L95
    private static final int MAX_UNCOMPRESSED_DATA_SIZE = 65536 + 4;
    // See https://github.com/google/snappy/blob/1.1.9/framing_format.txt#L82
    private static final int MAX_DECOMPRESSED_DATA_SIZE = 65536;
    // See https://github.com/google/snappy/blob/1.1.9/framing_format.txt#L82
    private static final int MAX_COMPRESSED_CHUNK_SIZE = 16777216 - 1;

    private final Snappy snappy = new Snappy();
    private final boolean validateChecksums;

    // Becomes true once a valid STREAM_IDENTIFIER chunk has been read.
    private boolean started;
    // Becomes true after a decoding failure; any further input is discarded.
    private boolean corrupted;
    // Remaining bytes of a RESERVED_SKIPPABLE chunk still to be skipped across decode() calls.
    private int numBytesToSkip;

    /**
     * Creates a new snappy-framed decoder with validation of checksums
     * turned OFF. To turn checksum validation on, please use the alternate
     * {@link #SnappyFrameDecoder(boolean)} constructor.
     */
    public SnappyFrameDecoder() {
        this(false);
    }

    /**
     * Creates a new snappy-framed decoder with validation of checksums
     * as specified.
     *
     * @param validateChecksums
     *        If true, the checksum field will be validated against the actual
     *        uncompressed data, and if the checksums do not match, a suitable
     *        {@link DecompressionException} will be thrown
     */
    public SnappyFrameDecoder(boolean validateChecksums) {
        this.validateChecksums = validateChecksums;
    }

    @Override
    protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
        if (corrupted) {
            in.skipBytes(in.readableBytes());
            return;
        }

        if (numBytesToSkip != 0) {
            // The last chunkType we detected was RESERVED_SKIPPABLE and we still have some bytes to skip.
            int skipBytes = Math.min(numBytesToSkip, in.readableBytes());
            in.skipBytes(skipBytes);
            numBytesToSkip -= skipBytes;

            // Let's return and try again.
            return;
        }

        try {
            int idx = in.readerIndex();
            final int inSize = in.readableBytes();
            if (inSize < 4) {
                // We need to be at least able to read the chunk type identifier (one byte),
                // and the length of the chunk (3 bytes) in order to proceed
                return;
            }

            final int chunkTypeVal = in.getUnsignedByte(idx);
            final ChunkType chunkType = mapChunkType((byte) chunkTypeVal);
            final int chunkLength = in.getUnsignedMediumLE(idx + 1);

            switch (chunkType) {
                case STREAM_IDENTIFIER:
                    if (chunkLength != SNAPPY_IDENTIFIER_LEN) {
                        throw new DecompressionException("Unexpected length of stream identifier: " + chunkLength);
                    }

                    if (inSize < 4 + SNAPPY_IDENTIFIER_LEN) {
                        break;
                    }

                    in.skipBytes(4);
                    int offset = in.readerIndex();
                    in.skipBytes(SNAPPY_IDENTIFIER_LEN);

                    checkByte(in.getByte(offset++), (byte) 's');
                    checkByte(in.getByte(offset++), (byte) 'N');
                    checkByte(in.getByte(offset++), (byte) 'a');
                    checkByte(in.getByte(offset++), (byte) 'P');
                    checkByte(in.getByte(offset++), (byte) 'p');
                    checkByte(in.getByte(offset), (byte) 'Y');

                    started = true;
                    break;
                case RESERVED_SKIPPABLE:
                    if (!started) {
                        throw new DecompressionException("Received RESERVED_SKIPPABLE tag before STREAM_IDENTIFIER");
                    }

                    in.skipBytes(4);

                    int skipBytes = Math.min(chunkLength, in.readableBytes());
                    in.skipBytes(skipBytes);
                    if (skipBytes != chunkLength) {
                        // We could not skip all bytes, let's store the remaining so we can do so once we receive
                        // more data.
                        numBytesToSkip = chunkLength - skipBytes;
                    }
                    break;
                case RESERVED_UNSKIPPABLE:
                    // The spec mandates that reserved unskippable chunks must immediately
                    // return an error, as we must assume that we cannot decode the stream
                    // correctly
                    throw new DecompressionException(
                            "Found reserved unskippable chunk type: 0x" + Integer.toHexString(chunkTypeVal));
                case UNCOMPRESSED_DATA:
                    if (!started) {
                        throw new DecompressionException("Received UNCOMPRESSED_DATA tag before STREAM_IDENTIFIER");
                    }
                    if (chunkLength > MAX_UNCOMPRESSED_DATA_SIZE) {
                        throw new DecompressionException("Received UNCOMPRESSED_DATA larger than " +
                                MAX_UNCOMPRESSED_DATA_SIZE + " bytes");
                    }

                    if (inSize < 4 + chunkLength) {
                        return;
                    }

                    in.skipBytes(4);
                    if (validateChecksums) {
                        int checksum = in.readIntLE();
                        validateChecksum(checksum, in, in.readerIndex(), chunkLength - 4);
                    } else {
                        in.skipBytes(4);
                    }
                    out.add(in.readRetainedSlice(chunkLength - 4));
                    break;
                case COMPRESSED_DATA:
                    if (!started) {
                        throw new DecompressionException("Received COMPRESSED_DATA tag before STREAM_IDENTIFIER");
                    }

                    if (chunkLength > MAX_COMPRESSED_CHUNK_SIZE) {
                        throw new DecompressionException("Received COMPRESSED_DATA that contains" +
                                " chunk that exceeds " + MAX_COMPRESSED_CHUNK_SIZE + " bytes");
                    }

                    if (inSize < 4 + chunkLength) {
                        return;
                    }

                    in.skipBytes(4);
                    int checksum = in.readIntLE();

                    int uncompressedSize = snappy.getPreamble(in);
                    if (uncompressedSize > MAX_DECOMPRESSED_DATA_SIZE) {
                        throw new DecompressionException("Received COMPRESSED_DATA that contains" +
                                " uncompressed data that exceeds " + MAX_DECOMPRESSED_DATA_SIZE + " bytes");
                    }

                    ByteBuf uncompressed = ctx.alloc().buffer(uncompressedSize, MAX_DECOMPRESSED_DATA_SIZE);
                    try {
                        if (validateChecksums) {
                            int oldWriterIndex = in.writerIndex();
                            try {
                                in.writerIndex(in.readerIndex() + chunkLength - 4);
                                snappy.decode(in, uncompressed);
                            } finally {
                                in.writerIndex(oldWriterIndex);
                            }
                            validateChecksum(checksum, uncompressed, 0, uncompressed.writerIndex());
                        } else {
                            snappy.decode(in.readSlice(chunkLength - 4), uncompressed);
                        }
                        out.add(uncompressed);
                        uncompressed = null;
                    } finally {
                        if (uncompressed != null) {
                            uncompressed.release();
                        }
                    }
                    snappy.reset();
                    break;
            }
        } catch (Exception e) {
            corrupted = true;
            throw e;
        }
    }

    private static void checkByte(byte actual, byte expect) {
        if (actual != expect) {
            throw new DecompressionException("Unexpected stream identifier contents. Mismatched snappy " +
                    "protocol version?");
        }
    }

    /**
     * Decodes the chunk type from the type tag byte.
     *
     * @param type The tag byte extracted from the stream
     * @return The appropriate {@link ChunkType}, defaulting to {@link ChunkType#RESERVED_UNSKIPPABLE}
     */
    private static ChunkType mapChunkType(byte type) {
        if (type == 0) {
            return ChunkType.COMPRESSED_DATA;
        } else if (type == 1) {
            return ChunkType.UNCOMPRESSED_DATA;
        } else if (type == (byte) 0xff) {
            return ChunkType.STREAM_IDENTIFIER;
        } else if ((type & 0x80) == 0x80) {
            return ChunkType.RESERVED_SKIPPABLE;
        } else {
            return ChunkType.RESERVED_UNSKIPPABLE;
        }
    }
}