org.jboss.netty.handler.codec.http.HttpRequestDecoder
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.handler.codec.http;

import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.handler.codec.frame.TooLongFrameException;

/**
 * Decodes {@link ChannelBuffer}s into {@link HttpRequest}s and {@link HttpChunk}s.
 *
 * <h3>Parameters that prevent excessive memory consumption</h3>
 * <table border="1">
 * <tr>
 * <th>Name</th><th>Meaning</th>
 * </tr>
 * <tr>
 * <td>{@code maxInitialLineLength}</td>
 * <td>The maximum length of the initial line (e.g. {@code "GET / HTTP/1.0"}).
 *     If the length of the initial line exceeds this value, a
 *     {@link TooLongFrameException} will be raised.</td>
 * </tr>
 * <tr>
 * <td>{@code maxHeaderSize}</td>
 * <td>The maximum length of all headers.  If the sum of the length of each
 *     header exceeds this value, a {@link TooLongFrameException} will be raised.</td>
 * </tr>
 * <tr>
 * <td>{@code maxChunkSize}</td>
 * <td>The maximum length of the content or each chunk.  If the content length
 *     exceeds this value, the transfer encoding of the decoded request will be
 *     converted to 'chunked' and the content will be split into multiple
 *     {@link HttpChunk}s.  If the transfer encoding of the HTTP request is
 *     'chunked' already, each chunk will be split into smaller chunks if the
 *     length of the chunk exceeds this value.  If you prefer not to handle
 *     {@link HttpChunk}s in your handler, insert {@link HttpChunkAggregator}
 *     after this decoder in the {@link ChannelPipeline}.</td>
 * </tr>
 * </table>
 */
public class HttpRequestDecoder extends HttpMessageDecoder {
/**
* Creates a new instance with the default
* {@code maxInitialLineLength (4096)}, {@code maxHeaderSize (8192)}, and
* {@code maxChunkSize (8192)}.
*/
public HttpRequestDecoder() {
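// Empty body: the default limits (4096, 8192, 8192) noted in the javadoc above are
// supplied by the HttpMessageDecoder superclass constructor.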
}
/**
* Creates a new instance with the specified parameters.
*/
public HttpRequestDecoder(
int maxInitialLineLength, int maxHeaderSize, int maxChunkSize) {
super(maxInitialLineLength, maxHeaderSize, maxChunkSize);
}
@Override
protected HttpMessage createMessage(String[] initialLine) throws Exception {
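// initialLine holds the parsed request line: [0] = method (e.g. "GET"), [1] = request URI, [2] = HTTP version.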
return new DefaultHttpRequest(
HttpVersion.valueOf(initialLine[2]), HttpMethod.valueOf(initialLine[0]), initialLine[1]);
}
@Override
protected boolean isDecodingRequest() {
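// Tells the shared HttpMessageDecoder logic that this decoder parses requests, not responses.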
return true;
}
}
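For reference, the class comment above recommends placing an HttpChunkAggregator after this decoder when a handler does not want to deal with HttpChunks. The sketch below is an illustrative Netty 3 pipeline factory showing that wiring; the limit values simply mirror the defaults documented above, and the commented-out MyHttpRequestHandler line stands in for an application handler that is not part of this artifact.

import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.handler.codec.http.HttpChunkAggregator;
import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
import org.jboss.netty.handler.codec.http.HttpResponseEncoder;

public class HttpServerPipelineFactory implements ChannelPipelineFactory {
    public ChannelPipeline getPipeline() throws Exception {
        ChannelPipeline pipeline = Channels.pipeline();
        // Decode inbound bytes into HttpRequest / HttpChunk objects; the limits match the
        // documented defaults (initial line 4096, headers 8192, chunks 8192).
        pipeline.addLast("decoder", new HttpRequestDecoder(4096, 8192, 8192));
        // Optional: collapse a request and its chunks into one HttpRequest so downstream
        // handlers never see HttpChunks (here with a 1 MiB aggregation limit).
        pipeline.addLast("aggregator", new HttpChunkAggregator(1048576));
        // Encode outbound HttpResponse objects back into bytes.
        pipeline.addLast("encoder", new HttpResponseEncoder());
        // pipeline.addLast("handler", new MyHttpRequestHandler()); // hypothetical application handler
        return pipeline;
    }
}

Registering the factory on a server bootstrap (for example, bootstrap.setPipelineFactory(new HttpServerPipelineFactory())) gives each accepted connection its own decoder instance, which matters because the decoder keeps per-connection parsing state.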