org.jboss.netty.handler.queue.BufferedWriteHandler Maven / Gradle / Ivy
The Netty project is an effort to provide an asynchronous event-driven
network application framework and tools for rapid development of
maintainable high performance and high scalability protocol servers and
clients. In other words, Netty is a NIO client server framework which
enables quick and easy development of network applications such as protocol
servers and clients. It greatly simplifies and streamlines network
programming such as TCP and UDP socket servers.
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.handler.queue;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelConfig;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelFutureListener;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ChannelStateEvent;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.LifeCycleAwareChannelHandler;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelHandler;
import org.jboss.netty.channel.socket.nio.NioSocketChannelConfig;
import org.jboss.netty.util.HashedWheelTimer;
import java.io.IOException;
import java.nio.channels.ClosedChannelException;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Emulates buffered write operation. This handler stores all write requests
* into an unbounded {@link Queue} and flushes them to the downstream when
* the {@link #flush()} method is called.
*
* Here is an example that demonstrates the usage:
*
* BufferedWriteHandler bufferedWriter = new BufferedWriteHandler();
* ChannelPipeline p = ...;
* p.addFirst("buffer", bufferedWriter);
*
* ...
*
* Channel ch = ...;
*
* // msg1, 2, and 3 are stored in the queue of bufferedWriter.
* ch.write(msg1);
* ch.write(msg2);
* ch.write(msg3);
*
* // and will be flushed on request.
* bufferedWriter.flush();
*
*
* Auto-flush
* The write request queue is automatically flushed when the associated
* {@link Channel} is disconnected or closed, but not in any other case. This
* means you have to call {@link #flush()} yourself before the queue grows too
* large. You can implement your own auto-flush strategy by extending this
* handler:
*
* public class AutoFlusher extends {@link BufferedWriteHandler} {
*
*     private final AtomicLong bufferSize = new AtomicLong();
*
*     {@literal @Override}
*     public void writeRequested({@link ChannelHandlerContext} ctx, {@link MessageEvent} e) throws Exception {
*         super.writeRequested(ctx, e);
*
*         {@link ChannelBuffer} data = ({@link ChannelBuffer}) e.getMessage();
*         long newBufferSize = bufferSize.addAndGet(data.readableBytes());
*
*         // Flush the queue if it gets larger than 8KiB.
*         if (newBufferSize > 8192) {
*             flush();
*             bufferSize.set(0);
*         }
*     }
* }
*
*
* Consolidate on flush
*
* If there are two or more write requests in the queue and all their message
* type is {@link ChannelBuffer}, they can be merged into a single write request
* to save the number of system calls.
*
* BEFORE consolidation:           AFTER consolidation:
* +-------+-------+-------+       +-------------+
* | Req C | Req B | Req A |------>| Request ABC |
* | "789" | "456" | "123" |       | "123456789" |
* +-------+-------+-------+       +-------------+
*
* This feature is disabled by default. You can enable it either when you
* create this handler or on a per-call basis via {@link #flush(boolean)}. If
* you specified {@code true} in the constructor, calling {@link #flush()} will
* always consolidate the queue. Otherwise, you have to call
* {@link #flush(boolean)} with {@code true} to enable consolidation for that
* particular flush.
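*
* For example, a minimal sketch (the variable names are only illustrative):
*
* // Consolidate on every flush:
* BufferedWriteHandler alwaysConsolidating = new BufferedWriteHandler(true);
* alwaysConsolidating.flush();
*
* // Consolidate only for this particular flush:
* BufferedWriteHandler onDemand = new BufferedWriteHandler();
* onDemand.flush(true);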
*
* The disadvantage of consolidation is that the {@link ChannelFuture} and its
* {@link ChannelFutureListener}s associated with the original write requests
* might be notified later than when they are actually written out. They will
* always be notified when the consolidated write request is fully written.
*
* The following example implements the consolidation strategy that reduces
* the number of write requests based on the writability of a channel:
*
* public class ConsolidatingAutoFlusher extends {@link BufferedWriteHandler} {
*
*     public ConsolidatingAutoFlusher() {
*         // Enable consolidation by default.
*         super(true);
*     }
*
*     {@literal @Override}
*     public void channelOpen({@link ChannelHandlerContext} ctx, {@link ChannelStateEvent} e) throws Exception {
*         {@link ChannelConfig} cfg = e.getChannel().getConfig();
*         if (cfg instanceof {@link NioSocketChannelConfig}) {
*             // Lower the watermark to increase the chance of consolidation.
*             (({@link NioSocketChannelConfig}) cfg).setWriteBufferLowWaterMark(0);
*         }
*         super.channelOpen(ctx, e);
*     }
*
*     {@literal @Override}
*     public void writeRequested({@link ChannelHandlerContext} ctx, {@link MessageEvent} e) throws Exception {
*         super.writeRequested(ctx, e);
*         if (e.getChannel().isWritable()) {
*             flush();
*         }
*     }
*
*     {@literal @Override}
*     public void channelInterestChanged(
*             {@link ChannelHandlerContext} ctx, {@link ChannelStateEvent} e) throws Exception {
*         if (e.getChannel().isWritable()) {
*             flush();
*         }
*     }
* }
*
*
* Prioritized Writes
*
* You can implement prioritized writes by specifying an unbounded priority
* queue in the constructor of this handler. You will then need to design a
* proper strategy for how often {@link #flush()} should be called; for
* example, you could call {@link #flush()} periodically, using a
* {@link HashedWheelTimer}, every second.
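*
* The following is a minimal sketch of that idea; the comparator and the
* timer wiring are illustrative assumptions, not part of this handler:
*
* // A comparator that defines the write priority is assumed to exist.
* Comparator<MessageEvent> priorityComparator = ...;
*
* final BufferedWriteHandler prioritizedWriter = new BufferedWriteHandler(
*         new PriorityBlockingQueue<MessageEvent>(11, priorityComparator));
*
* // Flush the prioritized queue roughly once a second.
* Timer timer = new {@link HashedWheelTimer}();
* timer.newTimeout(new TimerTask() {
*     public void run(Timeout timeout) throws Exception {
*         prioritizedWriter.flush();
*         // Re-schedule the next periodic flush.
*         timeout.getTimer().newTimeout(this, 1, TimeUnit.SECONDS);
*     }
* }, 1, TimeUnit.SECONDS);
*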
* @apiviz.landmark
*/
public class BufferedWriteHandler extends SimpleChannelHandler implements LifeCycleAwareChannelHandler {

    private final Queue<MessageEvent> queue;
    private final boolean consolidateOnFlush;
    private volatile ChannelHandlerContext ctx;
    private final AtomicBoolean flush = new AtomicBoolean(false);

    /**
     * Creates a new instance with the default unbounded thread-safe queue
     * ({@link ConcurrentLinkedQueue}) and without buffer consolidation.
     */
    public BufferedWriteHandler() {
        this(false);
    }

    /**
     * Creates a new instance with the specified thread-safe unbounded
     * {@link Queue} and without buffer consolidation. Please note that
     * specifying a bounded {@link Queue} or a thread-unsafe {@link Queue} will
     * result in unspecified behavior.
     */
    public BufferedWriteHandler(Queue<MessageEvent> queue) {
        this(queue, false);
    }

    /**
     * Creates a new instance with a {@link ConcurrentLinkedQueue}.
     *
     * @param consolidateOnFlush
     *        {@code true} if and only if the buffered write requests are merged
     *        into a single write request on {@link #flush()}
     */
    public BufferedWriteHandler(boolean consolidateOnFlush) {
        this(new ConcurrentLinkedQueue<MessageEvent>(), consolidateOnFlush);
    }
    /**
     * Creates a new instance with the specified thread-safe unbounded
     * {@link Queue}. Please note that specifying a bounded {@link Queue} or
     * a thread-unsafe {@link Queue} will result in unspecified behavior.
     *
     * @param consolidateOnFlush
     *        {@code true} if and only if the buffered write requests are merged
     *        into a single write request on {@link #flush()}
     */
    public BufferedWriteHandler(Queue<MessageEvent> queue, boolean consolidateOnFlush) {
        if (queue == null) {
            throw new NullPointerException("queue");
        }
        this.queue = queue;
        this.consolidateOnFlush = consolidateOnFlush;
    }

    public boolean isConsolidateOnFlush() {
        return consolidateOnFlush;
    }

    /**
     * Returns the queue which stores the write requests. The default
     * implementation returns the queue which was specified in the constructor.
     */
    protected Queue<MessageEvent> getQueue() {
        return queue;
    }
    /**
     * Sends the queued write requests to the downstream.
     */
    public void flush() {
        flush(consolidateOnFlush);
    }

    /**
     * Sends the queued write requests to the downstream.
     *
     * @param consolidateOnFlush
     *        {@code true} if and only if the buffered write requests are merged
     *        into a single write request
     */
    public void flush(boolean consolidateOnFlush) {
        final ChannelHandlerContext ctx = this.ctx;
        if (ctx == null) {
            // No write request was made.
            return;
        }
        Channel channel = ctx.getChannel();
        boolean acquired;

        // Use CAS to see if a flush is already running; if so, we don't need to take further action.
        if (acquired = flush.compareAndSet(false, true)) {
            final Queue<MessageEvent> queue = getQueue();
            if (consolidateOnFlush) {
                if (queue.isEmpty()) {
                    flush.set(false);
                    return;
                }
                List<MessageEvent> pendingWrites = new ArrayList<MessageEvent>();
                for (;;) {
                    MessageEvent e = queue.poll();
                    if (e == null) {
                        break;
                    }
                    if (!(e.getMessage() instanceof ChannelBuffer)) {
                        if ((pendingWrites = consolidatedWrite(pendingWrites)) == null) {
                            pendingWrites = new ArrayList<MessageEvent>();
                        }
                        ctx.sendDownstream(e);
                    } else {
                        pendingWrites.add(e);
                    }
                }
                consolidatedWrite(pendingWrites);
            } else {
                for (;;) {
                    MessageEvent e = queue.poll();
                    if (e == null) {
                        break;
                    }
                    ctx.sendDownstream(e);
                }
            }
            flush.set(false);
        }

        // If we ran the flush and the channel was closed in the meantime, or new writes arrived
        // while the channel is still writable, flush again so nothing is left behind.
        if (acquired && (!channel.isConnected() || channel.isWritable() && !queue.isEmpty())) {
            flush(consolidateOnFlush);
        }
    }
    private List<MessageEvent> consolidatedWrite(final List<MessageEvent> pendingWrites) {
        final int size = pendingWrites.size();
        if (size == 1) {
            ctx.sendDownstream(pendingWrites.remove(0));
            return pendingWrites;
        }
        if (size == 0) {
            return pendingWrites;
        }

        ChannelBuffer[] data = new ChannelBuffer[size];
        for (int i = 0; i < data.length; i ++) {
            data[i] = (ChannelBuffer) pendingWrites.get(i).getMessage();
        }

        ChannelBuffer composite = ChannelBuffers.wrappedBuffer(data);
        ChannelFuture future = Channels.future(ctx.getChannel());
        future.addListener(new ChannelFutureListener() {
            public void operationComplete(ChannelFuture future)
                    throws Exception {
                if (future.isSuccess()) {
                    for (MessageEvent e: pendingWrites) {
                        e.getFuture().setSuccess();
                    }
                } else {
                    Throwable cause = future.getCause();
                    for (MessageEvent e: pendingWrites) {
                        e.getFuture().setFailure(cause);
                    }
                }
            }
        });
        Channels.write(ctx, future, composite);
        return null;
    }
    /**
     * Stores all write requests to the queue so that they are actually written
     * on {@link #flush()}.
     */
    @Override
    public void writeRequested(ChannelHandlerContext ctx, MessageEvent e)
            throws Exception {
        if (this.ctx == null) {
            this.ctx = ctx;
        } else {
            assert this.ctx == ctx;
        }
        getQueue().add(e);
    }

    @Override
    public void disconnectRequested(ChannelHandlerContext ctx,
            ChannelStateEvent e) throws Exception {
        try {
            flush(consolidateOnFlush);
        } finally {
            ctx.sendDownstream(e);
        }
    }

    @Override
    public void closeRequested(ChannelHandlerContext ctx, ChannelStateEvent e)
            throws Exception {
        try {
            flush(consolidateOnFlush);
        } finally {
            ctx.sendDownstream(e);
        }
    }
    public void beforeAdd(ChannelHandlerContext ctx) throws Exception {
        // Nothing to do
    }

    public void afterAdd(ChannelHandlerContext ctx) throws Exception {
        // Nothing to do
    }

    public void beforeRemove(ChannelHandlerContext ctx) throws Exception {
        // Nothing to do
    }

    /**
     * Fails all buffered writes that are left. See
     * issue #308 for more details.
     */
    public void afterRemove(ChannelHandlerContext ctx) throws Exception {
        Throwable cause = null;
        for (;;) {
            MessageEvent ev = queue.poll();
            if (ev == null) {
                break;
            }
            if (cause == null) {
                cause = new IOException("Unable to flush message");
            }
            ev.getFuture().setFailure(cause);
        }
        if (cause != null) {
            Channels.fireExceptionCaughtLater(ctx.getChannel(), cause);
        }
    }
}