package org.mentaqueue.test.owt;

import java.util.Random;

import org.mentaaffinity.Affinity;
import org.mentaqueue.AtomicQueue;
import org.mentaqueue.ConcurrentLinkedQueue;
import org.mentaqueue.util.Builder;
import org.mentaqueue.util.DetailedBenchmarker;
import org.mentaqueue.util.PauseSupport;
import org.mentaqueue.wait.SpinWaitStrategy;
import org.mentaqueue.wait.WaitStrategy;
import org.tsutils.TSUtils;
import org.tsutils.Timestamper;

/**
 * Results from this machine => http://mentablog.soliveirajr.com/lab/
 * 
 * More info here => http://mentablog.soliveirajr.com/2012/11/inter-thread-communication-with-2-digit-nanosecond-latency/
 * 
 * Producer and consumer pinned to the same core with hyperthreading.
 * 
 * java -server -Xms1g -Xmx4g -XX:NewSize=512m -XX:MaxNewSize=1024m -cp target/classes/:../MentaAffinity/target/mentaaffinity.jar:../MentaLog/target/mentalog.jar:../MentaAffinity/lib/jna-3.5.1.jar:../TSUtils/target/tsutils.jar org.mentaqueue.test.owt.ConcurrentLinkedQueueTest 100000000 100000000 0
 * 
 * Iterations: 100,000,000 | Avg Time: 90.35 nanos | Min Time: 54 nanos | Max Time: 7731049 nanos | 75%: avg=84 max=100 | 90%: avg=87 max=108 | 99%: avg=89 max=122 | 99.9%: avg=90 max=135 | 99.99%: avg=90 max=383 | 99.999%: avg=90 max=2023
 * 
 * @author Sergio Oliveira Jr.
 */
public class ConcurrentLinkedQueueTest {
	
	private static final int QUEUE_SIZE = 1024;
	private static final Random RANDOM = new Random();
	
	public static void main(String[] args) {
		
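		// args: [0] messages to warm up, [1] messages to measure, [2] pause between messages
		// (0 = no pause, positive = value passed to PauseSupport.random,
		//  negative = PauseSupport.random with a random bound up to the absolute value)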
		final long messagesToWarmup = Long.parseLong(args[0]);
		final long messagesToTest = Long.parseLong(args[1]);
		final int delayBetweenMessages = Integer.parseInt(args[2]);
		
		final DetailedBenchmarker bench = new DetailedBenchmarker();
		
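		// aToB carries the timestamped messages from producer to consumer;
		// bToA is the bounded queue the consumer uses to echo each message back.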
		final ConcurrentLinkedQueue<MutableLong> aToB = new ConcurrentLinkedQueue<MutableLong>(MutableLong.BUILDER);
		final AtomicQueue<MutableLong> bToA = new AtomicQueue<MutableLong>(QUEUE_SIZE, MutableLong.BUILDER);
		
		final WaitStrategy producerWaitStrategy = new SpinWaitStrategy();
		final WaitStrategy consumerWaitStrategy = new SpinWaitStrategy();
		
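		// single timestamp source shared by both threads, so send and receive times come from the same clock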
		final Timestamper timestamper = TSUtils.getTimestamper();
		
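		// The producer sends one timestamped message at a time through aToB and only sends
		// the next one after receiving the echo on bToA, so a single message is in flight.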
		Thread producer = new Thread(new Runnable() {
			
			private final void send(long ts) {
				MutableLong ml = aToB.nextToDispatch();
				ml.set(ts);
				aToB.flush(); // no lazySet, send immediately
			}

			@Override
			public void run() {
				
				Affinity.bind();
				
				// first warmup...
				
				send(0); // send the first one to start!
				
				long count = 0;
				
				long total = messagesToWarmup + messagesToTest;
				
				while(count < total) {
					
					// receive echo to send the next message...
					long avail = bToA.availableToPoll();
					if (avail > 0) {
						bToA.poll();
						count++;
						bToA.donePolling(true); // can be lazy here because queue is not full...
						producerWaitStrategy.reset();
						
						long ts = timestamper.nanoTime(); // warm up this as well!
						
						if (count < messagesToWarmup) {
							send(0);
						} else if (count < total) {
							if (delayBetweenMessages == 0) {
								// No pause!
								send(ts);
							} else if (delayBetweenMessages < 0) {
								PauseSupport.random(RANDOM.nextInt(-1 * delayBetweenMessages));
								send(timestamper.nanoTime()); // don't include the pause of course
							} else {
								PauseSupport.random(delayBetweenMessages);
								send(timestamper.nanoTime()); // don't include the pause of course
							}
						}
					} else {
						producerWaitStrategy.waitForOtherThread();
					}
				}
				
				Affinity.unbind();
				
				// DONE!
				System.out.println(bench.results());
				
			}
		}, "Thread-Producer");
		
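		// The consumer polls aToB, records the one-way latency (receive time minus the
		// timestamp carried by the message) and echoes the message back through bToA.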
		Thread consumer = new Thread(new Runnable() {

			@Override
			public void run() {
				
				Affinity.bind();
				
				while (true) {
					
					// time and echo back...
					
					long avail = aToB.availableToPoll();
					if (avail > 0) {
						MutableLong ml = aToB.poll();
						long ts = ml.get();
						aToB.donePolling(true); // can be lazy because queue will never be full...
						
						long end = timestamper.nanoTime(); // warm up this (even if not using it)
						
						if (ts > 0) {
							bench.measure(end - ts);
						}
						
						consumerWaitStrategy.reset();
						
						// echo back
						MutableLong back = bToA.nextToDispatch();
						back.set(ts);
						bToA.flush(); // send immediately so the producer gets the echo...
					} else {
						consumerWaitStrategy.waitForOtherThread();
					}
				}
			}
		}, "Thread-Consumer");
		
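		// Pin each thread to a specific logical processor; on the benchmark machine described
		// in the javadoc, processors 2 and 6 are hyperthread siblings of the same physical core.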
		if (Affinity.isAvailable()) {
			Affinity.assignToProcessor(2, producer);
			Affinity.assignToProcessor(6, consumer);
		} else {
			System.err.println("Thread affinity not available!");
		}
		
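		// The consumer loops forever, so it runs as a daemon thread; the JVM exits once the
		// non-daemon producer finishes and prints the benchmark results.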
		producer.setDaemon(false);
		consumer.setDaemon(true);

		consumer.start();
		try { Thread.sleep(1); } catch(Exception e) { }
		producer.start();

	}
		
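	// Simple mutable holder used as the message object; BUILDER lets the queues create
	// instances themselves instead of the caller allocating a new object per message.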
	private static class MutableLong {
		
		private long value = 0L;

		public MutableLong(long value) {
			this.value = value;
		}

		public final long get() {
			return value;
		}

		public final void set(long value) {
			this.value = value;
		}
		
		@Override
		public String toString() {
			return String.valueOf(value);
		}
		
		public final static Builder<MutableLong> BUILDER = new Builder<MutableLong>() {
			@Override
			public MutableLong newInstance() {
				return new MutableLong(-1);
			}
		};
	}
}