package org.mentaqueue.test.owt;

import java.util.concurrent.locks.LockSupport;

import org.mentaaffinity.Affinity;
import org.mentaqueue.AtomicQueue;
import org.mentaqueue.util.DetailedBenchmarker;
import org.mentaqueue.util.MutableLong;

/**
 * In this test we use two atomic queues to send a message to another thread through one queue and get it back through the other queue. Thread-A stamps each message with System.nanoTime() and Thread-B records the one-way latency when the message arrives, before echoing it back.
 * 
 * We only send the next message when the previous one comes back (i.e. not a throughput test).
 * 
 * Thread pinning was used.
 * 
 * java -Xms1g -Xmx4g -XX:NewSize=512m -XX:MaxNewSize=1024m -cp target/classes/:../MentaAffinity/target/mentaaffinity.jar:../MentaLog/target/mentalog.jar:../MentaAffinity/lib/jna-3.5.1.jar org.mentaqueue.test.owt.TestOWT2 100000000
 * 
 * The results:
 * 
 * Old machine: Xeon @ 2.53Ghz quad-core.
 * 
 * Iterations: 100,000,000 | Avg Time: 298.86 nanos | Min Time: 181 nanos | Max Time: 274173 nanos | 75%: avg=290 max=306 | 90%: avg=294 max=318 | 99%: avg=297 max=355 | 99.9%: avg=297 max=388 | 99.99%: avg=298 max=3266 | 99.999%: avg=298 max=5267
 * 
 * New machine without overclocking: http://mentablog.soliveirajr.com/lab/
 * 
 * Iterations: 100,000,000 | Avg Time: 139.71 nanos | Min Time: 96 nanos | Max Time: 48013 nanos | 75%: avg=131 max=152 | 90%: avg=136 max=163 | 99%: avg=138 max=182 | 99.9%: avg=139 max=199 | 99.99%: avg=139 max=549 | 99.999%: avg=139 max=8651
 * 
 * @author Sergio Oliveira Jr.
 */
public class TestOWT2 {
	
	private static final int BUFFER_SIZE = 1024;
	
	public static void main(String[] args) throws Exception {
		
		final int messages = Integer.parseInt(args[0]);
		
		final int delay = args.length > 1 ? Integer.parseInt(args[1]) : -1; // optional pause (in nanos) between receiving an echo and sending the next message
		
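		// One queue per direction: aToB carries the timestamped message from Thread-A to Thread-B,
		// and bToA carries the echo back. Each queue has exactly one producer and one consumer here.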
		final AtomicQueue<MutableLong> aToB = new AtomicQueue<MutableLong>(BUFFER_SIZE, MutableLong.BUILDER);
		final AtomicQueue<MutableLong> bToA = new AtomicQueue<MutableLong>(BUFFER_SIZE, MutableLong.BUILDER);
		
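		// Thread-B records one latency sample per message into this benchmarker;
		// Thread-A prints the summary once the last echo has come back.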
		final DetailedBenchmarker bench = new DetailedBenchmarker();
		
		Thread a = new Thread(new Runnable() {
			
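			// Claim the next free slot in the A->B queue (busy-spinning until one is available),
			// stamp it with the current nanoTime and publish it. The boolean passed to flush()
			// presumably selects lazy vs. immediate publication; false is used so the message
			// becomes visible to Thread-B right away.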
			private final void send() {
				MutableLong ml;
				while((ml = aToB.nextToDispatch()) == null); // spin...
				ml.set(System.nanoTime());
				aToB.flush(false);
			}

			@Override
			public void run() {
				
				Affinity.bind();
				
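				// send the first message to start the ping-pong; every later send waits for the previous echo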
				send();
				
				long count = 0;
				while (count < messages) {
					
					// receive...
					
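					// at most one message is ever in flight (the next send waits for the echo),
					// so availableToPoll() should return either 0 or 1 here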
					long avail = bToA.availableToPoll();
					if (avail == 1) {
						MutableLong ml = bToA.poll();
						if (ml != null) {
							count++;
						} else {
							throw new IllegalStateException("This should never happen!");
						}
						bToA.donePolling(true);
						if (count < messages) {
							if (delay > 0) {
								LockSupport.parkNanos(delay);
							}
							send();
						}
					}
				}
				
				Affinity.unbind();
				
				// DONE!
				System.out.println(bench.results());
				
			}
		}, "Thread-A");
		
		Thread b = new Thread(new Runnable() {

			@Override
			public void run() {
				
				Affinity.bind();
				
				long count = 0;
				while (count < messages) {
					
					// echo...
					
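					// wait for the message from Thread-A, record the one-way latency,
					// then echo the same timestamp back through bToA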
					long avail = aToB.availableToPoll();
					if (avail == 1) {
						MutableLong ml = aToB.poll();
						long ts;
						if (ml != null) {
							count++;
							ts = ml.get();
						} else {
							throw new IllegalStateException("This should never happen!");
						}
						aToB.donePolling(true);
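						// one-way latency: Thread-B's nanoTime minus the value Thread-A stamped before
						// dispatching (this assumes System.nanoTime() is comparable across the two pinned cores)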
						bench.measure(System.nanoTime() - ts);
						
						// echo back
						MutableLong back;
						while((back = bToA.nextToDispatch()) == null); // spin
						back.set(ts);
						bToA.flush(false);
					}
				}
				
				Affinity.unbind();
			}
		}, "Thread-B");
		
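		// pin each thread to its own core so the busy-spin loops do not migrate
		// and the latency numbers stay stable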
		Affinity.assignToProcessor(2, a);
		Affinity.assignToProcessor(3, b);

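		// start the echoing thread first and give it a moment to bind to its core
		// and enter its polling loop before Thread-A dispatches the first message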
		b.start();
		Thread.sleep(1);
		a.start();

	}
}