org.mentaqueue.test.owt.LatencyTest1 (menta-queue)
A super fast inter-thread transfer queue.
package org.mentaqueue.test.owt;
import java.util.Random;
import org.mentaaffinity.Affinity;
import org.mentaqueue.AtomicQueue;
import org.mentaqueue.util.Builder;
import org.mentaqueue.util.DetailedBenchmarker;
import org.mentaqueue.util.PauseSupport;
import org.mentaqueue.wait.SpinWaitStrategy;
import org.mentaqueue.wait.WaitStrategy;
import org.tsutils.TSUtils;
import org.tsutils.Timestamper;
/**
* Results from this machine => http://mentablog.soliveirajr.com/lab/
*
* More info here => http://mentablog.soliveirajr.com/2012/11/inter-thread-communication-with-2-digit-nanosecond-latency/
*
* Producer and consumer pinned to the same core with hyperthreading.
*
* java -server -Xms1g -Xmx4g -XX:NewSize=512m -XX:MaxNewSize=1024m -cp target/classes/:../MentaAffinity/target/mentaaffinity.jar:../MentaLog/target/mentalog.jar:../MentaAffinity/lib/jna-3.5.1.jar:../TSUtils/target/tsutils.jar org.mentaqueue.test.owt.LatencyTest1 100000000 100000000 0
*
* Iterations: 100,000,000 | Avg Time: 50.79 nanos | Min Time: 32 nanos | Max Time: 59616 nanos | 75%: avg=49 max=53 | 90%: avg=50 max=56 | 99%: avg=50 max=59 | 99.9%: avg=50 max=63 | 99.99%: avg=50 max=143 | 99.999%: avg=50 max=1089
*
* @author Sergio Oliveira Jr.
*/
public class LatencyTest1 {
private static final int QUEUE_SIZE = 1024;
private static final Random RANDOM = new Random();
public static void main(String[] args) {
final long messagesToWarmup = Long.parseLong(args[0]);
final long messagesToTest = Long.parseLong(args[1]);
final int delayBetweenMessages = Integer.parseInt(args[2]);
final DetailedBenchmarker bench = new DetailedBenchmarker();
final AtomicQueue<MutableLong> aToB = new AtomicQueue<MutableLong>(QUEUE_SIZE, MutableLong.BUILDER);
final AtomicQueue<MutableLong> bToA = new AtomicQueue<MutableLong>(QUEUE_SIZE, MutableLong.BUILDER);
final WaitStrategy producerWaitStrategy = new SpinWaitStrategy();
final WaitStrategy consumerWaitStrategy = new SpinWaitStrategy();
final Timestamper timestamper = TSUtils.getTimestamper();
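// Ping-pong latency protocol: the producer stamps a nanoTime, pushes it through aToB
// and waits for the consumer to echo it back through bToA before sending the next
// message, so only one message is ever in flight. Both threads busy-spin on a
// SpinWaitStrategy while waiting, trading CPU for the lowest possible latency.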
Thread producer = new Thread(new Runnable() {
private final void send(long ts) {
MutableLong ml = aToB.nextToDispatch();
ml.set(ts);
aToB.flush(); // no lazySet, send immediately
}
@Override
public void run() {
Affinity.bind();
// first warmup...
send(0); // send the first one to start!
long count = 0;
long total = messagesToWarmup + messagesToTest;
while(count < total) {
// receive echo to send the next message...
long avail = bToA.availableToPoll();
if (avail > 0) {
bToA.poll();
count++;
bToA.donePolling(true); // can be lazy here because queue is not full...
producerWaitStrategy.reset();
long ts = timestamper.nanoTime(); // warm up this as well!
if (count < messagesToWarmup) {
send(0);
} else if (count < total) {
if (delayBetweenMessages == 0) {
// No pause!
send(ts);
} else if (delayBetweenMessages < 0) {
PauseSupport.random(RANDOM.nextInt(-1 * delayBetweenMessages));
send(timestamper.nanoTime()); // don't include the pause of course
} else {
PauseSupport.random(delayBetweenMessages);
send(timestamper.nanoTime()); // don't include the pause of course
}
}
} else {
producerWaitStrategy.waitForOtherThread();
}
}
Affinity.unbind();
// DONE!
System.out.println(bench.results());
}
}, "Thread-Producer");
Thread consumer = new Thread(new Runnable() {
@Override
public void run() {
Affinity.bind();
while (true) {
// time and echo back...
long avail = aToB.availableToPoll();
if (avail > 0) {
MutableLong ml = aToB.poll();
long ts = ml.get();
aToB.donePolling(true); // can be lazy because queue will never be full...
long end = timestamper.nanoTime(); // warm up this (even if not using it)
if (ts > 0) {
bench.measure(end - ts);
}
consumerWaitStrategy.reset();
// echo back
MutableLong back = bToA.nextToDispatch();
back.set(ts);
bToA.flush(); // send immediately so the producer gets the echo...
} else {
consumerWaitStrategy.waitForOtherThread();
}
}
}
}, "Thread-Consumer");
if (Affinity.isAvailable()) {
Affinity.assignToProcessor(2, producer);
Affinity.assignToProcessor(6, consumer);
} else {
System.err.println("Thread affinity not available!");
}
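// The consumer never leaves its loop, so it runs as a daemon thread; the JVM exits
// once the non-daemon producer finishes and prints the results. The 1 ms sleep below
// gives the consumer a head start before the producer sends the first message.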
producer.setDaemon(false);
consumer.setDaemon(true);
consumer.start();
try { Thread.sleep(1); } catch(Exception e) { }
producer.start();
}
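// MutableLong is the message object exchanged through the queues: a plain long holder
// supplied by BUILDER. The hot loops above only set and read pooled instances obtained
// from nextToDispatch()/poll(), allocating nothing per message.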
private static class MutableLong {
private long value = 0L;
public MutableLong(long value) {
this.value = value;
}
public final long get() {
return value;
}
public final void set(long value) {
this.value = value;
}
@Override
public String toString() {
return String.valueOf(value);
}
public final static Builder<MutableLong> BUILDER = new Builder<MutableLong>() {
@Override
public MutableLong newInstance() {
return new MutableLong(-1);
}
};
}
}
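For reference, the exchange pattern exercised by LatencyTest1 reduces to the following minimal sketch. It uses only the AtomicQueue and Builder calls that appear in the listing above (nextToDispatch/flush on the producing side, availableToPoll/poll/donePolling on the consuming side); the class name, package and payload type are illustrative, not part of menta-queue.

package org.mentaqueue.example; // illustrative package

import org.mentaqueue.AtomicQueue;
import org.mentaqueue.util.Builder;

public class MinimalExchange {

    public static void main(String[] args) throws InterruptedException {

        // same construction as LatencyTest1: fixed capacity plus a Builder for the pooled elements
        final AtomicQueue<StringBuilder> queue = new AtomicQueue<StringBuilder>(1024, new Builder<StringBuilder>() {
            @Override
            public StringBuilder newInstance() {
                return new StringBuilder(64);
            }
        });

        Thread consumer = new Thread(new Runnable() {
            @Override
            public void run() {
                while (true) {
                    long avail = queue.availableToPoll(); // how many messages can be drained
                    if (avail > 0) {
                        for (long i = 0; i < avail; i++) {
                            StringBuilder sb = queue.poll();
                            if ("bye".contentEquals(sb)) {
                                queue.donePolling(true);
                                return;
                            }
                            System.out.println(sb);
                        }
                        queue.donePolling(true); // release the drained slots
                    }
                    // a real consumer would use a WaitStrategy here, as LatencyTest1 does
                }
            }
        }, "Consumer");
        consumer.start();

        // claim a pooled slot, mutate it in place, then flush so the consumer sees it
        StringBuilder sb = queue.nextToDispatch();
        sb.setLength(0);
        sb.append("hello");
        queue.flush();

        sb = queue.nextToDispatch();
        sb.setLength(0);
        sb.append("bye");
        queue.flush();

        consumer.join();
    }
}

The key property is on the producing side: nextToDispatch() hands back a pooled element, so steady-state publishing allocates nothing; this is the same pattern LatencyTest1 relies on in its measurement loops.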