org.osgi.util.pushstream.AbstractPushStreamImpl Maven / Gradle / Ivy
OSGi Companion Code for org.osgi.util.pushstream Version 1.0.1
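For orientation, here is a minimal usage sketch of the public API that the class below backs. It is illustrative only and not part of the companion source: the class name PushStreamExample and the literal values are invented for the example, while PushStreamProvider, SimplePushEventSource, filter, map and count come from this package. Exact connection timing depends on the provider defaults, so treat it as a sketch rather than a reference implementation.

import org.osgi.util.promise.Promise;
import org.osgi.util.pushstream.PushStream;
import org.osgi.util.pushstream.PushStreamProvider;
import org.osgi.util.pushstream.SimplePushEventSource;

public class PushStreamExample {
    public static void main(String[] args) throws Exception {
        PushStreamProvider psp = new PushStreamProvider();
        try (SimplePushEventSource<Integer> source =
                psp.createSimpleEventSource(Integer.class)) {
            // Each intermediate operation (filter, map, ...) is backed by a
            // subclass of AbstractPushStreamImpl chained via updateNext(...).
            PushStream<Integer> stream = psp.createStream(source);
            Promise<Long> count = stream
                    .filter(i -> i % 2 == 0)
                    .map(i -> i * 10)
                    .count();
            // Publish after the terminal operation has connected the pipeline;
            // a SimplePushEventSource drops events with no connected consumer.
            for (int i = 0; i < 10; i++) {
                source.publish(i);
            }
            source.endOfStream();
            System.out.println("Even events seen: " + count.getValue());
        }
    }
}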
/*
* Copyright (c) OSGi Alliance (2015, 2018). All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.osgi.util.pushstream;
import static java.util.Collections.emptyList;
import static java.util.concurrent.TimeUnit.NANOSECONDS;
import static org.osgi.util.pushstream.AbstractPushStreamImpl.State.*;
import static org.osgi.util.pushstream.PushEventConsumer.*;
import java.time.Duration;
import java.util.AbstractQueue;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.Optional;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.concurrent.atomic.LongAdder;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.BinaryOperator;
import java.util.function.Consumer;
import java.util.function.IntFunction;
import java.util.function.IntSupplier;
import java.util.function.LongUnaryOperator;
import java.util.function.Supplier;
import java.util.function.ToLongBiFunction;
import java.util.stream.Collector;
import java.util.stream.Collectors;
import org.osgi.util.function.Function;
import org.osgi.util.function.Predicate;
import org.osgi.util.promise.Deferred;
import org.osgi.util.promise.Promise;
import org.osgi.util.promise.PromiseFactory;
import org.osgi.util.promise.TimeoutException;
import org.osgi.util.pushstream.PushEvent.EventType;
abstract class AbstractPushStreamImpl<T> implements PushStream<T> {
private final Function<T,T> IDENTITY = x -> x;
static enum State {
BUILDING, STARTED, CLOSED
}
protected final PushStreamProvider psp;
protected final PromiseFactory promiseFactory;
protected final AtomicReference<State> closed = new AtomicReference<>(BUILDING);
protected final AtomicReference<PushEventConsumer<T>> next = new AtomicReference<>();
protected final AtomicReference<Runnable> onCloseCallback = new AtomicReference<>();
protected final AtomicReference<Consumer< ? super Throwable>> onErrorCallback = new AtomicReference<>();
protected abstract boolean begin();
protected abstract void upstreamClose(PushEvent< ? > close);
AbstractPushStreamImpl(PushStreamProvider psp,
PromiseFactory promiseFactory) {
this.psp = psp;
this.promiseFactory = promiseFactory;
}
protected long handleEvent(PushEvent< ? extends T> event) {
if(closed.get() != CLOSED) {
try {
if(event.isTerminal()) {
close(event.nodata());
return ABORT;
} else {
PushEventConsumer<T> consumer = next.get();
long val;
if(consumer == null) {
//TODO log a warning
val = CONTINUE;
} else {
val = consumer.accept(event);
}
if(val < 0) {
close();
}
return val;
}
} catch (Exception e) {
close(PushEvent.error(e));
return ABORT;
}
}
return ABORT;
}
@Override
public void close() {
PushEvent<T> close = PushEvent.close();
if (close(close, true)) {
upstreamClose(close);
}
}
protected boolean close(PushEvent<T> event) {
return close(event, true);
}
protected boolean close(PushEvent<T> event, boolean sendDownStreamEvent) {
if(!event.isTerminal()) {
throw new IllegalArgumentException("The event " + event + " is not a close event.");
}
if(closed.getAndSet(CLOSED) != CLOSED) {
PushEventConsumer<T> aec = next.getAndSet(null);
if (sendDownStreamEvent && aec != null) {
try {
aec.accept(event);
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
Runnable handler = onCloseCallback.getAndSet(null);
if(handler != null) {
try {
handler.run();
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
if (event.getType() == EventType.ERROR) {
Consumer< ? super Throwable> errorHandler = onErrorCallback.getAndSet(null);
if(errorHandler != null) {
try {
errorHandler.accept(event.getFailure());
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
}
return true;
}
return false;
}
@Override
public PushStream<T> onClose(Runnable closeHandler) {
if(onCloseCallback.compareAndSet(null, closeHandler)) {
if(closed.get() == State.CLOSED && onCloseCallback.compareAndSet(closeHandler, null)) {
closeHandler.run();
}
} else {
throw new IllegalStateException("A close handler has already been defined for this stream object");
}
return this;
}
@Override
public PushStream<T> onError(Consumer< ? super Throwable> closeHandler) {
if(onErrorCallback.compareAndSet(null, closeHandler)) {
if(closed.get() == State.CLOSED) {
//TODO log already closed
onErrorCallback.set(null);
}
} else {
throw new IllegalStateException("A close handler has already been defined for this stream object");
}
return this;
}
private void updateNext(PushEventConsumer<T> consumer) {
if(!next.compareAndSet(null, consumer)) {
throw new IllegalStateException("This stream has already been chained");
} else if(closed.get() == CLOSED && next.compareAndSet(consumer, null)) {
try {
consumer.accept(PushEvent.close());
} catch (Exception e) {
//TODO log
e.printStackTrace();
}
}
}
@Override
public PushStream<T> filter(Predicate< ? super T> predicate) {
AbstractPushStreamImpl<T> eventStream = new IntermediatePushStreamImpl<>(
psp, promiseFactory, this);
updateNext((event) -> {
try {
if (!event.isTerminal()) {
if (predicate.test(event.getData())) {
return eventStream.handleEvent(event);
} else {
return CONTINUE;
}
}
return eventStream.handleEvent(event);
} catch (Exception e) {
close(PushEvent.error(e));
return ABORT;
}
});
return eventStream;
}
@Override
public <R> PushStream<R> map(Function< ? super T, ? extends R> mapper) {
AbstractPushStreamImpl<R> eventStream = new IntermediatePushStreamImpl<>(
psp, promiseFactory, this);
updateNext(event -> {
try {
if (!event.isTerminal()) {
return eventStream.handleEvent(
PushEvent.data(mapper.apply(event.getData())));
} else {
return eventStream.handleEvent(event.nodata());
}
} catch (Exception e) {
close(PushEvent.error(e));
return ABORT;
}
});
return eventStream;
}
@Override
public <R> PushStream<R> asyncMap(int n, int delay,
Function< ? super T,Promise< ? extends R>> mapper) {
AbstractPushStreamImpl<R> eventStream = new IntermediatePushStreamImpl<>(
psp, promiseFactory, this);
Semaphore s = new Semaphore(n);
updateNext(event -> {
try {
if (event.isTerminal()) {
s.acquire(n);
eventStream.close(event.nodata());
return ABORT;
}
s.acquire(1);
Promise< ? extends R> p = mapper.apply(event.getData());
p.thenAccept(d -> promiseFactory.executor().execute(() -> {
try {
if (eventStream
.handleEvent(PushEvent.data(d)) < 0) {
PushEvent<R> close = PushEvent.close();
eventStream.close(close);
// Upstream close is needed as we have no direct
// backpressure
upstreamClose(close);
}
} finally {
s.release();
}
})).onFailure(t -> promiseFactory.executor().execute(() -> {
PushEvent<T> error = PushEvent.error(t);
close(error);
// Upstream close is needed as we have no direct
// backpressure
upstreamClose(error);
}));
// The number active before was one less than the active number
int activePromises = Math.max(0, n - s.availablePermits() - 1);
return (activePromises + s.getQueueLength()) * delay;
} catch (Exception e) {
close(PushEvent.error(e));
return ABORT;
}
});
return eventStream;
}
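// Illustrative back-pressure arithmetic for the asyncMap implementation above
// (a hedged sketch, not part of the original source): with n = 4 permits and
// delay = 10, if one permit is still available after this event (so two other
// promises remain in flight besides the current one) and two callers are
// blocked in acquire(), the value returned is
// (max(0, 4 - 1 - 1) + 2) * 10 = 40 milliseconds of requested back pressure.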
@Override
public <R> PushStream<R> flatMap(
Function< ? super T, ? extends PushStream< ? extends R>> mapper) {
AbstractPushStreamImpl<R> eventStream = new IntermediatePushStreamImpl<>(
psp, promiseFactory, this);
PushEventConsumer<R> consumer = e -> {
switch (e.getType()) {
case ERROR :
close(e.nodata());
return ABORT;
case CLOSE :
// Close should allow the next flat mapped entry
// without closing the stream;
return ABORT;
case DATA :
long returnValue = eventStream.handleEvent(e);
if (returnValue < 0) {
close();
return ABORT;
}
return returnValue;
default :
throw new IllegalArgumentException(
"The event type " + e.getType() + " is unknown");
}
};
updateNext(event -> {
try {
if (!event.isTerminal()) {
PushStream< ? extends R> mappedStream = mapper
.apply(event.getData());
return mappedStream.forEachEvent(consumer)
.getValue()
.longValue();
} else {
return eventStream.handleEvent(event.nodata());
}
} catch (Exception e) {
close(PushEvent.error(e));
return ABORT;
}
});
return eventStream;
}
@Override
public PushStream<T> distinct() {
Set<T> set = Collections.newSetFromMap(new ConcurrentHashMap<>());
return filter(set::add);
}
@SuppressWarnings({ "rawtypes", "unchecked" })
@Override
public PushStream<T> sorted() {
return sorted((Comparator)Comparator.naturalOrder());
}
@Override
public PushStream<T> sorted(Comparator< ? super T> comparator) {
List<T> list = Collections.synchronizedList(new ArrayList<>());
AbstractPushStreamImpl<T> eventStream = new IntermediatePushStreamImpl<>(
psp, promiseFactory, this);
updateNext(event -> {
try {
switch(event.getType()) {
case DATA :
list.add(event.getData());
return CONTINUE;
case CLOSE :
list.sort(comparator);
sorted: for (T t : list) {
if (eventStream
.handleEvent(PushEvent.data(t)) < 0) {
break sorted;
}
}
// Fall through
case ERROR :
eventStream.handleEvent(event);
return ABORT;
}
return eventStream.handleEvent(event.nodata());
} catch (Exception e) {
close(PushEvent.error(e));
return ABORT;
}
});
return eventStream;
}
@Override
public PushStream<T> limit(long maxSize) {
if(maxSize <= 0) {
throw new IllegalArgumentException("The limit must be greater than zero");
}
AbstractPushStreamImpl<T> eventStream = new IntermediatePushStreamImpl<>(
psp, promiseFactory, this);
AtomicLong counter = new AtomicLong(maxSize);
updateNext(event -> {
try {
if (!event.isTerminal()) {
long count = counter.decrementAndGet();
if (count > 0) {
return eventStream.handleEvent(event);
} else if (count == 0) {
eventStream.handleEvent(event);
}
return ABORT;
} else {
return eventStream.handleEvent(event.nodata());
}
} catch (Exception e) {
close(PushEvent.error(e));
return ABORT;
}
});
return eventStream;
}
@Override
public PushStream<T> limit(Duration maxTime) {
Runnable start = () -> promiseFactory.scheduledExecutor().schedule(
() -> close(),
maxTime.toNanos(), NANOSECONDS);
AbstractPushStreamImpl<T> eventStream = new IntermediatePushStreamImpl<T>(
psp, promiseFactory, this) {
@Override
protected void beginning() {
start.run();
}
};
updateNext((event) -> {
try {
return eventStream.handleEvent(event);
} catch (Exception e) {
close(PushEvent.error(e));
return ABORT;
}
});
return eventStream;
}
@Override
public PushStream<T> timeout(Duration maxTime) {
AtomicLong lastTime = new AtomicLong();
long timeout = maxTime.toNanos();
AbstractPushStreamImpl<T> eventStream = new IntermediatePushStreamImpl<T>(
psp, promiseFactory, this) {
@Override
protected void beginning() {
lastTime.set(System.nanoTime());
promiseFactory.scheduledExecutor().schedule(
() -> check(lastTime, timeout), timeout,
NANOSECONDS);
}
};
updateNext((event) -> {
try {
return eventStream.handleEvent(event);
} catch (Exception e) {
close(PushEvent.error(e));
return ABORT;
}
});
return eventStream;
}
void check(AtomicLong lastTime, long timeout) {
long now = System.nanoTime();
long elapsed = now - lastTime.get();
if (elapsed < timeout) {
promiseFactory.scheduledExecutor().schedule(
() -> check(lastTime, timeout),
timeout - elapsed, NANOSECONDS);
} else {
PushEvent<T> error = PushEvent.error(new TimeoutException());
close(error);
// Upstream close is needed as we have no direct backpressure
upstreamClose(error);
}
}
@Override
public PushStream<T> skip(long n) {
if (n < 0) {
throw new IllegalArgumentException(
"The number to skip must be greater than or equal to zero");
}
AbstractPushStreamImpl<T> eventStream = new IntermediatePushStreamImpl<>(
psp, promiseFactory, this);
AtomicLong counter = new AtomicLong(n);
updateNext(event -> {
try {
if (!event.isTerminal()) {
if (counter.get() > 0 && counter.decrementAndGet() >= 0) {
return CONTINUE;
} else {
return eventStream.handleEvent(event);
}
} else {
return eventStream.handleEvent(event.nodata());
}
} catch (Exception e) {
close(PushEvent.error(e));
return ABORT;
}
});
return eventStream;
}
@Override
public PushStream<T> fork(int n, int delay, Executor ex) {
AbstractPushStreamImpl<T> eventStream = new IntermediatePushStreamImpl<>(
psp, new PromiseFactory(Objects.requireNonNull(ex),
promiseFactory.scheduledExecutor()),
this);
Semaphore s = new Semaphore(n);
updateNext(event -> {
try {
if (event.isTerminal()) {
s.acquire(n);
eventStream.close(event.nodata());
return ABORT;
}
s.acquire(1);
ex.execute(() -> {
try {
if (eventStream.handleEvent(event) < 0) {
PushEvent<T> close = PushEvent.close();
eventStream.close(close);
// Upstream close is needed as we have no direct
// backpressure
upstreamClose(close);
}
} catch (Exception e1) {
PushEvent<T> error = PushEvent.error(e1);
close(error);
// Upstream close is needed as we have no direct
// backpressure
upstreamClose(error);
} finally {
s.release(1);
}
});
return s.getQueueLength() * delay;
} catch (Exception e) {
close(PushEvent.error(e));
return ABORT;
}
});
return eventStream;
}
@Override
public PushStream<T> buffer() {
return psp.createStream(c -> {
forEachEvent(c);
return this;
});
}
@Override
public <U extends BlockingQueue<PushEvent< ? extends T>>> PushStreamBuilder<T,U> buildBuffer() {
return psp.buildStream(c -> {
forEachEvent(c);
return this;
});
}
@Override
public PushStream<T> merge(
PushEventSource< ? extends T> source) {
AbstractPushStreamImpl<T> eventStream = new IntermediatePushStreamImpl<>(
psp, promiseFactory, this);
AtomicInteger count = new AtomicInteger(2);
PushEventConsumer<T> consumer = event -> {
try {
if (!event.isTerminal()) {
return eventStream.handleEvent(event);
}
if (count.decrementAndGet() == 0) {
eventStream.handleEvent(event.nodata());
return ABORT;
}
return CONTINUE;
} catch (Exception e) {
PushEvent<T> error = PushEvent.error(e);
close(error);
eventStream.close(event.nodata());
return ABORT;
}
};
updateNext(consumer);
AutoCloseable second;
try {
second = source.open((PushEvent< ? extends T> event) -> {
return consumer.accept(event);
});
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
throw new IllegalStateException(
"Unable to merge events as the event source could not be opened.",
e);
}
return eventStream.onClose(() -> {
try {
second.close();
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}).map(IDENTITY);
}
@Override
public PushStream<T> merge(PushStream< ? extends T> source) {
AtomicInteger count = new AtomicInteger(2);
Consumer<AbstractPushStreamImpl<T>> start = downstream -> {
PushEventConsumer<T> consumer = e -> {
long toReturn;
try {
if (!e.isTerminal()) {
toReturn = downstream.handleEvent(e);
} else if (count.decrementAndGet() == 0) {
downstream.handleEvent(e);
toReturn = ABORT;
} else {
return ABORT;
}
} catch (Exception ex) {
try {
downstream.handleEvent(PushEvent.error(ex));
} catch (Exception ex2) { /* Just ignore this */}
toReturn = ABORT;
}
if (toReturn < 0) {
try {
close();
} catch (Exception ex2) { /* Just ignore this */}
try {
source.close();
} catch (Exception ex2) { /* Just ignore this */}
}
return toReturn;
};
forEachEvent(consumer);
source.forEachEvent(consumer);
};
@SuppressWarnings("resource")
AbstractPushStreamImpl<T> eventStream = new AbstractPushStreamImpl<T>(
psp, promiseFactory) {
@Override
protected boolean begin() {
if (closed.compareAndSet(BUILDING, STARTED)) {
start.accept(this);
return true;
}
return false;
}
@Override
protected void upstreamClose(PushEvent< ? > close) {
AbstractPushStreamImpl.this.upstreamClose(close);
source.close();
}
};
return eventStream.onClose(() -> {
try {
close();
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
try {
source.close();
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}).map(IDENTITY);
}
@SuppressWarnings("unchecked")
@Override
public PushStream<T>[] split(Predicate< ? super T>... predicates) {
Predicate< ? super T>[] tests = Arrays.copyOf(predicates, predicates.length);
AbstractPushStreamImpl<T>[] rsult = new AbstractPushStreamImpl[tests.length];
for(int i = 0; i < tests.length; i++) {
rsult[i] = new IntermediatePushStreamImpl<>(psp, promiseFactory, this);
}
Boolean[] array = new Boolean[tests.length];
Arrays.fill(array, Boolean.TRUE);
AtomicReferenceArray<Boolean> off = new AtomicReferenceArray<>(array);
AtomicInteger count = new AtomicInteger(tests.length);
updateNext(event -> {
if (!event.isTerminal()) {
long delay = CONTINUE;
for (int i = 0; i < tests.length; i++) {
try {
if (off.get(i).booleanValue()
&& tests[i].test(event.getData())) {
long accept = rsult[i].handleEvent(event);
if (accept < 0) {
off.set(i, Boolean.FALSE);
count.decrementAndGet();
} else if (accept > delay) {
delay = accept;
}
}
} catch (Exception e) {
try {
rsult[i].close(PushEvent.error(e));
} catch (Exception e2) {
//TODO log
}
off.set(i, Boolean.FALSE);
}
}
if (count.get() == 0)
return ABORT;
return delay;
}
for (AbstractPushStreamImpl<T> as : rsult) {
try {
as.handleEvent(event.nodata());
} catch (Exception e) {
try {
as.close(PushEvent.error(e));
} catch (Exception e2) {
//TODO log
}
}
}
return ABORT;
});
return Arrays.copyOf(rsult, tests.length);
}
@Override
public PushStream<T> sequential() {
AbstractPushStreamImpl<T> eventStream = new IntermediatePushStreamImpl<>(
psp, promiseFactory, this);
Lock lock = new ReentrantLock();
updateNext((event) -> {
try {
lock.lock();
try {
return eventStream.handleEvent(event);
} finally {
lock.unlock();
}
} catch (Exception e) {
close(PushEvent.error(e));
return ABORT;
}
});
return eventStream;
}
@Override
public <R> PushStream<R> coalesce(
Function< ? super T,Optional<R>> accumulator) {
AbstractPushStreamImpl<R> eventStream = new IntermediatePushStreamImpl<>(
psp, promiseFactory, this);
updateNext((event) -> {
try {
if (!event.isTerminal()) {
Optional<PushEvent<R>> coalesced = accumulator
.apply(event.getData()).map(PushEvent::data);
if (coalesced.isPresent()) {
try {
return eventStream.handleEvent(coalesced.get());
} catch (Exception ex) {
close(PushEvent.error(ex));
return ABORT;
}
} else {
return CONTINUE;
}
}
return eventStream.handleEvent(event.nodata());
} catch (Exception e) {
close(PushEvent.error(e));
return ABORT;
}
});
return eventStream;
}
@Override
public <R> PushStream<R> coalesce(int count, Function<Collection<T>,R> f) {
if (count <= 0)
throw new IllegalArgumentException(
"A coalesce operation must collect a positive number of events");
// This could be optimised to only use a single collection queue.
// It would save some GC, but is it worth it?
return coalesce(() -> count, f);
}
@Override
public <R> PushStream<R> coalesce(IntSupplier count,
Function<Collection<T>,R> f) {
AtomicReference<Queue<T>> queueRef = new AtomicReference<Queue<T>>(
null);
Runnable init = () -> queueRef
.set(getQueueForInternalBuffering(count.getAsInt()));
@SuppressWarnings("resource")
AbstractPushStreamImpl<R> eventStream = new IntermediatePushStreamImpl<R>(
psp, promiseFactory, this) {
@Override
protected void beginning() {
init.run();
}
};
AtomicBoolean endPending = new AtomicBoolean();
Object lock = new Object();
updateNext((event) -> {
try {
Queue<T> queue;
if (!event.isTerminal()) {
synchronized (lock) {
for (;;) {
queue = queueRef.get();
if (queue == null) {
if (endPending.get()) {
return ABORT;
} else {
continue;
}
} else if (queue.offer(event.getData())) {
return CONTINUE;
} else {
queueRef.lazySet(null);
break;
}
}
}
queueRef.set(
getQueueForInternalBuffering(count.getAsInt()));
// This call is on the same thread and so must happen
// outside the synchronized block.
return aggregateAndForward(f, eventStream, event,
queue);
} else {
synchronized (lock) {
queue = queueRef.get();
queueRef.lazySet(null);
endPending.set(true);
}
if (queue != null) {
eventStream.handleEvent(
PushEvent.data(f.apply(queue)));
}
}
return eventStream.handleEvent(event.nodata());
} catch (Exception e) {
close(PushEvent.error(e));
return ABORT;
}
});
return eventStream;
}
private <R> long aggregateAndForward(Function<Collection<T>,R> f,
AbstractPushStreamImpl<R> eventStream,
PushEvent< ? extends T> event, Queue<T> queue) throws Exception {
if (!queue.offer(event.getData())) {
((ArrayQueue<T>) queue).forcePush(event.getData());
}
return eventStream.handleEvent(PushEvent.data(f.apply(queue)));
}
@Override
public <R> PushStream<R> window(Duration time,
Function<Collection<T>,R> f) {
return window(time, promiseFactory.executor(), f);
}
@Override
public <R> PushStream<R> window(Duration time, Executor executor,
Function<Collection<T>,R> f) {
return window(() -> time, () -> 0, executor, (t, c) -> {
try {
return f.apply(c);
} catch (Exception e) {
throw new RuntimeException(e);
}
});
}
@Override
public <R> PushStream<R> window(Supplier<Duration> time,
IntSupplier maxEvents,
BiFunction<Long,Collection<T>,R> f) {
return window(time, maxEvents, promiseFactory.executor(), f);
}
@Override
public <R> PushStream<R> window(Supplier<Duration> time,
IntSupplier maxEvents, Executor ex,
BiFunction<Long,Collection<T>,R> f) {
AtomicLong timestamp = new AtomicLong();
AtomicLong previousWindowSize = new AtomicLong();
AtomicLong counter = new AtomicLong();
Object lock = new Object();
AtomicReference<Queue<T>> queueRef = new AtomicReference<Queue<T>>(
null);
// This code is declared as a separate block to avoid any confusion
// about which instance's methods and variables are in scope
Consumer<AbstractPushStreamImpl<R>> begin = p -> {
synchronized (lock) {
timestamp.lazySet(System.nanoTime());
long count = counter.get();
long windowSize = time.get().toNanos();
previousWindowSize.set(windowSize);
promiseFactory.scheduledExecutor().schedule(
getWindowTask(p, f, time, maxEvents, lock, count,
queueRef, timestamp, counter,
previousWindowSize, ex),
windowSize, NANOSECONDS);
}
queueRef.set(getQueueForInternalBuffering(maxEvents.getAsInt()));
};
@SuppressWarnings("resource")
AbstractPushStreamImpl<R> eventStream = new IntermediatePushStreamImpl<R>(
psp, new PromiseFactory(Objects.requireNonNull(ex),
promiseFactory.scheduledExecutor()),
this) {
@Override
protected void beginning() {
begin.accept(this);
}
};
AtomicBoolean endPending = new AtomicBoolean(false);
updateNext((event) -> {
try {
if (eventStream.closed.get() == CLOSED) {
return ABORT;
}
Queue<T> queue;
if (!event.isTerminal()) {
long elapsed;
long newCount;
synchronized (lock) {
for (;;) {
queue = queueRef.get();
if (queue == null) {
if (endPending.get()) {
return ABORT;
} else {
continue;
}
} else if (queue.offer(event.getData())) {
return CONTINUE;
} else {
queueRef.lazySet(null);
break;
}
}
long now = System.nanoTime();
elapsed = now - timestamp.get();
timestamp.lazySet(now);
newCount = counter.get() + 1;
counter.lazySet(newCount);
// This is a non-blocking call, and must happen in the
// synchronized block to avoid re-ordering the executor
// enqueue with a subsequent incoming close operation
aggregateAndForward(f, eventStream, event, queue,
ex, elapsed);
}
// These must happen outside the synchronized block as we
// call out to user code
queueRef.set(
getQueueForInternalBuffering(maxEvents.getAsInt()));
long nextWindow = time.get().toNanos();
long backpressure = previousWindowSize.getAndSet(nextWindow)
- elapsed;
promiseFactory.scheduledExecutor().schedule(
getWindowTask(eventStream, f, time, maxEvents, lock,
newCount, queueRef, timestamp, counter,
previousWindowSize, ex),
nextWindow, NANOSECONDS);
return backpressure < 0 ? CONTINUE
: NANOSECONDS.toMillis(backpressure);
} else {
long elapsed;
synchronized (lock) {
queue = queueRef.get();
queueRef.lazySet(null);
endPending.lazySet(true);
long now = System.nanoTime();
elapsed = now - timestamp.get();
counter.lazySet(counter.get() + 1);
}
Collection<T> collected = queue == null ? emptyList()
: queue;
ex.execute(() -> {
try {
eventStream
.handleEvent(PushEvent.data(f.apply(
Long.valueOf(NANOSECONDS
.toMillis(elapsed)),
collected)));
} catch (Exception e) {
PushEvent<T> error = PushEvent.error(e);
close(error);
// Upstream close is needed as we have no direct
// backpressure
upstreamClose(error);
}
// It's now time to deliver the terminal event
eventStream.handleEvent(event.nodata());
});
}
return ABORT;
} catch (Exception e) {
close(PushEvent.error(e));
return ABORT;
}
});
return eventStream;
}
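// Worked example of the window back-pressure computed above (a hedged sketch,
// not part of the original source): with a 500 ms window, if the internal
// queue fills 200 ms after the window opened (elapsed = 200 ms in nanoseconds),
// then previousWindowSize - elapsed leaves roughly 300 ms, which is returned
// in milliseconds and asks the producer to hold off until about the start of
// the next window; a negative result is reported as CONTINUE (0).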
protected Queue<T> getQueueForInternalBuffering(int size) {
if (size == 0) {
return new LinkedList<>();
} else {
return new ArrayQueue<>(size - 1);
}
}
@SuppressWarnings("unchecked")
/**
* A special queue that keeps one element in reserve and can have that last
* element set using forcePush. After the element is set the capacity is
* permanently increased by one and cannot grow further.
*
* @param The element type
*/
private static class ArrayQueue extends AbstractQueue
implements Queue {
final Object[] store;
int normalLength;
int nextIndex;
int size;
ArrayQueue(int capacity) {
store = new Object[capacity + 1];
normalLength = store.length - 1;
}
@Override
public boolean offer(E e) {
if (e == null)
throw new NullPointerException("Null values are not supported");
if (size < normalLength) {
store[nextIndex] = e;
size++;
nextIndex++;
nextIndex = nextIndex % normalLength;
return true;
}
return false;
}
public void forcePush(E e) {
store[normalLength] = e;
normalLength++;
size++;
}
@Override
public E poll() {
if (size == 0) {
return null;
} else {
int idx = nextIndex - size;
if (idx < 0) {
idx += normalLength;
}
E value = (E) store[idx];
store[idx] = null;
size--;
return value;
}
}
@Override
public E peek() {
if (size == 0) {
return null;
} else {
int idx = nextIndex - size;
if (idx < 0) {
idx += normalLength;
}
return (E) store[idx];
}
}
@Override
public Iterator<E> iterator() {
final int previousNext = nextIndex;
return new Iterator<E>() {
int idx;
int remaining = size;
{
idx = nextIndex - size;
if (idx < 0) {
idx += normalLength;
}
}
@Override
public boolean hasNext() {
if (nextIndex != previousNext) {
throw new ConcurrentModificationException(
"The queue was concurrently modified");
}
return remaining > 0;
}
@Override
public E next() {
if (!hasNext()) {
throw new NoSuchElementException(
"The iterator has no more values");
}
E value = (E) store[idx];
idx++;
remaining--;
if (idx == normalLength) {
idx = 0;
}
return value;
}
};
}
@Override
public int size() {
return size;
}
}
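// Illustrative trace of ArrayQueue behaviour (a sketch based only on the
// class above, not part of the original source): new ArrayQueue<>(2)
// allocates three slots but accepts only two offers; forcePush() then uses
// the reserved slot, and poll() still drains in FIFO order.
//
//   ArrayQueue<String> q = new ArrayQueue<>(2);
//   q.offer("a");      // true
//   q.offer("b");      // true
//   q.offer("c");      // false - normal capacity reached
//   q.forcePush("c");  // stores into the reserve slot, size() == 3
//   q.poll();          // "a", then "b", then "c"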
private <R> Runnable getWindowTask(AbstractPushStreamImpl<R> eventStream,
BiFunction<Long,Collection<T>,R> f, Supplier<Duration> time,
IntSupplier maxEvents, Object lock, long expectedCounter,
AtomicReference<Queue<T>> queueRef, AtomicLong timestamp,
AtomicLong counter, AtomicLong previousWindowSize,
Executor executor) {
return () -> {
Queue<T> queue = null;
long elapsed;
synchronized (lock) {
if (counter.get() != expectedCounter) {
return;
}
counter.lazySet(expectedCounter + 1);
long now = System.nanoTime();
elapsed = now - timestamp.get();
timestamp.lazySet(now);
queue = queueRef.get();
queueRef.lazySet(null);
// This is a non-blocking call, and must happen in the
// synchronized block to avoid re-ordering the executor
// enqueue with a subsequent incoming close operation
Collection<T> collected = queue == null ? emptyList() : queue;
executor.execute(() -> {
try {
eventStream.handleEvent(PushEvent.data(f.apply(
Long.valueOf(NANOSECONDS.toMillis(elapsed)),
collected)));
} catch (Exception e) {
PushEvent<T> error = PushEvent.error(e);
close(error);
// Upstream close is needed as we have no direct
// backpressure
upstreamClose(error);
}
});
}
// These must happen outside the synchronized block as we
// call out to user code
long nextWindow = time.get().toNanos();
previousWindowSize.set(nextWindow);
queueRef.set(getQueueForInternalBuffering(maxEvents.getAsInt()));
promiseFactory.scheduledExecutor().schedule(
getWindowTask(eventStream, f, time, maxEvents, lock,
expectedCounter + 1, queueRef, timestamp, counter,
previousWindowSize, executor),
nextWindow, NANOSECONDS);
};
}
private <R> void aggregateAndForward(BiFunction<Long,Collection<T>,R> f,
AbstractPushStreamImpl<R> eventStream,
PushEvent< ? extends T> event, Queue<T> queue, Executor executor,
long elapsed) {
executor.execute(() -> {
try {
if (!queue.offer(event.getData())) {
((ArrayQueue<T>) queue).forcePush(event.getData());
}
long result = eventStream.handleEvent(PushEvent.data(
f.apply(Long.valueOf(NANOSECONDS.toMillis(elapsed)),
queue)));
if (result < 0) {
close();
}
} catch (Exception e) {
close(PushEvent.error(e));
}
});
}
@Override
public PushStream<T> adjustBackPressure(LongUnaryOperator adjustment) {
AbstractPushStreamImpl<T> eventStream = new IntermediatePushStreamImpl<>(
psp, promiseFactory, this);
updateNext(event -> {
try {
long bp = eventStream.handleEvent(event);
if (event.isTerminal()) {
return ABORT;
} else {
return bp < 0 ? bp : adjustment.applyAsLong(bp);
}
} catch (Exception e) {
close(PushEvent.error(e));
return ABORT;
}
});
return eventStream;
}
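// Usage sketch for adjustBackPressure (illustrative only, not part of the
// original source): a caller can dampen or cap the back pressure reported by
// the downstream chain, e.g.
//
//   stream.adjustBackPressure(bp -> Math.min(bp, 100))
//
// which forwards each data event unchanged but never asks the producer to
// wait more than 100 milliseconds; negative values still abort as above.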
@Override
public PushStream<T> adjustBackPressure(
ToLongBiFunction<T,Long> adjustment) {
AbstractPushStreamImpl<T> eventStream = new IntermediatePushStreamImpl<>(
psp, promiseFactory, this);
updateNext(event -> {
try {
long bp = eventStream.handleEvent(event);
if (event.isTerminal()) {
return ABORT;
} else {
return bp < 0 ? bp
: adjustment.applyAsLong(event.getData(),
Long.valueOf(bp));
}
} catch (Exception e) {
close(PushEvent.error(e));
return ABORT;
}
});
return eventStream;
}
@Override
public Promise<Void> forEach(Consumer< ? super T> action) {
Deferred<Void> d = promiseFactory.deferred();
updateNext((event) -> {
try {
switch(event.getType()) {
case DATA:
action.accept(event.getData());
return CONTINUE;
case CLOSE:
d.resolve(null);
break;
case ERROR:
d.fail(event.getFailure());
break;
}
close(event.nodata());
return ABORT;
} catch (Exception e) {
close(PushEvent.error(e));
return ABORT;
}
});
begin();
return d.getPromise();
}
@Override
public Promise