/*
* Copyright (C) 2015 SoftIndex LLC.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.datakernel.aggregation;

import io.datakernel.aggregation.ot.AggregationStructure;
import io.datakernel.aggregation.util.PartitionPredicate;
import io.datakernel.async.AsyncCollector;
import io.datakernel.async.Promise;
import io.datakernel.async.SettablePromise;
import io.datakernel.codegen.DefiningClassLoader;
import io.datakernel.stream.*;

import java.util.ArrayList;
import java.util.List;
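/**
 * A stream consumer that splits an incoming stream of records into chunks and writes each chunk
 * to the supplied {@link AggregationChunkStorage}. A new chunk is started once the current one
 * reaches {@code chunkSize} records or the optional {@link PartitionPredicate} reports a
 * partition change. {@link #getResult()} completes with the list of {@link AggregationChunk}
 * descriptors that were written.
 */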
public final class AggregationChunker<C, T> extends ForwardingStreamConsumer<T> {
	private final StreamConsumerSwitcher<T> switcher;
	private final SettablePromise<List<AggregationChunk>> result = new SettablePromise<>();

	private final AggregationStructure aggregation;
	private final List<String> fields;
	private final Class<T> recordClass;
	private final PartitionPredicate<T> partitionPredicate;
	private final AggregationChunkStorage<C> storage;
	private final AsyncCollector<? extends List<AggregationChunk>> chunksCollector;

	private final DefiningClassLoader classLoader;

	private final int chunkSize;

	private AggregationChunker(StreamConsumerSwitcher<T> switcher,
			AggregationStructure aggregation, List<String> fields,
			Class<T> recordClass, PartitionPredicate<T> partitionPredicate,
			AggregationChunkStorage<C> storage,
			DefiningClassLoader classLoader,
			int chunkSize) {
		super(switcher);
		this.switcher = switcher;
		this.aggregation = aggregation;
		this.fields = fields;
		this.recordClass = recordClass;
		this.partitionPredicate = partitionPredicate;
		this.storage = storage;
		this.classLoader = classLoader;
		(this.chunksCollector = AsyncCollector.create(new ArrayList<>()))
				.run(switcher.getAcknowledgement());
		this.chunkSize = chunkSize;
		chunksCollector.get().whenComplete(result::trySet);
		getAcknowledgement().whenException(result::trySetException);
	}
	public static <C, T> AggregationChunker<C, T> create(AggregationStructure aggregation, List<String> fields,
			Class<T> recordClass, PartitionPredicate<T> partitionPredicate,
			AggregationChunkStorage<C> storage,
			DefiningClassLoader classLoader,
			int chunkSize) {
		StreamConsumerSwitcher<T> switcher = StreamConsumerSwitcher.create();
		AggregationChunker<C, T> chunker = new AggregationChunker<>(switcher, aggregation, fields, recordClass, partitionPredicate, storage, classLoader, chunkSize);
		chunker.startNewChunk();
		return chunker;
	}
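	// Usage sketch (hypothetical wiring; 'structure', 'fields', 'storage', 'classLoader' and
	// 'recordSupplier' are assumed to be set up elsewhere and are not part of this class):
	//   AggregationChunker<Long, MyRecord> chunker = AggregationChunker.create(
	//           structure, fields, MyRecord.class, null, storage, classLoader, 1_000_000);
	//   recordSupplier.streamTo(chunker);
	//   Promise<List<AggregationChunk>> chunks = chunker.getResult();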
	public Promise<List<AggregationChunk>> getResult() {
		return result;
	}
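	/**
	 * Consumer for a single chunk: forwards records to the storage consumer, tracks the first and
	 * last record and the record count, and completes its result with the corresponding
	 * {@link AggregationChunk} descriptor (or {@code null} for an empty chunk) once the wrapped
	 * consumer is acknowledged.
	 */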
	private class ChunkWriter extends ForwardingStreamConsumer<T> implements StreamDataAcceptor<T> {
		private final SettablePromise<AggregationChunk> result = new SettablePromise<>();
		private final int chunkSize;
		private final PartitionPredicate<T> partitionPredicate;
		private StreamDataAcceptor<T> dataAcceptor;

		private T first;
		private T last;
		private int count;

		boolean switched;

		public ChunkWriter(StreamConsumer<T> actualConsumer,
				C chunkId, int chunkSize, PartitionPredicate<T> partitionPredicate) {
			super(actualConsumer);
			this.chunkSize = chunkSize;
			this.partitionPredicate = partitionPredicate;

			actualConsumer.getAcknowledgement()
					.map($ -> count == 0 ?
							null :
							AggregationChunk.create(chunkId,
									fields,
									PrimaryKey.ofObject(first, aggregation.getKeys()),
									PrimaryKey.ofObject(last, aggregation.getKeys()),
									count))
					.whenComplete(result::trySet);
			getAcknowledgement().whenException(result::trySetException);
		}
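		// Intercept resume() so the downstream data acceptor is captured and the supplier feeds
		// items through this ChunkWriter's accept() instead.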
		@Override
		public void setSupplier(StreamSupplier<T> supplier) {
			super.setSupplier(new ForwardingStreamSupplier<T>(supplier) {
				@Override
				public void resume(StreamDataAcceptor<T> dataAcceptor) {
					ChunkWriter.this.dataAcceptor = dataAcceptor;
					super.resume(ChunkWriter.this);
				}
			});
		}
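		// Forward each record downstream; once the chunk holds chunkSize records or the partition
		// predicate reports a different partition, ask the outer chunker to start a new chunk.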
		@Override
		public void accept(T item) {
			if (first == null) {
				first = item;
			}
			last = item;
			dataAcceptor.accept(item);
			if (++count == chunkSize || (partitionPredicate != null && !partitionPredicate.isSamePartition(last, item))) {
				if (!switched) {
					switched = true;
					startNewChunk();
				}
			}
		}
		public Promise<AggregationChunk> getResult() {
			return result;
		}
	}
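	// Allocates a new chunk id, opens a storage consumer for it, wraps it in a ChunkWriter whose
	// result is registered with the collector, and switches the stream over to the new writer.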
	private void startNewChunk() {
		StreamConsumer<T> consumer = StreamConsumer.ofPromise(
				storage.createId()
						.then(chunkId -> storage.write(aggregation, fields, recordClass, chunkId, classLoader)
								.map(streamConsumer -> {
									ChunkWriter chunkWriter = new ChunkWriter(streamConsumer, chunkId, chunkSize, partitionPredicate);
									chunksCollector.addPromise(
											chunkWriter.getResult(),
											(accumulator, newChunk) -> {
												if (newChunk != null && newChunk.getCount() != 0) {
													accumulator.add(newChunk);
												}
											});
									return chunkWriter.withLateBinding();
								})));
		switcher.switchTo(consumer);
	}
}