/**
* Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending
*/
package io.deephaven.engine.table.impl.by;
import io.deephaven.chunk.attributes.ChunkPositions;
import io.deephaven.chunk.attributes.Values;
import io.deephaven.engine.table.*;
import io.deephaven.engine.table.impl.MatchPair;
import io.deephaven.engine.table.impl.sort.permute.PermuteKernel;
import io.deephaven.engine.table.impl.sort.timsort.LongIntTimsortKernel;
import io.deephaven.engine.table.ChunkSink;
import io.deephaven.chunk.*;
import io.deephaven.engine.table.impl.util.ChunkUtils;
import io.deephaven.engine.rowset.RowSequence;
import io.deephaven.engine.rowset.RowSequenceFactory;
import io.deephaven.engine.rowset.chunkattributes.RowKeys;
import io.deephaven.util.SafeCloseableList;
import org.jetbrains.annotations.NotNull;
import java.util.Arrays;
/**
* Base-class for stream first/last-by chunked operators that need to copy data from source columns to result columns
* with a permutation on the redirected indices.
*/
public abstract class CopyingPermutedBlinkFirstOrLastChunkedOperator extends BaseBlinkFirstOrLastChunkedOperator {

    /**
     * Permute kernels, parallel to {@link #outputColumns}.
     */
    protected final PermuteKernel[] permuteKernels;

    public CopyingPermutedBlinkFirstOrLastChunkedOperator(
            @NotNull final MatchPair[] resultPairs,
            @NotNull final Table blinkTable) {
        super(resultPairs, blinkTable);
        permuteKernels = new PermuteKernel[numResultColumns];
        for (int ci = 0; ci < numResultColumns; ++ci) {
            permuteKernels[ci] = PermuteKernel.makePermuteKernel(outputColumns[ci].getChunkType());
        }
    }

    @Override
    public void ensureCapacity(final long tableSize) {
        redirections.ensureCapacity(tableSize, true);
    }

    @Override
    public final void startTrackingPrevValues() {
        Arrays.stream(outputColumns).forEach(ColumnSource::startTrackingPrevValues);
    }
    /**
     * <p>
     * For each destination slot, map to the latest source row key and copy source values to destination slots for
     * all result columns.
     * <p>
     * This implementation proceeds chunk-wise in the following manner:
     * <ol>
     * <li>Get a chunk of destination slots</li>
     * <li>Fill a chunk of source indices</li>
     * <li>Sort the chunk of source indices</li>
     * <li>For each input column: get a chunk of input values, permute it into a chunk of destination values, and
     * then fill the output column</li>
     * </ol>
     * A simplified, standalone sketch of this sort-then-permute pattern follows the class body.
     *
     * @param destinations The changed (added or modified) destination slots as a {@link RowSequence}
     */
    protected void copyStreamToResult(@NotNull final RowSequence destinations) {
        try (final SafeCloseableList toClose = new SafeCloseableList()) {
            final RowSequence.Iterator destinationsIterator = toClose.add(destinations.getRowSequenceIterator());
            final ChunkSource.FillContext redirectionsContext =
                    toClose.add(redirections.makeFillContext(COPY_CHUNK_SIZE));
            final WritableLongChunk<RowKeys> sourceIndices =
                    toClose.add(WritableLongChunk.makeWritableChunk(COPY_CHUNK_SIZE));
            final WritableIntChunk<ChunkPositions> sourceIndicesOrder =
                    toClose.add(WritableIntChunk.makeWritableChunk(COPY_CHUNK_SIZE));
            final LongIntTimsortKernel.LongIntSortKernelContext<RowKeys, ChunkPositions> sortKernelContext =
                    toClose.add(LongIntTimsortKernel.createContext(COPY_CHUNK_SIZE));
            final SharedContext inputSharedContext = toClose.add(SharedContext.makeSharedContext());
            final ChunkSource.GetContext[] inputContexts =
                    toClose.addArray(new ChunkSource.GetContext[numResultColumns]);
            final ChunkSink.FillFromContext[] outputContexts =
                    toClose.addArray(new ChunkSink.FillFromContext[numResultColumns]);
            // noinspection unchecked
            final WritableChunk<Values>[] outputChunks =
                    toClose.addArray(new WritableChunk[numResultColumns]);

            for (int ci = 0; ci < numResultColumns; ++ci) {
                inputContexts[ci] = inputColumns[ci].makeGetContext(COPY_CHUNK_SIZE, inputSharedContext);
                final WritableColumnSource<?> outputColumn = outputColumns[ci];
                outputContexts[ci] = outputColumn.makeFillFromContext(COPY_CHUNK_SIZE);
                outputChunks[ci] = outputColumn.getChunkType().makeWritableChunk(COPY_CHUNK_SIZE);
                outputColumn.ensureCapacity(destinations.lastRowKey() + 1, false);
            }
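            // Process the destinations one COPY_CHUNK_SIZE slice at a time: map each destination slot to its
            // redirected source row key, then copy every result column for that slice.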
            while (destinationsIterator.hasMore()) {
                final RowSequence sliceDestinations =
                        destinationsIterator.getNextRowSequenceWithLength(COPY_CHUNK_SIZE);
                redirections.fillChunk(redirectionsContext, WritableLongChunk.upcast(sourceIndices),
                        sliceDestinations);
                sourceIndicesOrder.setSize(sourceIndices.size());
                ChunkUtils.fillInOrder(sourceIndicesOrder);
                LongIntTimsortKernel.sort(sortKernelContext, sourceIndicesOrder, sourceIndices);
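                // After the sort, sourceIndices holds the slice's source row keys in ascending order and
                // sourceIndicesOrder records each key's original position within sliceDestinations.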
                try (final RowSequence sliceSources =
                        RowSequenceFactory.wrapRowKeysChunkAsRowSequence(WritableLongChunk.downcast(sourceIndices))) {
                    for (int ci = 0; ci < numResultColumns; ++ci) {
                        final Chunk<? extends Values> inputChunk =
                                inputColumns[ci].getChunk(inputContexts[ci], sliceSources);
                        permuteKernels[ci].permute(inputChunk, sourceIndicesOrder, outputChunks[ci]);
                        outputColumns[ci].fillFromChunk(outputContexts[ci], outputChunks[ci], sliceDestinations);
                    }
                    inputSharedContext.reset();
                }
            }
        }
    }
}
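
/*
 * Illustrative sketch, not part of the Deephaven sources: the class and names below (SortThenPermuteSketch,
 * sourceKeys, pairs, output) are hypothetical and exist only to demonstrate the sort-then-permute idea that
 * copyStreamToResult applies per chunk. Each redirected source key is paired with its destination position, the
 * pairs are sorted by key so the source can be read in ascending order, and each value is then scattered back to
 * its original destination position.
 */
final class SortThenPermuteSketch {

    public static void main(String[] args) {
        // One redirected source row key per destination slot, listed in destination order (unsorted).
        final long[] sourceKeys = {42L, 7L, 19L};
        final int size = sourceKeys.length;

        // Pair each key with its destination position, then sort the pairs by key; this mirrors filling
        // sourceIndicesOrder with 0..n-1 and calling LongIntTimsortKernel.sort.
        final long[][] pairs = new long[size][2];
        for (int pos = 0; pos < size; ++pos) {
            pairs[pos][0] = sourceKeys[pos];
            pairs[pos][1] = pos;
        }
        java.util.Arrays.sort(pairs, java.util.Comparator.comparingLong(pair -> pair[0]));

        // Read "source values" in ascending key order (stand-in for getChunk over the sorted RowSequence), then
        // place each value back at its original destination position (stand-in for PermuteKernel.permute).
        final String[] output = new String[size];
        for (int sortedPos = 0; sortedPos < size; ++sortedPos) {
            final String value = "value@" + pairs[sortedPos][0];
            output[(int) pairs[sortedPos][1]] = value;
        }

        // Prints [value@42, value@7, value@19]: destination order is preserved despite the sorted source reads.
        System.out.println(java.util.Arrays.toString(output));
    }
}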