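// Computes the attributes shared by every constituent: an attribute survives only if each constituent
// defines it with an equal value.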
constituents) {
if (!constituents.hasNext()) {
return Collections.emptyMap();
}
boolean anyPreviousTableIsRefreshing = false;
Table constituent = constituents.next();
boolean currentTableIsRefreshing = constituent.isRefreshing();
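// Seed the candidate attributes from the first constituent; later constituents can only remove entries.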
final Map<String, Object> candidates = new HashMap<>(constituent.getAttributes());
while (constituents.hasNext()) {
anyPreviousTableIsRefreshing |= currentTableIsRefreshing;
constituent = constituents.next();
currentTableIsRefreshing = constituent.isRefreshing();
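// Drop any candidate attribute that this constituent is missing or defines with a different value.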
final Iterator<Map.Entry<String, Object>> candidatesIter = candidates.entrySet().iterator();
while (candidatesIter.hasNext()) {
final Map.Entry<String, Object> candidate = candidatesIter.next();
final String attrKey = candidate.getKey();
final Object candidateValue = candidate.getValue();
final boolean matches = constituent.hasAttribute(attrKey) &&
Objects.equals(constituent.getAttribute(attrKey), candidateValue);
if (!matches) {
candidatesIter.remove();
}
}
if (candidates.isEmpty()) {
return Collections.emptyMap();
}
}
if (anyPreviousTableIsRefreshing) {
// if a previous table may grow and cause shifts, then the merged table cannot be add-only
candidates.remove(BaseTable.ADD_ONLY_TABLE_ATTRIBUTE);
// if a previous table may change then this cannot be append-only
candidates.remove(BaseTable.APPEND_ONLY_TABLE_ATTRIBUTE);
} else {
// otherwise, last constituent influences whether the merged result is add-only and/or append-only
if (constituent.hasAttribute(BaseTable.ADD_ONLY_TABLE_ATTRIBUTE)) {
candidates.put(BaseTable.ADD_ONLY_TABLE_ATTRIBUTE, true);
}
if (constituent.hasAttribute(BaseTable.APPEND_ONLY_TABLE_ATTRIBUTE)) {
candidates.put(BaseTable.APPEND_ONLY_TABLE_ATTRIBUTE, true);
}
}
return candidates;
}
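/**
 * Filters the underlying partitioned table. Filters may reference any column of the underlying table except
 * the constituent column itself.
 */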
@ConcurrentMethod
@Override
public PartitionedTableImpl filter(@NotNull final Collection<? extends Filter> filters) {
final WhereFilter[] whereFilters = WhereFilter.from(filters);
final boolean invalidFilter = Arrays.stream(whereFilters).flatMap((final WhereFilter filter) -> {
filter.init(table.getDefinition());
return Stream.concat(filter.getColumns().stream(), filter.getColumnArrays().stream());
}).anyMatch((final String columnName) -> columnName.equals(constituentColumnName));
if (invalidFilter) {
throw new IllegalArgumentException("Unsupported filter against constituent column " + constituentColumnName
+ " found in filters: " + filters);
}
return LivenessScopeStack.computeEnclosed(
() -> new PartitionedTableImpl(
table.where(Filter.and(whereFilters)),
keyColumnNames,
uniqueKeys,
constituentColumnName,
constituentDefinition,
constituentChangesPermitted || table.isRefreshing(),
false),
table::isRefreshing,
pt -> pt.table().isRefreshing());
}
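/**
 * Sorts the underlying partitioned table. Sorting on the constituent column is not supported.
 */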
@ConcurrentMethod
@Override
public PartitionedTable sort(@NotNull final Collection<SortColumn> sortColumns) {
final boolean invalidSortColumn = sortColumns.stream()
.map((final SortColumn sortColumn) -> sortColumn.column().name())
.anyMatch((final String columnName) -> columnName.equals(constituentColumnName));
if (invalidSortColumn) {
throw new IllegalArgumentException("Unsupported sort on constituent column " + constituentColumnName
+ " found in sort columns: " + sortColumns);
}
return LivenessScopeStack.computeEnclosed(
() -> new PartitionedTableImpl(
table.sort(sortColumns),
keyColumnNames,
uniqueKeys,
constituentColumnName,
constituentDefinition,
constituentChangesPermitted || table.isRefreshing(),
false),
table::isRefreshing,
pt -> pt.table().isRefreshing());
}
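/**
 * Applies {@code transformer} to every constituent via a {@link TableTransformationColumn} update on the
 * underlying table. An empty prototype constituent is also transformed in order to derive the result
 * constituent definition.
 */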
@ConcurrentMethod
@Override
public PartitionedTable transform(
@Nullable final ExecutionContext executionContext,
@NotNull final UnaryOperator<Table> transformer,
final boolean expectRefreshingResults,
@NotNull final Dependency... dependencies) {
final PartitionedTable resultPartitionedTable;
final TableDefinition resultConstituentDefinition;
final LivenessManager enclosingScope = LivenessScopeStack.peek();
try (final SafeCloseable ignored1 = executionContext == null ? null : executionContext.open();
final SafeCloseable ignored2 = LivenessScopeStack.open()) {
final Table prepared = prepareForTransform(table, expectRefreshingResults, dependencies);
// Perform the transformation
final Table resultTable = prepared.update(List.of(new TableTransformationColumn(
constituentColumnName,
disableRecursiveParallelOperationInitialization(executionContext),
prepared.isRefreshing() ? transformer : assertResultsStatic(transformer))));
// Make sure we have a valid result constituent definition
final Table emptyConstituent = emptyConstituent(constituentDefinition);
final Table resultEmptyConstituent = transformer.apply(emptyConstituent);
resultConstituentDefinition = resultEmptyConstituent.getDefinition();
// Build the result partitioned table
resultPartitionedTable = new PartitionedTableImpl(
resultTable,
keyColumnNames,
uniqueKeys,
constituentColumnName,
resultConstituentDefinition,
constituentChangesPermitted,
true);
enclosingScope.manage(resultPartitionedTable);
}
return resultPartitionedTable;
}
/**
 * Ensures that the returned ExecutionContext has an OperationInitializer that is safe to use from work already
 * running on an initialization thread: it must either already return false for
 * {@link OperationInitializer#canParallelize()}, or be a different instance than the current context's
 * OperationInitializer.
 */
private static ExecutionContext disableRecursiveParallelOperationInitialization(ExecutionContext provided) {
if (provided == null) {
return null;
}
ExecutionContext current = ExecutionContext.getContext();
if (!provided.getOperationInitializer().canParallelize()) {
return provided;
}
if (current.getOperationInitializer() != provided.getOperationInitializer()) {
return provided;
}
// The current operation initializer isn't safe for submitting additional tasks that we will block on; replace
// it with an instance that will never attempt to push work to another thread
return provided.withOperationInitializer(OperationInitializer.NON_PARALLELIZABLE);
}
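/**
 * Joins this partitioned table's underlying table with {@code other}'s on the matched key columns, then applies
 * {@code transformer} to each matched pair of constituents via a {@link BiTableTransformationColumn} update.
 */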
@Override
public PartitionedTable partitionedTransform(
@NotNull final PartitionedTable other,
@Nullable final ExecutionContext executionContext,
@NotNull final BinaryOperator<Table> transformer,
final boolean expectRefreshingResults,
@NotNull final Dependency... dependencies) {
// Check safety before doing any extra work
final UpdateGraph updateGraph = table.getUpdateGraph(other.table());
if (table.isRefreshing() || other.table().isRefreshing()) {
updateGraph.checkInitiateSerialTableOperation();
}
// Validate join compatibility
final MatchPair[] joinPairs = matchKeyColumns(this, other);
final PartitionedTable resultPartitionedTable;
final TableDefinition resultConstituentDefinition;
final LivenessManager enclosingScope = LivenessScopeStack.peek();
try (final SafeCloseable ignored1 = executionContext == null ? null : executionContext.open();
final SafeCloseable ignored2 = LivenessScopeStack.open()) {
// Perform the transformation
final MatchPair[] joinAdditions =
new MatchPair[] {new MatchPair(RHS_CONSTITUENT, other.constituentColumnName())};
final Table joined = uniqueKeys
? table.naturalJoin(other.table(), Arrays.asList(joinPairs), Arrays.asList(joinAdditions))
.where(new MatchFilter(Inverted, RHS_CONSTITUENT, (Object) null))
: table.join(other.table(), Arrays.asList(joinPairs), Arrays.asList(joinAdditions));
final Table prepared = prepareForTransform(joined, expectRefreshingResults, dependencies);
final Table resultTable = prepared
.update(List.of(new BiTableTransformationColumn(
constituentColumnName,
RHS_CONSTITUENT,
disableRecursiveParallelOperationInitialization(executionContext),
prepared.isRefreshing() ? transformer : assertResultsStatic(transformer))))
.dropColumns(RHS_CONSTITUENT);
// Make sure we have a valid result constituent definition
final Table emptyConstituent1 = emptyConstituent(constituentDefinition);
final Table emptyConstituent2 = emptyConstituent(other.constituentDefinition());
final Table resultEmptyConstituent = transformer.apply(emptyConstituent1, emptyConstituent2);
resultConstituentDefinition = resultEmptyConstituent.getDefinition();
// Build the result partitioned table
resultPartitionedTable = new PartitionedTableImpl(
resultTable,
keyColumnNames,
uniqueKeys,
constituentColumnName,
resultConstituentDefinition,
constituentChangesPermitted || other.constituentChangesPermitted(),
true);
enclosingScope.manage(resultPartitionedTable);
}
return resultPartitionedTable;
}
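/**
 * Prepares {@code table} for a transform. Returns {@code table} unchanged unless a copy is needed to mark it
 * refreshing (when refreshing results are expected) or to attach the supplied dependencies as parent references.
 */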
private static Table prepareForTransform(
@NotNull final Table table,
final boolean expectRefreshingResults,
@Nullable final Dependency[] dependencies) {
final boolean addDependencies = dependencies != null && dependencies.length > 0;
final boolean setRefreshing = (expectRefreshingResults || addDependencies) && !table.isRefreshing();
if (!addDependencies && !setRefreshing) {
return table;
}
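// Copy the coalesced table so we can mark it refreshing and/or attach dependencies without mutating the input.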
final Table copied = ((QueryTable) table.coalesce()).copy();
if (setRefreshing) {
copied.setRefreshing(true);
}
if (addDependencies) {
Arrays.stream(dependencies).forEach(copied::addParentReference);
}
return copied;
}
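/**
 * Wraps {@code wrapped} so that a refreshing result table triggers an {@link IllegalStateException}; used when
 * transforming a static partitioned table, whose constituents must not refresh.
 */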
private static UnaryOperator<Table> assertResultsStatic(@NotNull final UnaryOperator<Table> wrapped) {
return (final Table table) -> {
final Table result = wrapped.apply(table);
if (result != null && result.isRefreshing()) {
throw new IllegalStateException("Static partitioned tables cannot contain refreshing constituents. "
+ "Did you mean to specify expectRefreshingResults=true for this transform?");
}
return result;
};
}
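/**
 * Binary-transformer counterpart of {@link #assertResultsStatic(UnaryOperator)}: rejects refreshing results
 * from {@code wrapped}.
 */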
private static BinaryOperator<Table> assertResultsStatic(@NotNull final BinaryOperator<Table> wrapped) {
return (final Table table1, final Table table2) -> {
final Table result = wrapped.apply(table1, table2);
if (result != null && result.isRefreshing()) {
throw new IllegalStateException("Static partitioned tables cannot contain refreshing constituents. "
+ "Did you mean to specify expectRefreshingResults=true for this transform?");
}
return result;
};
}
// TODO (https://github.com/deephaven/deephaven-core/issues/2368): Consider adding transformWithKeys support
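/**
 * Returns the single constituent whose key column values match {@code keyColumnValues}, or {@code null} if no
 * constituent matches. Multiple matches are only possible when keys are not unique, and result in an exception.
 */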
@ConcurrentMethod
@Override
public Table constituentFor(@NotNull final Object... keyColumnValues) {
if (keyColumnValues.length != keyColumnNames.size()) {
throw new IllegalArgumentException(
"Key count mismatch: expected one key column value for each key column name in " + keyColumnNames
+ ", instead received " + Arrays.toString(keyColumnValues));
}
final int numKeys = keyColumnValues.length;
final List<Filter> filters = new ArrayList<>(numKeys);
final String[] keyColumnNames = keyColumnNames().toArray(String[]::new);
for (int kci = 0; kci < numKeys; ++kci) {
filters.add(new MatchFilter(keyColumnNames[kci], keyColumnValues[kci]));
}
return LivenessScopeStack.computeEnclosed(() -> {
final Table[] matchingConstituents = filter(filters).snapshotConstituents();
final int matchingCount = matchingConstituents.length;
if (matchingCount > 1) {
throw new UnsupportedOperationException(
"Result size mismatch: expected 0 or 1 results, instead found " + matchingCount);
}
return matchingCount == 1 ? matchingConstituents[0] : null;
},
table::isRefreshing,
constituent -> constituent != null && constituent.isRefreshing());
}
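/**
 * Returns a snapshot of all current constituents; refreshing constituents are managed by the enclosing
 * liveness scope.
 */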
@ConcurrentMethod
@Override
public Table[] constituents() {
return LivenessScopeStack.computeArrayEnclosed(
this::snapshotConstituents,
table::isRefreshing,
constituent -> constituent != null && constituent.isRefreshing());
}
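// Collects the current contents of the constituent column as an array, handling the case where constituent
// changes are permitted separately from the static case.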
private Table[] snapshotConstituents() {
if (constituentChangesPermitted) {
final MutableObject