com.azure.cosmos.implementation.query.ParallelDocumentQueryExecutionContext Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of azure-cosmos Show documentation
This Package contains Microsoft Azure Cosmos SDK (with Reactive Extension Reactor support) for Azure Cosmos DB SQL API
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.cosmos.implementation.query;
import com.azure.cosmos.BridgeInternal;
import com.azure.cosmos.CosmosDiagnostics;
import com.azure.cosmos.CosmosException;
import com.azure.cosmos.implementation.Configs;
import com.azure.cosmos.implementation.DiagnosticsClientContext;
import com.azure.cosmos.implementation.DocumentClientRetryPolicy;
import com.azure.cosmos.implementation.HttpConstants;
import com.azure.cosmos.implementation.PartitionKeyRange;
import com.azure.cosmos.implementation.QueryMetrics;
import com.azure.cosmos.implementation.RequestChargeTracker;
import com.azure.cosmos.implementation.Resource;
import com.azure.cosmos.implementation.ResourceType;
import com.azure.cosmos.implementation.RxDocumentServiceRequest;
import com.azure.cosmos.implementation.Utils;
import com.azure.cosmos.implementation.Utils.ValueHolder;
import com.azure.cosmos.implementation.apachecommons.lang.tuple.ImmutablePair;
import com.azure.cosmos.implementation.feedranges.FeedRangeEpkImpl;
import com.azure.cosmos.models.CosmosQueryRequestOptions;
import com.azure.cosmos.models.FeedResponse;
import com.azure.cosmos.models.ModelBridgeInternal;
import com.azure.cosmos.models.SqlQuerySpec;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.concurrent.Queues;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
 * While this class is public, it is not part of our published public APIs.
 * It is meant to be used internally only by our SDK.
 */
public class ParallelDocumentQueryExecutionContext
extends ParallelDocumentQueryExecutionContextBase {
private final CosmosQueryRequestOptions cosmosQueryRequestOptions;
private final Map partitionKeyRangeToContinuationTokenMap;
private ParallelDocumentQueryExecutionContext(
    DiagnosticsClientContext diagnosticsClientContext,
    IDocumentQueryClient client,
    ResourceType resourceTypeEnum,
    Class resourceType,
    SqlQuerySpec query,
    CosmosQueryRequestOptions cosmosQueryRequestOptions,
    String resourceLink,
    String rewrittenQuery,
    String collectionRid,
    boolean isContinuationExpected,
    boolean getLazyFeedResponse,
    UUID correlatedActivityId) {
    // All shared query-pipeline state lives in the base context.
    super(diagnosticsClientContext, client, resourceTypeEnum, resourceType, query,
        cosmosQueryRequestOptions, resourceLink, rewrittenQuery, isContinuationExpected,
        getLazyFeedResponse, correlatedActivityId);
    // Per-feed-range continuation tokens are populated later by initialize(...).
    this.partitionKeyRangeToContinuationTokenMap = new HashMap<>();
    this.cosmosQueryRequestOptions = cosmosQueryRequestOptions;
}
/**
 * Factory for the parallel (cross-partition) query execution context.
 * Initialization failures are surfaced through the returned {@code Flux}
 * rather than thrown to the caller.
 */
public static Flux> createAsync(
    DiagnosticsClientContext diagnosticsClientContext,
    IDocumentQueryClient client,
    PipelinedDocumentQueryParams initParams) {
    // Assemble the context from the pipeline parameters.
    ParallelDocumentQueryExecutionContext executionContext =
        new ParallelDocumentQueryExecutionContext(
            diagnosticsClientContext,
            client,
            initParams.getResourceTypeEnum(),
            initParams.getResourceType(),
            initParams.getQuery(),
            initParams.getCosmosQueryRequestOptions(),
            initParams.getResourceLink(),
            initParams.getQueryInfo().getRewrittenQuery(),
            initParams.getCollectionRid(),
            initParams.isContinuationExpected(),
            initParams.isGetLazyResponseFeed(),
            initParams.getCorrelatedActivityId());
    executionContext.setTop(initParams.getTop());
    try {
        executionContext.initialize(
            initParams.getCollectionRid(),
            initParams.getFeedRanges(),
            initParams.getInitialPageSize(),
            ModelBridgeInternal.getRequestContinuationFromQueryRequestOptions(
                initParams.getCosmosQueryRequestOptions()));
    } catch (CosmosException dce) {
        // Propagate initialization errors as a terminal reactive signal.
        return Flux.error(dce);
    }
    return Flux.just(executionContext);
}
/**
 * Factory for the context backing readMany: a parallel query restricted to
 * the supplied per-range query map.
 *
 * Consistency fix: like {@link #createAsync}, a {@code CosmosException}
 * raised during initialization is now surfaced through the returned
 * {@code Flux} (via {@code Flux.error}) instead of being thrown
 * synchronously to the caller of this reactive factory.
 */
public static Flux> createReadManyQueryAsync(
    DiagnosticsClientContext diagnosticsClientContext,
    IDocumentQueryClient queryClient,
    String collectionResourceId, SqlQuerySpec sqlQuery,
    Map rangeQueryMap,
    CosmosQueryRequestOptions cosmosQueryRequestOptions, String collectionRid, String collectionLink, UUID activityId, Class klass,
    ResourceType resourceTypeEnum) {
    ParallelDocumentQueryExecutionContext context = new ParallelDocumentQueryExecutionContext(diagnosticsClientContext,
        queryClient,
        resourceTypeEnum,
        klass,
        sqlQuery,
        cosmosQueryRequestOptions,
        collectionLink,
        sqlQuery.getQueryText(),
        collectionRid,
        false,              // isContinuationExpected
        false,              // getLazyFeedResponse
        activityId);
    try {
        context.initializeReadMany(queryClient, collectionResourceId, sqlQuery, rangeQueryMap,
            cosmosQueryRequestOptions, activityId, collectionRid);
    } catch (CosmosException dce) {
        // Surface initialization failures through the reactive stream,
        // matching the error-handling style of createAsync.
        return Flux.error(dce);
    }
    return Flux.just(context);
}
/**
 * Seeds the per-feed-range continuation map and delegates to the base class
 * to start draining each range in parallel.
 *
 * @param collectionRid     resource id of the target collection.
 * @param feedRanges        the EPK feed ranges this query must cover.
 * @param initialPageSize   page size for the first request on each range.
 * @param continuationToken user-supplied composite continuation token, or
 *                          null to start every range from the beginning.
 * @throws CosmosException (BADREQUEST) when the continuation token cannot be
 *                         parsed as a composite continuation token.
 */
private void initialize(
String collectionRid,
List feedRanges,
int initialPageSize,
String continuationToken) {
// Generate the corresponding continuation token map.
if (continuationToken == null) {
// If the user does not give a continuation token,
// then just start the query from the first partition:
// every range is mapped to a null continuation, i.e. "from the start".
for (FeedRangeEpkImpl feedRangeEpk : feedRanges) {
partitionKeyRangeToContinuationTokenMap.put(feedRangeEpk,
null);
}
} else {
// Figure out which partitions to resume from:
// If a continuation token is given then we need to figure out partition key
// range it maps to
// in order to filter the partition key ranges.
// For example if suppliedCompositeContinuationToken.RANGE.Min ==
// partition3.RANGE.Min,
// then we know that partitions 0, 1, 2 are fully drained.
// Check to see if composite continuation token is a valid JSON.
ValueHolder outCompositeContinuationToken = new ValueHolder<>();
if (!CompositeContinuationToken.tryParse(continuationToken,
outCompositeContinuationToken)) {
String message = String.format("INVALID JSON in continuation token %s for Parallel~Context",
continuationToken);
// Malformed token is a client error: reject with 400 Bad Request.
throw BridgeInternal.createCosmosException(HttpConstants.StatusCodes.BADREQUEST,
message);
}
CompositeContinuationToken compositeContinuationToken = outCompositeContinuationToken.v;
PartitionMapper.PartitionMapping partitionMapping =
PartitionMapper.getPartitionMapping(feedRanges, Collections.singletonList(compositeContinuationToken));
// Skip all the partitions left of the target range, since they have already been drained fully.
// Only the target range (resumed with its token) and the ranges to its
// right (started fresh) are entered into the continuation map.
populatePartitionToContinuationMap(partitionMapping.getTargetMapping());
populatePartitionToContinuationMap(partitionMapping.getMappingRightOfTarget());
}
super.initialize(collectionRid,
partitionKeyRangeToContinuationTokenMap,
initialPageSize,
this.querySpec);
}
/**
 * Records the resume point for each mapped feed range in
 * {@code partitionKeyRangeToContinuationTokenMap}: ranges with a token
 * resume from it, ranges without one are stored with a null continuation
 * so they are drained from the beginning.
 */
private void populatePartitionToContinuationMap(
Map partitionMapping) {
    for (Map.Entry mapping : partitionMapping.entrySet()) {
        if (mapping.getValue() == null) {
            // No resume point for this range - start it from scratch.
            partitionKeyRangeToContinuationTokenMap.put(mapping.getKey(), null);
            continue;
        }
        partitionKeyRangeToContinuationTokenMap.put(mapping.getKey(), mapping.getValue().getToken());
    }
}
/* private List getPartitionKeyRangesForContinuation(
CompositeContinuationToken compositeContinuationToken,
List partitionKeyRanges) {
Map partitionRangeIdToTokenMap = new HashMap<>();
ValueHolder