/// <summary>
/// Configures the mock query client to return one response per entry in
/// <paramref name="responseMessagesPageSize"/> for a single partition key range.
/// A fresh continuation token is generated for every page except the last (which gets null),
/// and each Setup matches on the PREVIOUS page's token, so the mock can only be driven
/// through the pages in order.
/// </summary>
/// <param name="mockQueryContext">The mock to configure.</param>
/// <param name="responseMessagesPageSize">Item count for each mocked response page.</param>
/// <param name="sqlQuerySpec">The query spec the mock matches on (via IsSqlQuerySpecEqual).</param>
/// <param name="partitionKeyRange">The single partition key range being mocked.</param>
/// <param name="continuationToken">The continuation token expected on the first request.</param>
/// <param name="maxPageSize">The page size the mock matches on.</param>
/// <param name="collectionRid">The collection resource id the mock matches on.</param>
/// <param name="responseDelay">Optional synchronous delay (Thread.Sleep) injected before each response.</param>
/// <param name="cancellationToken">The cancellation token the mock matches on.</param>
/// <returns>All items across every generated response page, in page order.</returns>
public static List<ToDoItem> MockSinglePartitionKeyRangeContext(
    Mock<CosmosQueryClient> mockQueryContext,
    int[] responseMessagesPageSize,
    SqlQuerySpec sqlQuerySpec,
    PartitionKeyRange partitionKeyRange,
    string continuationToken,
    int maxPageSize,
    string collectionRid,
    TimeSpan? responseDelay,
    CancellationToken cancellationToken)
{
    // Setup a list of query responses. It generates a new continuation token for each response. This allows the mock to return the messages in the correct order.
    List<ToDoItem> allItems = new List<ToDoItem>();
    string previousContinuationToken = continuationToken;
    for (int i = 0; i < responseMessagesPageSize.Length; i++)
    {
        string newContinuationToken = null;

        // The last response should have a null continuation token
        if (i + 1 != responseMessagesPageSize.Length)
        {
            newContinuationToken = Guid.NewGuid().ToString();
        }

        // Item ids encode the page index and pk range id so failures are easy to trace.
        (QueryResponse response, IList<ToDoItem> items) queryResponse = QueryResponseMessageFactory.Create(
            itemIdPrefix: $"page{i}-pk{partitionKeyRange.Id}-",
            continuationToken: newContinuationToken,
            collectionRid: collectionRid,
            itemCount: responseMessagesPageSize[i]);

        allItems.AddRange(queryResponse.items);

        // Match on the previous page's continuation token; this chains the pages so the
        // mock only serves page i after page i-1's token has been handed back.
        mockQueryContext.Setup(x =>
            x.ExecuteItemQueryAsync(
                It.IsAny<Uri>(),
                ResourceType.Document,
                OperationType.Query,
                collectionRid,
                It.IsAny<QueryRequestOptions>(),
                It.Is<SqlQuerySpec>(specInput => IsSqlQuerySpecEqual(sqlQuerySpec, specInput)),
                previousContinuationToken,
                It.Is<PartitionKeyRangeIdentity>(rangeId =>
                    string.Equals(rangeId.PartitionKeyRangeId, partitionKeyRange.Id) &&
                    string.Equals(rangeId.CollectionRid, collectionRid)),
                It.IsAny<bool>(),
                maxPageSize,
                cancellationToken))
            .Callback(() =>
            {
                // Simulate backend latency synchronously when requested.
                if (responseDelay.HasValue)
                {
                    Thread.Sleep(responseDelay.Value);
                }
            })
            .Returns(Task.FromResult(queryResponse.response));

        // A SPLIT sentinel page keeps the previous token so the chain is not advanced.
        if (responseMessagesPageSize[i] != QueryResponseMessageFactory.SPLIT)
        {
            previousContinuationToken = newContinuationToken;
        }
    }

    return (allItems);
}
private static string AddPartitionKeyRangeToContinuationToken(string continuationToken, PartitionKeyRange partitionKeyRange)
{
    // Wrap the raw backend token together with the range it applies to, so the
    // composite token can later be routed back to the correct partition.
    CompositeContinuationToken compositeToken = new CompositeContinuationToken
    {
        Token = continuationToken,
        Range = partitionKeyRange.ToRange(),
    };

    return JsonConvert.SerializeObject(compositeToken);
}
public ResolvedRangeInfo(PartitionKeyRange range, List<CompositeContinuationToken> continuationTokens)
{
    // Capture the resolved target range and the continuation tokens that map onto it.
    this.ContinuationTokens = continuationTokens;
    this.ResolvedRange = range;
}
/// <summary>
/// <para>
/// If a query encounters a split while resuming from a continuation, we need to regenerate the continuation tokens.
/// Specifically, since after a split we will have a new set of ranges, we need to remove the continuation token for the
/// parent partition and introduce continuation tokens for the child partitions.
/// </para>
/// <para>
/// This function does that. In the process we also check the validity of the input continuation tokens. For example,
/// even after a split the boundary ranges of the child partitions should match the parent partition. If the Min and Max
/// range of a target partition in the continuation token were Min1 and Max1, then the Min and Max range info for the two
/// corresponding child partitions C1Min, C1Max, C2Min, and C2Max should follow the constraint below:
/// PMax = C2Max > C2Min > C1Max > C1Min = PMin.
/// </para>
/// </summary>
/// <param name="partitionKeyRanges">The partition key ranges to extract continuation tokens for.</param>
/// <param name="suppliedContinuationTokens">The continuation tokens that the user supplied.</param>
/// <param name="targetRangeToContinuationTokenMap">The output dictionary of partition key range id to continuation token.</param>
/// <typeparam name="TContinuationToken">The type of continuation token to generate.</typeparam>
/// <Remarks>
/// The code assumes that merge doesn't happen.
/// </Remarks>
/// <returns>The index of the partition whose MinInclusive is equal to the minimum supplied continuation token range.</returns>
protected int FindTargetRangeAndExtractContinuationTokens<TContinuationToken>(
    List<PartitionKeyRange> partitionKeyRanges,
    IEnumerable<Tuple<TContinuationToken, Range<string>>> suppliedContinuationTokens,
    out Dictionary<string, TContinuationToken> targetRangeToContinuationTokenMap)
{
    if (partitionKeyRanges == null)
    {
        // Use the (paramName, message) overload; the original passed the whole message
        // as paramName, which mangles ArgumentNullException.ParamName.
        throw new ArgumentNullException(nameof(partitionKeyRanges), $"{nameof(partitionKeyRanges)} can not be null.");
    }

    if (partitionKeyRanges.Count < 1)
    {
        throw new ArgumentException($"{nameof(partitionKeyRanges)} must have atleast one element.");
    }

    foreach (PartitionKeyRange partitionKeyRange in partitionKeyRanges)
    {
        if (partitionKeyRange == null)
        {
            throw new ArgumentException($"{nameof(partitionKeyRanges)} can not have null elements.");
        }
    }

    if (suppliedContinuationTokens == null)
    {
        throw new ArgumentNullException(nameof(suppliedContinuationTokens), $"{nameof(suppliedContinuationTokens)} can not be null.");
    }

    // Materialize once: the original re-enumerated the (possibly lazy) sequence for
    // each Count() call, the OrderBy below, and the final foreach.
    List<Tuple<TContinuationToken, Range<string>>> continuationTokens = suppliedContinuationTokens.ToList();

    if (continuationTokens.Count < 1)
    {
        throw new ArgumentException($"{nameof(suppliedContinuationTokens)} must have atleast one element.");
    }

    if (continuationTokens.Count > partitionKeyRanges.Count)
    {
        throw new ArgumentException($"{nameof(suppliedContinuationTokens)} can not have more elements than {nameof(partitionKeyRanges)}.");
    }

    targetRangeToContinuationTokenMap = new Dictionary<string, TContinuationToken>();

    // Find the minimum index: the token with the smallest range Min determines where
    // in partitionKeyRanges the query resumes.
    Tuple<TContinuationToken, Range<string>> firstContinuationTokenAndRange = continuationTokens
        .OrderBy((tuple) => tuple.Item2.Min)
        .First();
    TContinuationToken firstContinuationToken = firstContinuationTokenAndRange.Item1;
    PartitionKeyRange firstContinuationRange = new PartitionKeyRange
    {
        MinInclusive = firstContinuationTokenAndRange.Item2.Min,
        MaxExclusive = firstContinuationTokenAndRange.Item2.Max
    };

    int minIndex = partitionKeyRanges.BinarySearch(
        firstContinuationRange,
        Comparer<PartitionKeyRange>.Create((range1, range2) => string.CompareOrdinal(range1.MinInclusive, range2.MinInclusive)));
    if (minIndex < 0)
    {
        this.TraceWarning(string.Format(
            CultureInfo.InvariantCulture,
            "Could not find continuation token: {0}",
            firstContinuationToken.ToString()));
        throw new BadRequestException(RMResources.InvalidContinuationToken);
    }

    foreach (Tuple<TContinuationToken, Range<string>> suppledContinuationToken in continuationTokens)
    {
        // find what ranges make up the supplied continuation token
        TContinuationToken continuationToken = suppledContinuationToken.Item1;
        Range<string> range = suppledContinuationToken.Item2;

        // ToList so the Where/OrderBy pipeline is not re-executed by every access below
        // (the original re-ran it for Count(), First(), Last(), and the foreach).
        List<PartitionKeyRange> replacementRanges = partitionKeyRanges
            .Where((partitionKeyRange) =>
                string.CompareOrdinal(range.Min, partitionKeyRange.MinInclusive) <= 0 &&
                string.CompareOrdinal(range.Max, partitionKeyRange.MaxExclusive) >= 0)
            .OrderBy((partitionKeyRange) => partitionKeyRange.MinInclusive)
            .ToList();

        // Could not find the child ranges
        if (replacementRanges.Count == 0)
        {
            this.TraceWarning(string.Format(
                CultureInfo.InvariantCulture,
                "Could not find continuation token: {0}",
                continuationToken.ToString()));
            throw new BadRequestException(RMResources.InvalidContinuationToken);
        }

        // PMax = C2Max > C2Min > C1Max > C1Min = PMin.
        string parentMax = range.Max;
        string child2Max = replacementRanges[replacementRanges.Count - 1].MaxExclusive;
        string child2Min = replacementRanges[replacementRanges.Count - 1].MinInclusive;
        string child1Max = replacementRanges[0].MaxExclusive;
        string child1Min = replacementRanges[0].MinInclusive;
        string parentMin = range.Min;

        if (!(parentMax == child2Max &&
            string.CompareOrdinal(child2Max, child2Min) >= 0 &&
            (replacementRanges.Count == 1 ? true : string.CompareOrdinal(child2Min, child1Max) >= 0) &&
            string.CompareOrdinal(child1Max, child1Min) >= 0 &&
            child1Min == parentMin))
        {
            this.TraceWarning(string.Format(
                CultureInfo.InvariantCulture,
                "PMax = C2Max > C2Min > C1Max > C1Min = PMin: {0}",
                continuationToken.ToString()));
            throw new BadRequestException(RMResources.InvalidContinuationToken);
        }

        // Every child range resumes from the parent's continuation token.
        foreach (PartitionKeyRange partitionKeyRange in replacementRanges)
        {
            targetRangeToContinuationTokenMap.Add(partitionKeyRange.Id, continuationToken);
        }
    }

    return (minIndex);
}
public static async Task<List<PartitionKeyRange>> GetReplacementRangesAsync(PartitionKeyRange targetRange, IRoutingMapProvider routingMapProvider, string collectionRid)
{
    // Ask the routing map which current ranges overlap the target range's span.
    // NOTE(review): the final 'true' argument presumably forces a cache refresh — confirm.
    var overlappingRanges = await routingMapProvider.TryGetOverlappingRangesAsync(
        collectionRid,
        targetRange.ToRange(),
        true);

    return overlappingRanges.ToList();
}
/// <summary>
/// Verifies ItemProducerTree behavior across a success/success/failure page sequence:
/// the two buffered successes are drained normally, the failure surfaces through
/// TryMoveNextPageAsync, and after the failure the producer caches the failed state
/// and never calls the backend again (the final mock setup throws if it does).
/// </summary>
public async Task TestItemProducerTreeWithFailure()
{
    int callBackCount = 0;
    Mock<CosmosQueryContext> mockQueryContext = new Mock<CosmosQueryContext>();

    SqlQuerySpec sqlQuerySpec = new SqlQuerySpec("Select * from t");
    PartitionKeyRange partitionKeyRange = new PartitionKeyRange { Id = "0", MinInclusive = "A", MaxExclusive = "B" };

    // Counts produce-complete callbacks; not asserted on in the visible code.
    void produceAsyncCompleteCallback(
        ItemProducerTree producer,
        int itemsBuffered,
        double resourceUnitUsage,
        long responseLengthBytes,
        CancellationToken token)
    {
        callBackCount++;
    }

    Mock<IComparer<ItemProducerTree>> comparer = new Mock<IComparer<ItemProducerTree>>();
    Mock<IEqualityComparer<CosmosElement>> cosmosElementComparer = new Mock<IEqualityComparer<CosmosElement>>();

    CancellationTokenSource cancellationTokenSource = new CancellationTokenSource();

    // Single mocked element per page.
    IReadOnlyList<CosmosElement> cosmosElements = new List<CosmosElement>()
    {
        new Mock<CosmosElement>(CosmosElementType.Object).Object
    };

    // Diagnostics payload for the success pages (HTTP 200).
    CosmosDiagnosticsContext diagnosticsContext = new CosmosDiagnosticsContextCore();
    diagnosticsContext.AddDiagnosticsInternal(new PointOperationStatistics(
        Guid.NewGuid().ToString(),
        System.Net.HttpStatusCode.OK,
        subStatusCode: SubStatusCodes.Unknown,
        responseTimeUtc: DateTime.UtcNow,
        requestCharge: 42,
        errorMessage: null,
        method: HttpMethod.Post,
        requestUri: new Uri("http://localhost.com"),
        requestSessionToken: null,
        responseSessionToken: null));

    QueryPageDiagnostics diagnostics = new QueryPageDiagnostics(
        clientQueryCorrelationId: Guid.NewGuid(),
        partitionKeyRangeId: "0",
        queryMetricText: "SomeRandomQueryMetricText",
        indexUtilizationText: null,
        diagnosticsContext: diagnosticsContext);
    // NOTE(review): pageDiagnostics is built but not passed anywhere visible — confirm it is still needed.
    IReadOnlyCollection<QueryPageDiagnostics> pageDiagnostics = new List<QueryPageDiagnostics>() { diagnostics };

    mockQueryContext.Setup(x => x.ContainerResourceId).Returns("MockCollectionRid");

    // Every ExecuteQueryAsync call returns a successful page with a non-null token,
    // so the producer believes more pages exist until the setup is replaced below.
    mockQueryContext.Setup(x => x.ExecuteQueryAsync(
        sqlQuerySpec,
        It.IsAny<string>(),
        It.IsAny<PartitionKeyRangeIdentity>(),
        It.IsAny<bool>(),
        It.IsAny<int>(),
        cancellationTokenSource.Token)).Returns(
        Task.FromResult(QueryResponseCore.CreateSuccess(
            result: cosmosElements,
            requestCharge: 42,
            activityId: "AA470D71-6DEF-4D61-9A08-272D8C9ABCFE",
            responseLengthBytes: 500,
            disallowContinuationTokenMessage: null,
            continuationToken: "TestToken")));

    ItemProducerTree itemProducerTree = new ItemProducerTree(
        queryContext: mockQueryContext.Object,
        querySpecForInit: sqlQuerySpec,
        partitionKeyRange: partitionKeyRange,
        produceAsyncCompleteCallback: produceAsyncCompleteCallback,
        itemProducerTreeComparer: comparer.Object,
        equalityComparer: cosmosElementComparer.Object,
        testSettings: new TestInjections(simulate429s: false, simulateEmptyPages: false),
        deferFirstPage: false,
        collectionRid: "collectionRid",
        initialContinuationToken: null,
        initialPageSize: 50);

    // Buffer to success responses
    await itemProducerTree.BufferMoreDocumentsAsync(cancellationTokenSource.Token);
    await itemProducerTree.BufferMoreDocumentsAsync(cancellationTokenSource.Token);

    // Diagnostics payload for the failure page (HTTP 500).
    CosmosDiagnosticsContext diagnosticsContextInternalServerError = new CosmosDiagnosticsContextCore();
    diagnosticsContextInternalServerError.AddDiagnosticsInternal(new PointOperationStatistics(
        Guid.NewGuid().ToString(),
        System.Net.HttpStatusCode.InternalServerError,
        subStatusCode: SubStatusCodes.Unknown,
        responseTimeUtc: DateTime.UtcNow,
        requestCharge: 10.2,
        errorMessage: "Error message",
        method: HttpMethod.Post,
        requestUri: new Uri("http://localhost.com"),
        requestSessionToken: null,
        responseSessionToken: null));

    diagnostics = new QueryPageDiagnostics(
        clientQueryCorrelationId: Guid.NewGuid(),
        partitionKeyRangeId: "0",
        queryMetricText: null,
        indexUtilizationText: null,
        diagnosticsContext: diagnosticsContextInternalServerError);
    pageDiagnostics = new List<QueryPageDiagnostics>() { diagnostics };

    // Buffer a failure
    mockQueryContext.Setup(x => x.ExecuteQueryAsync(
        sqlQuerySpec,
        It.IsAny<string>(),
        It.IsAny<PartitionKeyRangeIdentity>(),
        It.IsAny<bool>(),
        It.IsAny<int>(),
        cancellationTokenSource.Token)).Returns(
        Task.FromResult(QueryResponseCore.CreateFailure(
            statusCode: HttpStatusCode.InternalServerError,
            subStatusCodes: null,
            cosmosException: CosmosExceptionFactory.CreateInternalServerErrorException(
                "Error message"),
            requestCharge: 10.2,
            activityId: Guid.NewGuid().ToString())));

    await itemProducerTree.BufferMoreDocumentsAsync(cancellationTokenSource.Token);

    // First item should be a success
    {
        (bool movedToNextPage, QueryResponseCore? failureResponse) = await itemProducerTree.TryMoveNextPageAsync(cancellationTokenSource.Token);
        Assert.IsTrue(movedToNextPage);
        Assert.IsNull(failureResponse);
        // Each mocked page holds exactly one element, hence true-then-false.
        Assert.IsTrue(itemProducerTree.TryMoveNextDocumentWithinPage());
        Assert.IsFalse(itemProducerTree.TryMoveNextDocumentWithinPage());
        Assert.IsTrue(itemProducerTree.HasMoreResults);
    }

    // Second item should be a success
    {
        (bool movedToNextPage, QueryResponseCore? failureResponse) = await itemProducerTree.TryMoveNextPageAsync(cancellationTokenSource.Token);
        Assert.IsTrue(movedToNextPage);
        Assert.IsNull(failureResponse);
        Assert.IsTrue(itemProducerTree.TryMoveNextDocumentWithinPage());
        Assert.IsFalse(itemProducerTree.TryMoveNextDocumentWithinPage());
        Assert.IsTrue(itemProducerTree.HasMoreResults);
    }

    // Third item should be a failure
    {
        (bool movedToNextPage, QueryResponseCore? failureResponse) = await itemProducerTree.TryMoveNextPageAsync(cancellationTokenSource.Token);
        Assert.IsFalse(movedToNextPage);
        Assert.IsNotNull(failureResponse);
        Assert.IsFalse(itemProducerTree.HasMoreResults);
    }

    // Try to buffer after failure. It should return the previous cached failure and not try to buffer again.
    mockQueryContext.Setup(x => x.ExecuteQueryAsync(
        sqlQuerySpec,
        It.IsAny<string>(),
        It.IsAny<PartitionKeyRangeIdentity>(),
        It.IsAny<bool>(),
        It.IsAny<int>(),
        cancellationTokenSource.Token)).
        Throws(new Exception("Previous buffer failed. Operation should return original failure and not try again"));

    await itemProducerTree.BufferMoreDocumentsAsync(cancellationTokenSource.Token);
    Assert.IsFalse(itemProducerTree.HasMoreResults);
}
/// <summary>
/// Initializes a new instance of the DocumentProducer class.
/// </summary>
/// <param name="partitionKeyRange">The partition key range.</param>
/// <param name="createRequestFunc">The callback to create a request.</param>
/// <param name="executeRequestFunc">The callback to execute the request.</param>
/// <param name="createRetryPolicyFunc">The callback to create the retry policy.</param>
/// <param name="produceAsyncCompleteCallback">The callback to call once you are done fetching.</param>
/// <param name="equalityComparer">The comparer to use to determine whether the producer has seen a new document.</param>
/// <param name="initialPageSize">The initial page size.</param>
/// <param name="initialContinuationToken">The initial continuation token.</param>
public DocumentProducer(
    PartitionKeyRange partitionKeyRange,
    Func<PartitionKeyRange, string, int, DocumentServiceRequest> createRequestFunc,
    Func<DocumentServiceRequest, IDocumentClientRetryPolicy, CancellationToken, Task<DocumentFeedResponse<CosmosElement>>> executeRequestFunc,
    Func<IDocumentClientRetryPolicy> createRetryPolicyFunc,
    ProduceAsyncCompleteDelegate produceAsyncCompleteCallback,
    IEqualityComparer<CosmosElement> equalityComparer,
    long initialPageSize = 50,
    string initialContinuationToken = null)
{
    // Validate every argument before allocating or assigning anything, so a failed
    // construction does no work (the original allocated the buffer and semaphore first).
    if (partitionKeyRange == null)
    {
        throw new ArgumentNullException(nameof(partitionKeyRange));
    }

    if (createRequestFunc == null)
    {
        throw new ArgumentNullException(nameof(createRequestFunc));
    }

    if (executeRequestFunc == null)
    {
        throw new ArgumentNullException(nameof(executeRequestFunc));
    }

    if (createRetryPolicyFunc == null)
    {
        throw new ArgumentNullException(nameof(createRetryPolicyFunc));
    }

    if (produceAsyncCompleteCallback == null)
    {
        throw new ArgumentNullException(nameof(produceAsyncCompleteCallback));
    }

    if (equalityComparer == null)
    {
        throw new ArgumentNullException(nameof(equalityComparer));
    }

    this.bufferedPages = new AsyncCollection<TryMonad<DocumentFeedResponse<CosmosElement>>>();

    // We use a binary semaphore to get the behavior of a mutex,
    // since fetching documents from the backend using a continuation token is a critical section.
    this.fetchSemaphore = new SemaphoreSlim(1, 1);

    this.PartitionKeyRange = partitionKeyRange;
    this.createRequestFunc = createRequestFunc;
    this.executeRequestFunc = executeRequestFunc;
    this.createRetryPolicyFunc = createRetryPolicyFunc;
    this.produceAsyncCompleteCallback = produceAsyncCompleteCallback;
    this.equalityComparer = equalityComparer;
    this.pageSize = initialPageSize;
    this.currentContinuationToken = initialContinuationToken;
    this.backendContinuationToken = initialContinuationToken;
    this.previousContinuationToken = initialContinuationToken;

    // A non-empty initial token means this producer is resuming an earlier enumeration.
    if (!string.IsNullOrEmpty(initialContinuationToken))
    {
        this.hasStartedFetching = true;
        this.isActive = true;
    }

    this.fetchSchedulingMetrics = new SchedulingStopwatch();
    this.fetchSchedulingMetrics.Ready();
    this.fetchExecutionRangeAccumulator = new FetchExecutionRangeAccumulator();

    this.hasMoreResults = true;
}
protected PartitionRangePageAsyncEnumerator(PartitionKeyRange range, CancellationToken cancellationToken, TState state = default)
{
    // Record the feed range, the resume state, and the token used to cancel page fetches.
    this.cancellationToken = cancellationToken;
    this.State = state;
    this.Range = range;
}
/// <summary>
/// Builds an ItemProducerTree over a fully mocked single-partition query context,
/// filling in test defaults for any argument the caller omits, and returns the tree
/// together with every item the mocked pages will produce.
/// </summary>
/// <param name="mockQueryContext">Optional mock to configure; a new one is created when null.</param>
/// <param name="responseMessagesPageSize">Page sizes for the mocked responses; DefaultResponseSizes when null.</param>
/// <param name="sqlQuerySpec">Query spec; DefaultQuerySpec when null.</param>
/// <param name="partitionKeyRange">Range; DefaultPartitionKeyRange when null.</param>
/// <param name="continuationToken">Initial continuation token for both the mock and the tree.</param>
/// <param name="maxPageSize">Initial page size for the tree and the mock match.</param>
/// <param name="deferFirstPage">Whether the tree defers fetching its first page.</param>
/// <param name="collectionRid">Collection resource id; note no default is substituted when null.</param>
/// <param name="itemProducerTreeComparer">Tree comparer; ParallelItemProducerTreeComparer when null.</param>
/// <param name="completeDelegate">Produce-complete callback; DefaultTreeProduceAsyncCompleteDelegate when null.</param>
/// <param name="executeCallback">Optional callback invoked by the mock on each execute.</param>
/// <param name="cancellationToken">Token the mock matches on.</param>
/// <returns>The configured tree and a read-only view of all items the mock will serve.</returns>
public static (ItemProducerTree itemProducerTree, ReadOnlyCollection<ToDoItem> allItems) CreateTree(
    Mock<CosmosQueryContext> mockQueryContext = null,
    int[] responseMessagesPageSize = null,
    SqlQuerySpec sqlQuerySpec = null,
    PartitionKeyRange partitionKeyRange = null,
    string continuationToken = null,
    int maxPageSize = 50,
    bool deferFirstPage = true,
    string collectionRid = null,
    IComparer<ItemProducerTree> itemProducerTreeComparer = null,
    ItemProducerTree.ProduceAsyncCompleteDelegate completeDelegate = null,
    Action executeCallback = null,
    CancellationToken cancellationToken = default(CancellationToken))
{
    // Substitute test defaults for any omitted argument.
    if (responseMessagesPageSize == null)
    {
        responseMessagesPageSize = DefaultResponseSizes;
    }

    if (sqlQuerySpec == null)
    {
        sqlQuerySpec = DefaultQuerySpec;
    }

    if (partitionKeyRange == null)
    {
        partitionKeyRange = DefaultPartitionKeyRange;
    }

    if (completeDelegate == null)
    {
        completeDelegate = DefaultTreeProduceAsyncCompleteDelegate;
    }

    if (itemProducerTreeComparer == null)
    {
        itemProducerTreeComparer = new ParallelItemProducerTreeComparer();
    }

    if (mockQueryContext == null)
    {
        mockQueryContext = new Mock<CosmosQueryContext>();
    }

    mockQueryContext.Setup(x => x.ContainerResourceId).Returns(collectionRid);

    // Setup a list of query responses. It generates a new continuation token for each response. This allows the mock to return the messages in the correct order.
    List<ToDoItem> allItems = MockSinglePartitionKeyRangeContext(
        mockQueryContext,
        responseMessagesPageSize,
        sqlQuerySpec,
        partitionKeyRange,
        continuationToken,
        maxPageSize,
        collectionRid,
        executeCallback,
        cancellationToken);

    ItemProducerTree itemProducerTree = new ItemProducerTree(
        mockQueryContext.Object,
        sqlQuerySpec,
        partitionKeyRange,
        completeDelegate,
        itemProducerTreeComparer,
        CosmosElementEqualityComparer.Value,
        deferFirstPage,
        collectionRid,
        maxPageSize,
        initialContinuationToken: continuationToken);

    return (itemProducerTree, allItems.AsReadOnly());
}
/// <summary>
/// Initializes a new instance of the DocumentProducer class that schedules its
/// fetches on a ComparableTaskScheduler.
/// </summary>
/// <param name="taskScheduler">The scheduler used to run fetch tasks.</param>
/// <param name="createRequestFunc">The callback to create a request from a continuation token and page size.</param>
/// <param name="targetRange">The partition key range this producer draws from.</param>
/// <param name="taskPriorityFunc">The callback that assigns a priority to this producer's fetch task.</param>
/// <param name="executeRequestFunc">The callback to execute the request.</param>
/// <param name="createRetryPolicyFunc">The callback to create the retry policy.</param>
/// <param name="produceAsyncCompleteCallback">The callback to call once a fetch completes.</param>
/// <param name="correlatedActivityId">The activity id correlating this producer's requests.</param>
/// <param name="initialPageSize">The initial page size.</param>
/// <param name="initialContinuationToken">The initial continuation token.</param>
public DocumentProducer(
    ComparableTaskScheduler taskScheduler,
    Func<string, int, DocumentServiceRequest> createRequestFunc,
    PartitionKeyRange targetRange,
    Func<DocumentProducer<T>, int> taskPriorityFunc,
    Func<DocumentServiceRequest, CancellationToken, Task<FeedResponse<T>>> executeRequestFunc,
    Func<IDocumentClientRetryPolicy> createRetryPolicyFunc,
    ProduceAsyncCompleteDelegate produceAsyncCompleteCallback,
    Guid correlatedActivityId,
    long initialPageSize = 50,
    string initialContinuationToken = null)
{
    // Use nameof so the reported parameter names cannot drift from the signature.
    // (The original passed "documentServiceRequest" for createRequestFunc and
    // "produceAsyncCallback" for produceAsyncCompleteCallback — both wrong.)
    if (taskScheduler == null)
    {
        throw new ArgumentNullException(nameof(taskScheduler));
    }

    if (createRequestFunc == null)
    {
        throw new ArgumentNullException(nameof(createRequestFunc));
    }

    if (targetRange == null)
    {
        throw new ArgumentNullException(nameof(targetRange));
    }

    if (taskPriorityFunc == null)
    {
        throw new ArgumentNullException(nameof(taskPriorityFunc));
    }

    if (executeRequestFunc == null)
    {
        throw new ArgumentNullException(nameof(executeRequestFunc));
    }

    if (createRetryPolicyFunc == null)
    {
        throw new ArgumentNullException(nameof(createRetryPolicyFunc));
    }

    if (produceAsyncCompleteCallback == null)
    {
        throw new ArgumentNullException(nameof(produceAsyncCompleteCallback));
    }

    this.taskScheduler = taskScheduler;
    this.itemBuffer = new AsyncCollection<FetchResult>();
    this.createRequestFunc = createRequestFunc;
    this.targetRange = targetRange;
    this.taskPriorityFunc = taskPriorityFunc;
    this.createRetryPolicyFunc = createRetryPolicyFunc;
    this.executeRequestFunc = executeRequestFunc;
    this.produceAsyncCompleteCallback = produceAsyncCompleteCallback;

    this.PageSize = initialPageSize;

    // Compare the long directly: the original cast to int first, so a page size
    // larger than int.MaxValue could wrap negative and throw spuriously (or a
    // negative value could wrap positive and slip through).
    if (this.PageSize < 0)
    {
        throw new ArithmeticException("page size is negative..");
    }

    this.correlatedActivityId = correlatedActivityId;
    this.CurrentBackendContinuationToken = initialContinuationToken;

    this.moveNextSchedulingMetrics = new SchedulingStopwatch();
    this.moveNextSchedulingMetrics.Ready();
    this.fetchSchedulingMetrics = new SchedulingStopwatch();
    this.fetchSchedulingMetrics.Ready();
    this.fetchExecutionRangeAccumulator = new FetchExecutionRangeAccumulator(this.targetRange.Id);

    // Binary semaphore: fetching with a continuation token is a critical section.
    this.fetchStateSemaphore = new SemaphoreSlim(1, 1);
}
/// <summary>
/// Attempts to resolve the partition key range a request targets, by partition key
/// header, by explicit PartitionKeyRangeIdentity, or from the request context's
/// previously resolved range. On a miss it retries exactly once with
/// <paramref name="refreshCache"/> = true (e.g. after a split); a miss on the
/// refreshed pass returns (false, null).
/// </summary>
/// <param name="request">The request whose target range is being resolved.</param>
/// <param name="sessionContainer">The session container (passed through to the recursive retry).</param>
/// <param name="partitionKeyRangeCache">Cache used to look up routing maps and ranges.</param>
/// <param name="clientCollectionCache">Cache used to resolve the collection.</param>
/// <param name="refreshCache">When true, forces master/name-cache refresh on the request and refreshed lookups.</param>
/// <returns>(true, range) on success; (false, null) when the range cannot be resolved even after refresh.</returns>
private static async Task<Tuple<bool, PartitionKeyRange>> TryResolvePartitionKeyRangeAsync(
    DocumentServiceRequest request,
    ISessionContainer sessionContainer,
    PartitionKeyRangeCache partitionKeyRangeCache,
    CollectionCache clientCollectionCache,
    bool refreshCache)
{
    if (refreshCache)
    {
        request.ForceMasterRefresh = true;
        request.ForceNameCacheRefresh = true;
    }

    PartitionKeyRange partitonKeyRange = null;
    ContainerProperties collection = await clientCollectionCache.ResolveCollectionAsync(
        request,
        CancellationToken.None,
        NoOpTrace.Singleton);

    string partitionKeyString = request.Headers[HttpConstants.HttpHeaders.PartitionKey];
    if (partitionKeyString != null)
    {
        // Resolve via the partition key header: fetch the routing map, then map the key to a range.
        CollectionRoutingMap collectionRoutingMap = await partitionKeyRangeCache.TryLookupAsync(
            collectionRid: collection.ResourceId,
            previousValue: null,
            request: request,
            cancellationToken: CancellationToken.None,
            NoOpTrace.Singleton);

        // When refreshing, pass the stale map as previousValue so the cache fetches a newer one.
        if (refreshCache && collectionRoutingMap != null)
        {
            collectionRoutingMap = await partitionKeyRangeCache.TryLookupAsync(
                collectionRid: collection.ResourceId,
                previousValue: collectionRoutingMap,
                request: request,
                cancellationToken: CancellationToken.None,
                NoOpTrace.Singleton);
        }

        partitonKeyRange = AddressResolver.TryResolveServerPartitionByPartitionKey(
            request: request,
            partitionKeyString: partitionKeyString,
            collectionCacheUptoDate: false,
            collection: collection,
            routingMap: collectionRoutingMap);
    }
    else if (request.PartitionKeyRangeIdentity != null)
    {
        // Resolve via an explicitly supplied range identity.
        PartitionKeyRangeIdentity partitionKeyRangeId = request.PartitionKeyRangeIdentity;
        partitonKeyRange = await partitionKeyRangeCache.TryGetPartitionKeyRangeByIdAsync(
            collection.ResourceId,
            partitionKeyRangeId.PartitionKeyRangeId,
            NoOpTrace.Singleton,
            refreshCache);
    }
    else if (request.RequestContext.ResolvedPartitionKeyRange != null)
    {
        // Fall back to a range resolved earlier in this request's lifetime.
        partitonKeyRange = request.RequestContext.ResolvedPartitionKeyRange;
    }

    if (partitonKeyRange == null)
    {
        // Already refreshed and still unresolved: give up.
        if (refreshCache)
        {
            return (new Tuple<bool, PartitionKeyRange>(false, null));
        }

        // need to refresh cache. Maybe split happened.
        return (await GatewayStoreModel.TryResolvePartitionKeyRangeAsync(
            request: request,
            sessionContainer: sessionContainer,
            partitionKeyRangeCache: partitionKeyRangeCache,
            clientCollectionCache: clientCollectionCache,
            refreshCache: true));
    }

    return (new Tuple<bool, PartitionKeyRange>(true, partitonKeyRange));
}
/// <summary>
/// Verifies ItemProducerTree behavior (older QueryResponse-based API) across a
/// success/success/failure page sequence: two successful moves, then a surfaced
/// failure, after which buffering again must return the cached failure without
/// calling the backend (the final mock setup throws if it does).
/// </summary>
public async Task TestItemProducerTreeWithFailure()
{
    int callBackCount = 0;
    Mock<CosmosQueryContext> mockQueryContext = new Mock<CosmosQueryContext>();

    SqlQuerySpec sqlQuerySpec = new SqlQuerySpec("Select * from t");
    PartitionKeyRange partitionKeyRange = new PartitionKeyRange { Id = "0", MinInclusive = "A", MaxExclusive = "B" };

    // Counts produce-complete callbacks; not asserted on in the visible code.
    Action<ItemProducerTree, int, double, QueryMetrics, long, CancellationToken> produceAsyncCompleteCallback = (
        ItemProducerTree producer,
        int itemsBuffered,
        double resourceUnitUsage,
        QueryMetrics queryMetrics,
        long responseLengthBytes,
        CancellationToken token) => { callBackCount++; };

    Mock<IComparer<ItemProducerTree>> comparer = new Mock<IComparer<ItemProducerTree>>();
    Mock<IEqualityComparer<CosmosElement>> cosmosElementComparer = new Mock<IEqualityComparer<CosmosElement>>();

    CancellationTokenSource cancellationTokenSource = new CancellationTokenSource();

    // Single mocked element per page.
    IEnumerable<CosmosElement> cosmosElements = new List<CosmosElement>()
    {
        new Mock<CosmosElement>(CosmosElementType.Object).Object
    };

    // Headers carry a non-null continuation token so the producer believes more pages exist.
    CosmosQueryResponseMessageHeaders headers = new CosmosQueryResponseMessageHeaders("TestToken", null, ResourceType.Document, "ContainerRid")
    {
        ActivityId = "AA470D71-6DEF-4D61-9A08-272D8C9ABCFE",
        RequestCharge = 42
    };

    // Every ExecuteQueryAsync call returns the same successful page until re-setup below.
    mockQueryContext.Setup(x => x.ExecuteQueryAsync(sqlQuerySpec, cancellationTokenSource.Token, It.IsAny<Action<RequestMessage>>())).Returns(
        Task.FromResult(QueryResponse.CreateSuccess(cosmosElements, 1, 500, headers)));

    ItemProducerTree itemProducerTree = new ItemProducerTree(
        queryContext: mockQueryContext.Object,
        querySpecForInit: sqlQuerySpec,
        partitionKeyRange: partitionKeyRange,
        produceAsyncCompleteCallback: produceAsyncCompleteCallback,
        itemProducerTreeComparer: comparer.Object,
        equalityComparer: cosmosElementComparer.Object,
        deferFirstPage: false,
        collectionRid: "collectionRid",
        initialContinuationToken: null,
        initialPageSize: 50);

    // Buffer to success responses
    await itemProducerTree.BufferMoreDocumentsAsync(cancellationTokenSource.Token);
    await itemProducerTree.BufferMoreDocumentsAsync(cancellationTokenSource.Token);

    // Buffer a failure
    mockQueryContext.Setup(x => x.ExecuteQueryAsync(sqlQuerySpec, cancellationTokenSource.Token, It.IsAny<Action<RequestMessage>>())).Returns(
        Task.FromResult(QueryResponse.CreateFailure(headers, HttpStatusCode.InternalServerError, null, "Error message", null)));

    await itemProducerTree.BufferMoreDocumentsAsync(cancellationTokenSource.Token);

    // First item should be a success
    var result = await itemProducerTree.MoveNextAsync(cancellationTokenSource.Token);
    Assert.IsTrue(result.successfullyMovedNext);
    Assert.IsNull(result.failureResponse);
    Assert.IsTrue(itemProducerTree.HasMoreResults);

    // Second item should be a success
    result = await itemProducerTree.MoveNextAsync(cancellationTokenSource.Token);
    Assert.IsTrue(result.successfullyMovedNext);
    Assert.IsNull(result.failureResponse);
    Assert.IsTrue(itemProducerTree.HasMoreResults);

    // Third item should be a failure
    result = await itemProducerTree.MoveNextAsync(cancellationTokenSource.Token);
    Assert.IsFalse(result.successfullyMovedNext);
    Assert.IsNotNull(result.failureResponse);
    Assert.IsFalse(itemProducerTree.HasMoreResults);

    // Try to buffer after failure. It should return the previous cached failure and not try to buffer again.
    mockQueryContext.Setup(x => x.ExecuteQueryAsync(sqlQuerySpec, cancellationTokenSource.Token, It.IsAny<Action<RequestMessage>>())).
        Throws(new Exception("Previous buffer failed. Operation should return original failure and not try again"));

    await itemProducerTree.BufferMoreDocumentsAsync(cancellationTokenSource.Token);
    // NOTE(review): 'result' is not refreshed after this final buffer call, so these two
    // asserts re-check the third move's outcome — confirm that is the intent.
    Assert.IsFalse(result.successfullyMovedNext);
    Assert.IsNotNull(result.failureResponse);
    Assert.IsFalse(itemProducerTree.HasMoreResults);
}
public Task<TryCatch<List<PartitionKeyRange>>> MonadicGetChildRangeAsync(
    PartitionKeyRange partitionKeyRange,
    CancellationToken cancellationToken)
{
    // Pure passthrough: delegate child-range resolution to the wrapped document container.
    return this.monadicDocumentContainer.MonadicGetChildRangeAsync(partitionKeyRange, cancellationToken);
}
/// <summary>
/// If a query encounters split up resuming using continuation, we need to regenerate the continuation tokens.
/// Specifically, since after split we will have new set of ranges, we need to remove continuation token for the
/// parent partition and introduce continuation token for the child partitions.
///
/// This function does that. Also in that process, we also check validity of the input continuation tokens. For example,
/// even after split the boundary ranges of the child partitions should match with the parent partitions. If the Min and Max
/// range of a target partition in the continuation token was Min1 and Max1. Then the Min and Max range info for the two
/// corresponding child partitions C1Min, C1Max, C2Min, and C2Max should follow the constrain below:
/// PMax = C2Max > C2Min > C1Max > C1Min = PMin.
///
/// Note that,
/// this is assuming the fact that the target partition was split once. But, in reality, the target partition might be split
/// multiple times
/// </summary>
/// <Remarks>
/// The code assumes that merge doesn't happen
/// </Remarks>
/// <typeparam name="TContinuationToken">The type of continuation token to map.</typeparam>
/// <param name="partitionKeyRanges">Current ranges, assumed sorted by MinInclusive.</param>
/// <param name="suppliedContinuationTokens">Token/range pairs supplied by the user, in range order.</param>
/// <param name="targetRangeToContinuationTokenMap">Output map of range id to continuation token.</param>
/// <returns>The index in <paramref name="partitionKeyRanges"/> where the first supplied token's range begins.</returns>
protected int FindTargetRangeAndExtractContinuationTokens<TContinuationToken>(
    List<PartitionKeyRange> partitionKeyRanges,
    IEnumerable<Tuple<TContinuationToken, Range<string>>> suppliedContinuationTokens,
    out Dictionary<string, TContinuationToken> targetRangeToContinuationTokenMap)
{
    targetRangeToContinuationTokenMap = new Dictionary<string, TContinuationToken>();

    bool foundInitialRange = false;
    // 'index' walks forward through partitionKeyRanges as tokens are consumed.
    int index = 0;
    int minIndex = -1;

    foreach (Tuple<TContinuationToken, Range<string>> tuple in suppliedContinuationTokens)
    {
        if (!foundInitialRange)
        {
            // Locate where the first supplied token's range starts among current ranges.
            PartitionKeyRange targetRange = new PartitionKeyRange
            {
                MinInclusive = tuple.Item2.Min,
                MaxExclusive = tuple.Item2.Max
            };

            minIndex = partitionKeyRanges.BinarySearch(
                targetRange,
                Comparer<PartitionKeyRange>.Create((range1, range2) => string.CompareOrdinal(range1.MinInclusive, range2.MinInclusive)));

            if (minIndex < 0)
            {
                DefaultTrace.TraceWarning(
                    string.Format(
                        CultureInfo.InvariantCulture,
                        "{0}, CorrelatedActivityId: {2} | Invalid format for continuation token {1} for OrderBy~Context.",
                        DateTime.UtcNow.ToString("o", CultureInfo.InvariantCulture),
                        tuple.Item1.ToString(),
                        this.CorrelatedActivityId));
                throw new BadRequestException(RMResources.InvalidContinuationToken);
            }

            index = minIndex;
            foundInitialRange = true;
        }

        if (partitionKeyRanges[index].ToRange().Equals(tuple.Item2))
        {
            // Exact match: the range did not split; map it directly to the token.
            targetRangeToContinuationTokenMap.Add(partitionKeyRanges[index++].Id, tuple.Item1);
        }
        else
        {
            // The token's range is wider than the current range at 'index': the parent
            // must have split. Consume consecutive child ranges until the parent's Max
            // is covered exactly; any mismatch means the token is invalid.
            bool canConsume = true;
            if (string.CompareOrdinal(partitionKeyRanges[index].MinInclusive, tuple.Item2.Min) == 0 &&
                string.CompareOrdinal(tuple.Item2.Max, partitionKeyRanges[index].MaxExclusive) > 0)
            {
                while (index < partitionKeyRanges.Count &&
                    string.CompareOrdinal(partitionKeyRanges[index].MaxExclusive, tuple.Item2.Max) <= 0)
                {
                    targetRangeToContinuationTokenMap.Add(partitionKeyRanges[index++].Id, tuple.Item1);
                }

                // The last consumed child must end exactly at the parent's Max.
                if (index > 0 && string.CompareOrdinal(partitionKeyRanges[index - 1].MaxExclusive, tuple.Item2.Max) != 0)
                {
                    canConsume = false;
                }
            }
            else
            {
                canConsume = false;
            }

            if (!canConsume)
            {
                DefaultTrace.TraceWarning(
                    string.Format(
                        CultureInfo.InvariantCulture,
                        "{0}, CorrelatedActivityId: {1} | Invalid format for continuation token {2} for OrderBy~Context.",
                        DateTime.UtcNow.ToString("o", CultureInfo.InvariantCulture),
                        this.CorrelatedActivityId,
                        tuple.Item1.ToString()));
                throw new BadRequestException(RMResources.InvalidContinuationToken);
            }
        }

        // All current ranges consumed; remaining tokens (if any) cannot map to anything.
        if (index >= partitionKeyRanges.Count)
        {
            break;
        }
    }

    return (minIndex);
}
/// <summary>
/// Samples up to Options.SampleCount documents from one partition key range via the
/// change feed, tallies occurrences of each partition key value, and prints the keys
/// ordered by descending document count. Documents without the partition key property
/// are counted under "[undefined]".
/// </summary>
/// <param name="collection">The collection whose partition key property name is used.</param>
/// <param name="pkRange">The partition key range to sample from.</param>
private async Task PrintTopPartitionKeysFromSampleData(ResourceResponse<DocumentCollection> collection, PartitionKeyRange pkRange)
{
    Dictionary<string, int> partitionKeyStats = new Dictionary<string, int>();
    int numDocumentsRead = 0;
    string partitionKeyProperty = GetPartitionKeyPropertyName(collection.Resource);

    while (numDocumentsRead < Options.SampleCount)
    {
        FeedResponse<Document> sampleResults = await Client.CreateDocumentChangeFeedQuery(
            DocumentCollectionUri,
            new ChangeFeedOptions
            {
                StartFromBeginning = true,
                PartitionKeyRangeId = pkRange.Id,
                MaxItemCount = -1
            })
            .ExecuteNextAsync<Document>();

        // The change feed is exhausted; stop sampling early.
        if (sampleResults.Count == 0)
        {
            break;
        }

        foreach (Document doc in sampleResults)
        {
            string pkValue = doc.GetPropertyValue<string>(partitionKeyProperty) ?? "[undefined]";

            // Single hash lookup instead of ContainsKey + indexer (double lookup).
            partitionKeyStats.TryGetValue(pkValue, out int count);
            partitionKeyStats[pkValue] = count + 1;
        }

        numDocumentsRead += sampleResults.Count;
    }

    // Every tracked key has a count of at least 1, so the original '>= 1' filter was a
    // no-op and is dropped: print all keys, most frequent first.
    foreach (KeyValuePair<string, int> partitionKey in partitionKeyStats.OrderByDescending(kvp => kvp.Value))
    {
        Console.WriteLine("Key: {0}, Count: {1}", partitionKey.Key, partitionKey.Value);
    }
}
/// <summary>
/// Drains the change feed of a single partition key range, upserting every changed document
/// into the target account and checkpointing the continuation after each page.
/// </summary>
/// <param name="sourceCollectionUri">URI of the collection to read changes from.</param>
/// <param name="pkRange">Partition key range whose change feed is processed.</param>
/// <param name="continuation">Continuation to resume from; null starts from the beginning.</param>
/// <returns>Tuple of (total documents processed, total request charge consumed by reads).</returns>
static async Task<Tuple<long, double>> ProcessPartitionKeyRangeId(Uri sourceCollectionUri, PartitionKeyRange pkRange, string continuation)
{
    long totalCount = 0;
    double totalRequestCharge = 0;

    ChangeFeedOptions options = new ChangeFeedOptions
    {
        PartitionKeyRangeId = pkRange.Id,
        StartFromBeginning = true,
        RequestContinuation = continuation,
        // Page size capped at 1000 rather than -1 (service-chosen) to bound per-page work.
        MaxItemCount = 1000
    };

    using (var query = sourceClient.CreateDocumentChangeFeedQuery(sourceCollectionUri, options))
    {
        do
        {
            var readChangesResponse = await query.ExecuteNextAsync<Document>();
            totalCount += readChangesResponse.Count;
            totalRequestCharge += readChangesResponse.RequestCharge;
            Console.WriteLine("Count of documents in this page : {0}", readChangesResponse.Count);
            Debug.WriteLine("Count of documents in this page : {0}", readChangesResponse.Count);

            if (readChangesResponse.Count > 0)
            {
                // Use a fresh task list per page. The previous implementation accumulated
                // tasks across all pages and re-awaited the whole (already completed) list
                // on every iteration, growing memory for the lifetime of the drain.
                List<Task> upsertTasks = new List<Task>(readChangesResponse.Count);
                foreach (Document changedDocument in readChangesResponse)
                {
                    // NOTE(review): this upserts into the *source* collection URI via the
                    // target client — presumably the target account hosts a collection at
                    // the same relative path; confirm this is intentional.
                    upsertTasks.Add(targetClient.UpsertDocumentAsync(sourceCollectionUri, changedDocument));
                }

                // Checkpoint only after every upsert for this page has completed.
                await Task.WhenAll(upsertTasks);
                checkpoints[pkRange.Id] = readChangesResponse.ResponseContinuation;
            }
        } while (query.HasMoreResults);
    }

    return Tuple.Create(totalCount, totalRequestCharge);
}
/// <summary>
/// Reads one random document from the given partition key range so the response headers
/// carry that physical partition's resource quota usage. Returns null when the range is empty.
/// </summary>
/// <param name="collection">Collection the document belongs to (used for the partition key property name).</param>
/// <param name="pkRange">Partition key range to probe.</param>
private async Task<ResourceResponse<Document>> GetPartitionUsageStats(DocumentCollection collection, PartitionKeyRange pkRange)
{
    Document sampleDocument = GetRandomDocumentFromPartition(DocumentCollectionUri, pkRange);
    if (sampleDocument == null)
    {
        return null;
    }

    //TODO: support partition key definitions for nested properties, numeric partition keys
    object partitionKeyValue =
        sampleDocument.GetPropertyValue<string>(GetPartitionKeyPropertyName(collection))
        ?? (object)Undefined.Value;

    return await Client.ReadDocumentAsync(
        UriFactory.CreateDocumentUri(Options.Database, Options.Collection, sampleDocument.Id),
        new RequestOptions { PartitionKey = new PartitionKey(partitionKeyValue) });
}
/// <summary>
/// Verifies ItemProducerTree failure semantics: two buffered successes drain normally, a buffered
/// failed response surfaces on the third MoveNext and marks the tree as having no more results,
/// and a subsequent buffer attempt returns the cached failure instead of calling the backend again
/// (the final mock setup throws if the backend is invoked after the failure).
/// </summary>
public async Task TestItemProducerTreeWithFailure()
{
    int callBackCount = 0;
    Mock<CosmosQueryContext> mockQueryContext = new Mock<CosmosQueryContext>();

    SqlQuerySpec sqlQuerySpec = new SqlQuerySpec("Select * from t");
    PartitionKeyRange partitionKeyRange = new PartitionKeyRange { Id = "0", MinInclusive = "A", MaxExclusive = "B" };
    // Callback only counts invocations; the count itself is not asserted in this test.
    ItemProducerTree.ProduceAsyncCompleteDelegate produceAsyncCompleteCallback = (
        ItemProducerTree producer,
        int itemsBuffered,
        double resourceUnitUsage,
        QueryMetrics queryMetrics,
        long responseLengthBytes,
        CancellationToken token) => { callBackCount++; };

    Mock<IComparer<ItemProducerTree>> comparer = new Mock<IComparer<ItemProducerTree>>();
    Mock<IEqualityComparer<CosmosElement>> cosmosElementComparer = new Mock<IEqualityComparer<CosmosElement>>();
    CancellationTokenSource cancellationTokenSource = new CancellationTokenSource();

    IReadOnlyList<CosmosElement> cosmosElements = new List<CosmosElement>()
    {
        new Mock<CosmosElement>(CosmosElementType.Object).Object
    };

    mockQueryContext.Setup(x => x.ContainerResourceId).Returns("MockCollectionRid");
    // Initial setup: every query call returns the same single-element success page.
    mockQueryContext.Setup(x => x.ExecuteQueryAsync(
        sqlQuerySpec,
        It.IsAny<string>(),
        It.IsAny<PartitionKeyRangeIdentity>(),
        It.IsAny<bool>(),
        It.IsAny<int>(),
        cancellationTokenSource.Token)).Returns(
        Task.FromResult(QueryResponseCore.CreateSuccess(
            result: cosmosElements,
            requestCharge: 42,
            activityId: "AA470D71-6DEF-4D61-9A08-272D8C9ABCFE",
            queryMetrics: null,
            queryMetricsText: null,
            requestStatistics: null,
            responseLengthBytes: 500,
            disallowContinuationTokenMessage: null,
            continuationToken: "TestToken")));

    ItemProducerTree itemProducerTree = new ItemProducerTree(
        queryContext: mockQueryContext.Object,
        querySpecForInit: sqlQuerySpec,
        partitionKeyRange: partitionKeyRange,
        produceAsyncCompleteCallback: produceAsyncCompleteCallback,
        itemProducerTreeComparer: comparer.Object,
        equalityComparer: cosmosElementComparer.Object,
        deferFirstPage: false,
        collectionRid: "collectionRid",
        initialContinuationToken: null,
        initialPageSize: 50);

    // Buffer to success responses
    await itemProducerTree.BufferMoreDocumentsAsync(cancellationTokenSource.Token);
    await itemProducerTree.BufferMoreDocumentsAsync(cancellationTokenSource.Token);

    // Buffer a failure
    // Re-setup the mock so the next backend call returns a 500 failure page.
    mockQueryContext.Setup(x => x.ExecuteQueryAsync(
        sqlQuerySpec,
        It.IsAny<string>(),
        It.IsAny<PartitionKeyRangeIdentity>(),
        It.IsAny<bool>(),
        It.IsAny<int>(),
        cancellationTokenSource.Token)).Returns(
        Task.FromResult(QueryResponseCore.CreateFailure(
            statusCode: HttpStatusCode.InternalServerError,
            subStatusCodes: null,
            errorMessage: "Error message",
            requestCharge: 10.2,
            activityId: Guid.NewGuid().ToString(),
            queryMetricsText: null,
            queryMetrics: null)));

    await itemProducerTree.BufferMoreDocumentsAsync(cancellationTokenSource.Token);

    // First item should be a success
    (bool successfullyMovedNext, QueryResponseCore? failureResponse) result = await itemProducerTree.MoveNextAsync(cancellationTokenSource.Token);
    Assert.IsTrue(result.successfullyMovedNext);
    Assert.IsNull(result.failureResponse);
    Assert.IsTrue(itemProducerTree.HasMoreResults);

    // Second item should be a success
    result = await itemProducerTree.MoveNextAsync(cancellationTokenSource.Token);
    Assert.IsTrue(result.successfullyMovedNext);
    Assert.IsNull(result.failureResponse);
    Assert.IsTrue(itemProducerTree.HasMoreResults);

    // Third item should be a failure
    result = await itemProducerTree.MoveNextAsync(cancellationTokenSource.Token);
    Assert.IsFalse(result.successfullyMovedNext);
    Assert.IsNotNull(result.failureResponse);
    Assert.IsFalse(itemProducerTree.HasMoreResults);

    // Try to buffer after failure. It should return the previous cached failure and not try to buffer again.
    // This setup throws: if the producer ever calls the backend again after a failure, the test fails.
    mockQueryContext.Setup(x => x.ExecuteQueryAsync(
        sqlQuerySpec,
        It.IsAny<string>(),
        It.IsAny<PartitionKeyRangeIdentity>(),
        It.IsAny<bool>(),
        It.IsAny<int>(),
        cancellationTokenSource.Token)).
        Throws(new Exception("Previous buffer failed. Operation should return original failure and not try again"));

    await itemProducerTree.BufferMoreDocumentsAsync(cancellationTokenSource.Token);
    Assert.IsFalse(result.successfullyMovedNext);
    Assert.IsNotNull(result.failureResponse);
    Assert.IsFalse(itemProducerTree.HasMoreResults);
}
/// <summary>
/// Configures the mock query client to replay the configured pages for each partition, chaining
/// a fresh GUID continuation token from page to page so the mock serves pages in order. When a
/// partition has a split, mocks the split response plus the routing map lookup for the child
/// ranges and recurses to mock the children's pages.
/// </summary>
/// <returns>The same <paramref name="allItemsOrdered"/> list that was passed in.</returns>
private static IList<ToDoItem> GenerateAndMockResponseHelper(
    Mock<CosmosQueryClient> mockQueryClient,
    Mock<IRoutingMapProvider> mockRoutingMap,
    IList<ToDoItem> allItemsOrdered,
    bool isOrderByQuery,
    SqlQuerySpec sqlQuerySpec,
    string containerRid,
    string initContinuationToken,
    int maxPageSize,
    MockPartitionResponse[] mockResponseForSinglePartition,
    CancellationToken cancellationTokenForMocks)
{
    if (mockResponseForSinglePartition == null)
    {
        throw new ArgumentNullException(nameof(mockResponseForSinglePartition));
    }

    // Loop through all the partitions
    foreach (MockPartitionResponse partitionAndMessages in mockResponseForSinglePartition)
    {
        PartitionKeyRange partitionKeyRange = partitionAndMessages.PartitionKeyRange;
        string previousContinuationToken = initContinuationToken;

        // Loop through each message inside the partition
        List<int[]> messages = partitionAndMessages.MessagesWithItemIndex;
        int messagesCount = messages == null ? 0 : messages.Count;
        int lastMessageIndex = messagesCount - 1;
        for (int i = 0; i < messagesCount; i++)
        {
            int[] message = partitionAndMessages.MessagesWithItemIndex[i];

            string newContinuationToken = null;

            List<ToDoItem> currentPageItems = new List<ToDoItem>();

            // Null represents an empty page
            if (message != null)
            {
                // Each entry is an index into the full ordered item list.
                foreach (int itemPosition in message)
                {
                    currentPageItems.Add(allItemsOrdered[itemPosition]);
                }
            }

            // Last message should have null continuation token
            // Split means it's not the last message for this PK range
            if (i != lastMessageIndex || partitionAndMessages.HasSplit)
            {
                newContinuationToken = Guid.NewGuid().ToString();
            }

            QueryResponse queryResponse = QueryResponseMessageFactory.CreateQueryResponse(
                currentPageItems,
                isOrderByQuery,
                newContinuationToken,
                containerRid);

            // Matching on previousContinuationToken is what forces the mock to serve
            // this page only after the preceding page's token has been consumed.
            mockQueryClient.Setup(x => x.ExecuteItemQueryAsync(
                It.IsAny<Uri>(),
                ResourceType.Document,
                OperationType.Query,
                containerRid,
                It.IsAny<QueryRequestOptions>(),
                It.Is<SqlQuerySpec>(specInput => MockItemProducerFactory.IsSqlQuerySpecEqual(sqlQuerySpec, specInput)),
                previousContinuationToken,
                It.Is<PartitionKeyRangeIdentity>(rangeId => string.Equals(rangeId.PartitionKeyRangeId, partitionKeyRange.Id) && string.Equals(rangeId.CollectionRid, containerRid)),
                It.IsAny<bool>(),
                maxPageSize,
                cancellationTokenForMocks))
                .Returns(Task.FromResult(queryResponse));

            previousContinuationToken = newContinuationToken;
        }

        if (partitionAndMessages.HasSplit)
        {
            QueryResponse querySplitResponse = QueryResponseMessageFactory.CreateSplitResponse(containerRid);

            // The routing map must resolve the split parent's range to its children.
            mockRoutingMap.Setup(x => x.TryGetOverlappingRangesAsync(
                containerRid,
                It.Is<Documents.Routing.Range<string>>(inputRange => inputRange.Equals(partitionKeyRange.ToRange())),
                true)).Returns(Task.FromResult(partitionAndMessages.GetPartitionKeyRangeOfSplit()));

            // The request made with the final continuation token gets the split (410/1002-style) response.
            mockQueryClient.Setup(x => x.ExecuteItemQueryAsync(
                It.IsAny<Uri>(),
                ResourceType.Document,
                OperationType.Query,
                containerRid,
                It.IsAny<QueryRequestOptions>(),
                It.Is<SqlQuerySpec>(specInput => MockItemProducerFactory.IsSqlQuerySpecEqual(sqlQuerySpec, specInput)),
                previousContinuationToken,
                It.Is<PartitionKeyRangeIdentity>(rangeId => string.Equals(rangeId.PartitionKeyRangeId, partitionKeyRange.Id) && string.Equals(rangeId.CollectionRid, containerRid)),
                It.IsAny<bool>(),
                maxPageSize,
                cancellationTokenForMocks))
                .Returns(Task.FromResult(querySplitResponse));

            // Recurse to mock the pages of the child partitions produced by the split.
            GenerateAndMockResponseHelper(
                mockQueryClient: mockQueryClient,
                mockRoutingMap: mockRoutingMap,
                allItemsOrdered: allItemsOrdered,
                isOrderByQuery: isOrderByQuery,
                sqlQuerySpec: sqlQuerySpec,
                containerRid: containerRid,
                initContinuationToken: previousContinuationToken,
                maxPageSize: maxPageSize,
                mockResponseForSinglePartition: partitionAndMessages.Split,
                cancellationTokenForMocks: cancellationTokenForMocks);
        }
    }

    return(allItemsOrdered);
}
/// <summary>
/// Applies an ORDER BY clause to the data source: sorts by each order-by item left to right,
/// then by the owning partition's MinInclusive (left-most partition first), and finally breaks
/// remaining ties within a partition by document resource id. For a single order-by item,
/// elements whose sort expression evaluates to Undefined are filtered out first.
/// </summary>
/// <param name="dataSource">Elements to sort.</param>
/// <param name="sqlOrderByClause">Parsed ORDER BY clause; must have at least one item.</param>
/// <param name="ridToPartitionKeyRange">Maps each element's _rid to the partition it came from.</param>
private static IEnumerable<CosmosElement> ExecuteOrderByClause(
    IEnumerable<CosmosElement> dataSource,
    SqlOrderByClause sqlOrderByClause,
    IReadOnlyDictionary<string, PartitionKeyRange> ridToPartitionKeyRange)
{
    // Sort by the columns left to right
    SqlOrderByItem firstItem = sqlOrderByClause.OrderByItems[0];

    // Since we don't supply an explicit index on the policy undefined items don't show up in the sort order
    if (sqlOrderByClause.OrderByItems.Length == 1)
    {
        dataSource = dataSource.Where(element => firstItem.Expression.Accept(
            ScalarExpressionEvaluator.Singleton,
            element) != Undefined);
    }

    IOrderedEnumerable<CosmosElement> orderedDataSource;
    if (firstItem.IsDescending)
    {
        orderedDataSource = dataSource.OrderByDescending(
            element => firstItem.Expression.Accept(
                ScalarExpressionEvaluator.Singleton,
                element));
    }
    else
    {
        orderedDataSource = dataSource.OrderBy(
            element => firstItem.Expression.Accept(
                ScalarExpressionEvaluator.Singleton,
                element));
    }

    // Secondary order-by items are applied as stable ThenBy keys.
    foreach (SqlOrderByItem sqlOrderByItem in sqlOrderByClause.OrderByItems.Skip(1))
    {
        if (sqlOrderByItem.IsDescending)
        {
            orderedDataSource = orderedDataSource.ThenByDescending(
                element => sqlOrderByItem.Expression.Accept(
                    ScalarExpressionEvaluator.Singleton,
                    element));
        }
        else
        {
            orderedDataSource = orderedDataSource.ThenBy(
                element => sqlOrderByItem.Expression.Accept(
                    ScalarExpressionEvaluator.Singleton,
                    element));
        }
    }

    // Grab from the left most partition first
    orderedDataSource = orderedDataSource
        .ThenBy((element) =>
        {
            string rid = ((CosmosString)((CosmosObject)element)["_rid"]).Value;
            PartitionKeyRange partitionKeyRange = ridToPartitionKeyRange[rid];
            return(partitionKeyRange.MinInclusive);
        },
        StringComparer.Ordinal);

    // Break all final ties within partition by document id
    if (firstItem.IsDescending)
    {
        orderedDataSource = orderedDataSource
            .ThenByDescending(element =>
                ResourceId.Parse(((CosmosString)((CosmosObject)element)["_rid"]).Value).Document);
    }
    else
    {
        orderedDataSource = orderedDataSource
            .ThenBy(element =>
                ResourceId.Parse(((CosmosString)((CosmosObject)element)["_rid"]).Value).Document);
    }

    return(orderedDataSource);
}
/// <summary>
/// Initializes a new instance of the DocumentProducerTree class.
/// </summary>
/// <param name="partitionKeyRange">The partition key range.</param>
/// <param name="createRequestFunc">Callback to create a request.</param>
/// <param name="executeRequestFunc">Callback to execute a request.</param>
/// <param name="createRetryPolicyFunc">Callback to create a retry policy.</param>
/// <param name="produceAsyncCompleteCallback">Callback to invoke once a fetch finishes.</param>
/// <param name="documentProducerTreeComparer">Comparer to determine, which tree to produce from.</param>
/// <param name="equalityComparer">Comparer to see if we need to return the continuation token for a partition.</param>
/// <param name="client">The client</param>
/// <param name="deferFirstPage">Whether or not to defer fetching the first page.</param>
/// <param name="collectionRid">The collection to drain from.</param>
/// <param name="initialPageSize">The initial page size.</param>
/// <param name="initialContinuationToken">The initial continuation token.</param>
public DocumentProducerTree(
    PartitionKeyRange partitionKeyRange,
    Func<PartitionKeyRange, string, int, DocumentServiceRequest> createRequestFunc,
    Func<DocumentServiceRequest, CancellationToken, Task<FeedResponse<CosmosElement>>> executeRequestFunc,
    Func<IDocumentClientRetryPolicy> createRetryPolicyFunc,
    Action<DocumentProducerTree, int, double, QueryMetrics, long, CancellationToken> produceAsyncCompleteCallback,
    IComparer<DocumentProducerTree> documentProducerTreeComparer,
    IEqualityComparer<CosmosElement> equalityComparer,
    IDocumentQueryClient client,
    bool deferFirstPage,
    string collectionRid,
    long initialPageSize = 50,
    string initialContinuationToken = null)
{
    // Argument validation. nameof(...) alone produces the same string the previous
    // $"{nameof(...)}" interpolation did; the duplicated documentProducerTreeComparer
    // null-check that used to follow produceAsyncCompleteCallback has been removed.
    if (documentProducerTreeComparer == null)
    {
        throw new ArgumentNullException(nameof(documentProducerTreeComparer));
    }

    if (createRequestFunc == null)
    {
        throw new ArgumentNullException(nameof(createRequestFunc));
    }

    if (executeRequestFunc == null)
    {
        throw new ArgumentNullException(nameof(executeRequestFunc));
    }

    if (createRetryPolicyFunc == null)
    {
        throw new ArgumentNullException(nameof(createRetryPolicyFunc));
    }

    if (produceAsyncCompleteCallback == null)
    {
        throw new ArgumentNullException(nameof(produceAsyncCompleteCallback));
    }

    if (equalityComparer == null)
    {
        throw new ArgumentNullException(nameof(equalityComparer));
    }

    if (client == null)
    {
        throw new ArgumentNullException(nameof(client));
    }

    if (string.IsNullOrEmpty(collectionRid))
    {
        throw new ArgumentException($"{nameof(collectionRid)} can not be null or empty.");
    }

    // The root producer drains this tree's own partition; the callback is re-wrapped so
    // completions are reported against the tree rather than the inner producer.
    this.root = new DocumentProducer(
        partitionKeyRange,
        createRequestFunc,
        executeRequestFunc,
        createRetryPolicyFunc,
        (documentProducer, itemsBuffered, resourceUnitUsage, queryMetrics, requestLength, token) => produceAsyncCompleteCallback(this, itemsBuffered, resourceUnitUsage, queryMetrics, requestLength, token),
        equalityComparer,
        initialPageSize,
        initialContinuationToken);

    this.children = new PriorityQueue<DocumentProducerTree>(documentProducerTreeComparer, true);
    this.deferFirstPage = deferFirstPage;
    this.client = client;
    this.collectionRid = collectionRid;

    // Factory used to spawn child trees (e.g. after a partition split) with the same wiring.
    this.createDocumentProducerTreeCallback = DocumentProducerTree.CreateDocumentProducerTreeCallback(
        createRequestFunc,
        executeRequestFunc,
        createRetryPolicyFunc,
        produceAsyncCompleteCallback,
        documentProducerTreeComparer,
        equalityComparer,
        client,
        deferFirstPage,
        collectionRid,
        initialPageSize);

    // Binary semaphore (at most one concurrent holder) used for split-proof execution.
    this.executeWithSplitProofingSemaphore = new SemaphoreSlim(1, 1);
}
/// <summary>
/// Builds a SessionContainer pre-populated with tokens for 2 collections x 5 partition key
/// ranges (both rid-based and name-based maps), then verifies that:
/// (1) a known range id resolves to its own token, and
/// (2) an unknown range id falls back to a parent range's token via the resolved range's
/// Parents list ("range_2" is a parent, so getSessionToken(2) is expected).
/// </summary>
/// <param name="getSessionToken">Factory producing a deterministic token for a range index.</param>
private void TestSessionContainer(Func<int, ISessionToken> getSessionToken)
{
    ConcurrentDictionary<UInt64, ConcurrentDictionary<string, ISessionToken>> sessionTokens = new ConcurrentDictionary<ulong, ConcurrentDictionary<string, ISessionToken>>();
    ConcurrentDictionary<string, ConcurrentDictionary<string, ISessionToken>> sessionTokensNameBased = new ConcurrentDictionary<string, ConcurrentDictionary<string, ISessionToken>>();
    int numCollections = 2;
    int numPartitionKeyRangeIds = 5;

    for (int i = 0; i < numCollections; i++)
    {
        string collName = "dbs/db1/colls/collName_" + i;
        ulong collId = (ulong)i;

        ConcurrentDictionary<string, ISessionToken> idToTokenMap = new ConcurrentDictionary<string, ISessionToken>();
        ConcurrentDictionary<string, ISessionToken> idToTokenMapNameBased = new ConcurrentDictionary<string, ISessionToken>();

        for (int j = 0; j < numPartitionKeyRangeIds; j++)
        {
            string range = "range_" + j;
            ISessionToken token = getSessionToken(j);

            bool successFlag = idToTokenMap.TryAdd(range, token) && idToTokenMapNameBased.TryAdd(range, token);
            if (!successFlag)
            {
                throw new InvalidOperationException("Add should not fail!");
            }
        }

        bool successFlag2 = sessionTokens.TryAdd(collId, idToTokenMap) && sessionTokensNameBased.TryAdd(collName, idToTokenMapNameBased);
        if (!successFlag2)
        {
            throw new InvalidOperationException("Add should not fail!");
        }
    }

    SessionContainer sessionContainer = new SessionContainer("127.0.0.1", sessionTokens, sessionTokensNameBased);

    using (DocumentServiceRequest request = DocumentServiceRequest.Create(
        Cosmos.Internal.OperationType.ReadFeed,
        Cosmos.Internal.ResourceType.Collection,
        new Uri("https://foo.com/dbs/db1/colls/collName_1", UriKind.Absolute),
        new MemoryStream(Encoding.UTF8.GetBytes("content1")),
        AuthorizationTokenType.PrimaryMasterKey,
        null))
    {
        // Direct hit: "range_1" exists in the map.
        ISessionToken sessionToken = sessionContainer.ResolvePartitionLocalSessionToken(request, "range_1");
        Assert.IsTrue(sessionToken.Equals(getSessionToken(1)));

        // Unknown range id (range_15) whose Parents include a known range ("range_2");
        // resolution should fall back to the parent's token.
        DocumentServiceRequestContext dsrContext = new DocumentServiceRequestContext();
        PartitionKeyRange resolvedPKRange = new PartitionKeyRange();
        resolvedPKRange.Id = "range_" + (numPartitionKeyRangeIds + 10);
        resolvedPKRange.Parents = new Collection<string>(new List<string> { "range_2", "range_x" });
        dsrContext.ResolvedPartitionKeyRange = resolvedPKRange;
        request.RequestContext = dsrContext;

        sessionToken = sessionContainer.ResolvePartitionLocalSessionToken(request, resolvedPKRange.Id);
        Assert.IsTrue(sessionToken.Equals(getSessionToken(2)));
    }
}
/// <summary>
/// Gets <see cref="PartitionKeyRange"/> instance which corresponds to <paramref name="rangeFromContinuationToken"/>
/// </summary>
/// <param name="providedPartitionKeyRanges">Ranges the caller intends to enumerate.</param>
/// <param name="routingMapProvider">Provider used to resolve partition key ranges for the collection.</param>
/// <param name="collectionRid">Resource id of the target collection.</param>
/// <param name="rangeFromContinuationToken">Range recorded in the continuation token; empty on the first request.</param>
/// <param name="suppliedTokens">Composite continuation tokens supplied with the request.</param>
/// <param name="direction">Enumeration direction; Forward by default.</param>
/// <returns>null if collection with specified <paramref name="collectionRid"/> doesn't exist, which potentially means
/// that collection was resolved to outdated Rid by name. Also null can be returned if <paramref name="rangeFromContinuationToken"/>
/// is not found - this means it was split.
/// </returns>
public virtual async Task<ResolvedRangeInfo> TryGetTargetRangeFromContinuationTokenRangeAsync(
    IReadOnlyList<Range<string>> providedPartitionKeyRanges,
    IRoutingMapProvider routingMapProvider,
    string collectionRid,
    Range<string> rangeFromContinuationToken,
    List<CompositeContinuationToken> suppliedTokens,
    RntdbEnumerationDirection direction = RntdbEnumerationDirection.Forward)
{
    // For queries such as "SELECT * FROM root WHERE false",
    // we will have empty ranges and just forward the request to the first partition
    if (providedPartitionKeyRanges.Count == 0)
    {
        return(new ResolvedRangeInfo(
            await routingMapProvider.TryGetRangeByEffectivePartitionKeyAsync(
                collectionRid,
                PartitionKeyInternal.MinimumInclusiveEffectivePartitionKey),
            suppliedTokens));
    }

    // Initially currentRange will be empty
    if (rangeFromContinuationToken.IsEmpty)
    {
        if (direction == RntdbEnumerationDirection.Reverse)
        {
            // Reverse enumeration starts from the last overlapping range.
            PartitionKeyRange lastPartitionKeyRange = (await routingMapProvider.TryGetOverlappingRangesAsync(collectionRid, providedPartitionKeyRanges.Single())).Last();
            return(new ResolvedRangeInfo(
                lastPartitionKeyRange,
                suppliedTokens));
        }

        // Forward enumeration starts from the provided range with the smallest Min.
        Range<string> minimumRange = PartitionRoutingHelper.Min(
            providedPartitionKeyRanges,
            Range<string>.MinComparer.Instance);

        return(new ResolvedRangeInfo(
            await routingMapProvider.TryGetRangeByEffectivePartitionKeyAsync(collectionRid, minimumRange.Min),
            suppliedTokens));
    }

    // Resume: look up the range whose Min matches the continuation token's range.
    PartitionKeyRange targetPartitionKeyRange = await routingMapProvider.TryGetRangeByEffectivePartitionKeyAsync(collectionRid, rangeFromContinuationToken.Min);
    if (targetPartitionKeyRange == null)
    {
        return(new ResolvedRangeInfo(null, suppliedTokens));
    }

    if (!rangeFromContinuationToken.Equals(targetPartitionKeyRange.ToRange()))
    {
        // Cannot find target range. Either collection was resolved incorrectly or the range was split
        List<PartitionKeyRange> replacedRanges = (await routingMapProvider.TryGetOverlappingRangesAsync(collectionRid, rangeFromContinuationToken, true)).ToList();

        if (replacedRanges == null || replacedRanges.Count < 1)
        {
            return(new ResolvedRangeInfo(null, null));
        }
        else
        {
            // The replacement ranges must exactly cover [Min, Max) of the old range,
            // otherwise the continuation token does not correspond to a valid split.
            if (!(replacedRanges[0].MinInclusive.Equals(rangeFromContinuationToken.Min) && replacedRanges[replacedRanges.Count - 1].MaxExclusive.Equals(rangeFromContinuationToken.Max)))
            {
                return(new ResolvedRangeInfo(null, null));
            }
        }

        if (direction == RntdbEnumerationDirection.Reverse)
        {
            replacedRanges.Reverse();
        }

        List<CompositeContinuationToken> continuationTokensToBePersisted = null;

        if (suppliedTokens != null && suppliedTokens.Count > 0)
        {
            // Fan the first supplied token out to each child range of the split;
            // the remaining supplied tokens are carried over unchanged.
            continuationTokensToBePersisted = new List<CompositeContinuationToken>(replacedRanges.Count + suppliedTokens.Count - 1);

            foreach (PartitionKeyRange partitionKeyRange in replacedRanges)
            {
                CompositeContinuationToken token = (CompositeContinuationToken)suppliedTokens[0].ShallowCopy();
                token.Range = partitionKeyRange.ToRange();
                continuationTokensToBePersisted.Add(token);
            }

            continuationTokensToBePersisted.AddRange(suppliedTokens.Skip(1));
        }

        return(new ResolvedRangeInfo(replacedRanges[0], continuationTokensToBePersisted));
    }

    return(new ResolvedRangeInfo(targetPartitionKeyRange, suppliedTokens));
}
/// <summary>
/// Creates a query request targeted at a specific partition key range: builds the base
/// request, stamps the range/collection routing info onto it, and propagates the
/// caller-supplied feed-option properties.
/// </summary>
public DocumentServiceRequest CreateDocumentServiceRequest(INameValueCollection requestHeaders, SqlQuerySpec querySpec, PartitionKeyRange targetRange, string collectionRid)
{
    DocumentServiceRequest serviceRequest = this.CreateDocumentServiceRequest(requestHeaders, querySpec);
    this.PopulatePartitionKeyRangeInfo(serviceRequest, targetRange, collectionRid);
    serviceRequest.Properties = this.feedOptions.Properties;
    return serviceRequest;
}
/// <summary>
/// Rewrites the backend continuation header so it carries partition key range information.
/// With multiple continuation tokens (post-split), the head token is updated or dropped and the
/// whole list is serialized back. With a single range, when the backend continuation is empty the
/// enumeration advances to the next range in the requested direction; returns false only when the
/// next range cannot be resolved (stale routing map), true otherwise (including "enumeration done").
/// </summary>
public virtual async Task<bool> TryAddPartitionKeyRangeToContinuationTokenAsync(
    INameValueCollection backendResponseHeaders,
    IReadOnlyList<Range<string>> providedPartitionKeyRanges,
    IRoutingMapProvider routingMapProvider,
    string collectionRid,
    ResolvedRangeInfo resolvedRangeInfo,
    RntdbEnumerationDirection direction = RntdbEnumerationDirection.Forward)
{
    Debug.Assert(resolvedRangeInfo.ResolvedRange != null, "ResolvedRange can't be null");

    PartitionKeyRange currentRange = resolvedRangeInfo.ResolvedRange;

    // IF : Split happened, or already had multiple target ranges in the continuation
    if (resolvedRangeInfo.ContinuationTokens != null && resolvedRangeInfo.ContinuationTokens.Count > 1)
    {
        if (!string.IsNullOrEmpty(backendResponseHeaders[HttpConstants.HttpHeaders.Continuation]))
        {
            // Current range still has more data: update its token in place.
            resolvedRangeInfo.ContinuationTokens[0].Token = backendResponseHeaders[HttpConstants.HttpHeaders.Continuation];
        }
        else
        {
            // Current range is drained: drop it and move on to the next token.
            resolvedRangeInfo.ContinuationTokens.RemoveAt(0);
        }

        backendResponseHeaders[HttpConstants.HttpHeaders.Continuation] = JsonConvert.SerializeObject(resolvedRangeInfo.ContinuationTokens);
    }
    else
    {
        //// ELSE: Single target Range was provided, and no split happened

        PartitionKeyRange rangeToUse = currentRange;

        // We only need to get the next range if we have to
        if (string.IsNullOrEmpty(backendResponseHeaders[HttpConstants.HttpHeaders.Continuation]))
        {
            if (direction == RntdbEnumerationDirection.Reverse)
            {
                // Reverse: step to the immediate predecessor of the current range.
                rangeToUse = PartitionRoutingHelper.MinBefore(
                    (await routingMapProvider.TryGetOverlappingRangesAsync(collectionRid, providedPartitionKeyRanges.Single())).ToList(),
                    currentRange);
            }
            else
            {
                // Forward: find the next provided range after the current one.
                Range<string> nextProvidedRange = PartitionRoutingHelper.MinAfter(
                    providedPartitionKeyRanges,
                    currentRange.ToRange(),
                    Range<string>.MaxComparer.Instance);

                if (nextProvidedRange == null)
                {
                    // Nothing left to enumerate.
                    return(true);
                }

                // Resume from whichever is greater: the next provided range's start or the
                // end of the current range (skips any gap already covered).
                string max = string.CompareOrdinal(nextProvidedRange.Min, currentRange.MaxExclusive) > 0 ? nextProvidedRange.Min : currentRange.MaxExclusive;

                if (string.CompareOrdinal(max, PartitionKeyInternal.MaximumExclusiveEffectivePartitionKey) == 0)
                {
                    // Reached the end of the partition key space.
                    return(true);
                }

                PartitionKeyRange nextRange = await routingMapProvider.TryGetRangeByEffectivePartitionKeyAsync(collectionRid, max);
                if (nextRange == null)
                {
                    // Could not resolve the next range (routing map possibly stale).
                    return(false);
                }

                rangeToUse = nextRange;
            }
        }

        if (rangeToUse != null)
        {
            backendResponseHeaders[HttpConstants.HttpHeaders.Continuation] = PartitionRoutingHelper.AddPartitionKeyRangeToContinuationToken(
                backendResponseHeaders[HttpConstants.HttpHeaders.Continuation],
                rangeToUse);
        }
    }

    return(true);
}
/// <summary>
/// Resolves the endpoint of the partition for the given request
/// </summary>
/// <param name="request">Request for which the partition endpoint resolution is to be performed</param>
/// <param name="forceRefreshPartitionAddresses">Force refresh the partition's endpoint</param>
/// <param name="cancellationToken">Cancellation token</param>
/// <returns>An instance of <see cref="ResolutionResult"/>.</returns>
private async Task<ResolutionResult> ResolveAddressesAndIdentityAsync(
    DocumentServiceRequest request,
    bool forceRefreshPartitionAddresses,
    CancellationToken cancellationToken)
{
    cancellationToken.ThrowIfCancellationRequested();

    // Case 1: the request already carries an explicit ServiceIdentity — resolve addresses
    // for it directly, refreshing the master identity once if the first lookup fails.
    if (request.ServiceIdentity != null)
    {
        if (request.ServiceIdentity.IsMasterService &&
            request.ForceMasterRefresh &&
            this.masterServiceIdentityProvider != null)
        {
            await this.masterServiceIdentityProvider.RefreshAsync(request.ServiceIdentity, cancellationToken);
            ServiceIdentity newMasterServiceIdentity = this.masterServiceIdentityProvider.MasterServiceIdentity;

            bool masterServiceIdentityChanged = newMasterServiceIdentity != null &&
                !newMasterServiceIdentity.Equals(request.ServiceIdentity);

            DefaultTrace.TraceInformation(
                "Refreshed master service identity. masterServiceIdentityChanged = {0}, " +
                "previousRequestServiceIdentity = {1}, newMasterServiceIdentity = {2}",
                masterServiceIdentityChanged,
                request.ServiceIdentity,
                newMasterServiceIdentity);

            if (masterServiceIdentityChanged)
            {
                request.RouteTo(newMasterServiceIdentity);
            }
        }

        // In this case we don't populate request.RequestContext.ResolvedPartitionKeyRangeId,
        // which is needed for session token.
        // The assumption is that:
        // 1. Master requests never use session consistency.
        // 2. Service requests (like collection create etc.) don't use session consistency.
        // 3. Requests which target specific partition of an existing collection will use
        //    x-ms-documentdb-partitionkeyrangeid header to send request to specific partition
        //    and will not set request.ServiceIdentity
        ServiceIdentity identity = request.ServiceIdentity;
        PartitionAddressInformation addresses = await this.addressCache.TryGetAddressesAsync(request, null, identity, forceRefreshPartitionAddresses, cancellationToken);

        if (addresses == null && identity.IsMasterService && this.masterServiceIdentityProvider != null)
        {
            DefaultTrace.TraceWarning("Could not get addresses for MasterServiceIdentity {0}. will refresh masterServiceIdentity and retry", identity);
            await this.masterServiceIdentityProvider.RefreshAsync(identity, cancellationToken);
            identity = this.masterServiceIdentityProvider.MasterServiceIdentity;
            addresses = await this.addressCache.TryGetAddressesAsync(request, null, identity, forceRefreshPartitionAddresses, cancellationToken);
        }

        if (addresses == null)
        {
            DefaultTrace.TraceInformation("Could not get addresses for explicitly specified ServiceIdentity {0}", identity);
            throw new NotFoundException() { ResourceAddress = request.ResourceAddress };
        }

        return(new ResolutionResult(addresses, identity));
    }

    // Case 2: master-read request without a partition key range identity — resolve the
    // master partition's addresses.
    if (ReplicatedResourceClient.IsReadingFromMaster(request.ResourceType, request.OperationType) && request.PartitionKeyRangeIdentity == null)
    {
        DefaultTrace.TraceInformation("Resolving Master service address, forceMasterRefresh: {0}, currentMaster: {1}",
            request.ForceMasterRefresh,
            this.masterServiceIdentityProvider?.MasterServiceIdentity);

        // Client implementation, GlobalAddressResolver passes in a null IMasterServiceIdentityProvider,
        // because it doesn't actually use the serviceIdentity in the addressCache.TryGetAddresses method.
        // In GatewayAddressCache.cs, the master address is resolved by making a call to Gateway AddressFeed,
        // not using the serviceIdentity that is passed in
        if (request.ForceMasterRefresh && this.masterServiceIdentityProvider != null)
        {
            ServiceIdentity previousMasterService = this.masterServiceIdentityProvider.MasterServiceIdentity;
            await this.masterServiceIdentityProvider.RefreshAsync(previousMasterService, cancellationToken);
        }

        ServiceIdentity serviceIdentity = this.masterServiceIdentityProvider?.MasterServiceIdentity;
        PartitionKeyRangeIdentity partitionKeyRangeIdentity = this.masterPartitionKeyRangeIdentity;
        PartitionAddressInformation addresses = await this.addressCache.TryGetAddressesAsync(
            request,
            partitionKeyRangeIdentity,
            serviceIdentity,
            forceRefreshPartitionAddresses,
            cancellationToken);

        if (addresses == null)
        {
            // This shouldn't really happen.
            DefaultTrace.TraceCritical("Could not get addresses for master partition {0}", serviceIdentity);
            throw new NotFoundException() { ResourceAddress = request.ResourceAddress };
        }

        PartitionKeyRange partitionKeyRange = new PartitionKeyRange { Id = PartitionKeyRange.MasterPartitionKeyRangeId };
        return(new ResolutionResult(partitionKeyRange, addresses, serviceIdentity));
    }

    // Case 3: server-partition request — resolve via collection cache + routing map cache,
    // refreshing each (at most once) when resolution fails with stale data.
    bool collectionCacheIsUptoDate = !request.IsNameBased ||
        (request.PartitionKeyRangeIdentity != null && request.PartitionKeyRangeIdentity.CollectionRid != null);

    bool collectionRoutingMapCacheIsUptoDate = false;

    ContainerProperties collection = await this.collectionCache.ResolveCollectionAsync(request, cancellationToken);
    CollectionRoutingMap routingMap = await this.collectionRoutingMapCache.TryLookupAsync(
        collection.ResourceId, null, request, cancellationToken);

    if (routingMap != null && request.ForceCollectionRoutingMapRefresh)
    {
        DefaultTrace.TraceInformation(
            "AddressResolver.ResolveAddressesAndIdentityAsync ForceCollectionRoutingMapRefresh collection.ResourceId = {0}",
            collection.ResourceId);

        routingMap = await this.collectionRoutingMapCache.TryLookupAsync(collection.ResourceId, routingMap, request, cancellationToken);
    }

    if (request.ForcePartitionKeyRangeRefresh)
    {
        collectionRoutingMapCacheIsUptoDate = true;
        request.ForcePartitionKeyRangeRefresh = false;
        if (routingMap != null)
        {
            routingMap = await this.collectionRoutingMapCache.TryLookupAsync(collection.ResourceId, routingMap, request, cancellationToken);
        }
    }

    if (routingMap == null && !collectionCacheIsUptoDate)
    {
        // Routing map was not found by resolved collection rid. Maybe collection rid is outdated.
        // Refresh collection cache and reresolve routing map.
        request.ForceNameCacheRefresh = true;
        collectionCacheIsUptoDate = true;
        collectionRoutingMapCacheIsUptoDate = false;
        collection = await this.collectionCache.ResolveCollectionAsync(request, cancellationToken);
        routingMap = await this.collectionRoutingMapCache.TryLookupAsync(
            collection.ResourceId,
            previousValue: null,
            request: request,
            cancellationToken: cancellationToken);
    }

    AddressResolver.EnsureRoutingMapPresent(request, routingMap, collection);

    // At this point we have both collection and routingMap.
    ResolutionResult result = await this.TryResolveServerPartitionAsync(
        request,
        collection,
        routingMap,
        collectionCacheIsUptoDate,
        collectionRoutingMapCacheIsUptodate: collectionRoutingMapCacheIsUptoDate,
        forceRefreshPartitionAddresses: forceRefreshPartitionAddresses,
        cancellationToken: cancellationToken);

    if (result == null)
    {
        // Couldn't resolve server partition or its addresses.
        // Either collection cache is outdated or routing map cache is outdated.
        if (!collectionCacheIsUptoDate)
        {
            request.ForceNameCacheRefresh = true;
            collectionCacheIsUptoDate = true;
            collection = await this.collectionCache.ResolveCollectionAsync(request, cancellationToken);
            if (collection.ResourceId != routingMap.CollectionUniqueId)
            {
                // Collection cache was stale. We resolved to new Rid. routing map cache
                // is potentially stale for this new collection rid. Mark it as such.
                collectionRoutingMapCacheIsUptoDate = false;
                routingMap = await this.collectionRoutingMapCache.TryLookupAsync(
                    collection.ResourceId,
                    previousValue: null,
                    request: request,
                    cancellationToken: cancellationToken);
            }
        }

        if (!collectionRoutingMapCacheIsUptoDate)
        {
            collectionRoutingMapCacheIsUptoDate = true;
            routingMap = await this.collectionRoutingMapCache.TryLookupAsync(
                collection.ResourceId,
                previousValue: routingMap,
                request: request,
                cancellationToken: cancellationToken);
        }

        AddressResolver.EnsureRoutingMapPresent(request, routingMap, collection);

        // Retry with both caches known up to date.
        result = await this.TryResolveServerPartitionAsync(
            request,
            collection,
            routingMap,
            collectionCacheIsUptodate: true,
            collectionRoutingMapCacheIsUptodate: true,
            forceRefreshPartitionAddresses: forceRefreshPartitionAddresses,
            cancellationToken: cancellationToken);
    }

    if (result == null)
    {
        DefaultTrace.TraceInformation("Couldn't route partitionkeyrange-oblivious request after retry/cache refresh. Collection doesn't exist.");

        // At this point collection cache and routing map caches are refreshed.
        // The only reason we will get here is if collection doesn't exist.
        // Case when partitionkeyrange doesn't exist is handled in the corresponding method.
        throw new NotFoundException() { ResourceAddress = request.ResourceAddress };
    }

    if (request.IsNameBased)
    {
        // Append collection rid.
        // If we resolved collection rid incorrectly because of outdated cache, this can lead
        // to incorrect routing decisions. But backend will validate collection rid and throw
        // InvalidPartitionException if we reach wrong collection.
        // Also this header will be used by backend to inject collection rid into metrics for
        // throttled requests.
        request.Headers[WFConstants.BackendHeaders.CollectionRid] = collection.ResourceId;
    }

    return(result);
}
/// <summary>
/// Returns the greatest partition key range in <paramref name="values"/> that is still strictly
/// less than <paramref name="minValue"/> (its immediate predecessor by range-min ordering),
/// or null when no range precedes <paramref name="minValue"/>.
/// </summary>
/// <param name="values">Candidate ranges; must be non-empty.</param>
/// <param name="minValue">The range whose predecessor is sought.</param>
/// <returns>The closest preceding range, or null if none exists.</returns>
/// <exception cref="ArgumentException">Thrown when <paramref name="values"/> is empty.</exception>
private static PartitionKeyRange MinBefore(IReadOnlyList<PartitionKeyRange> values, PartitionKeyRange minValue)
{
    if (values.Count == 0)
    {
        // FIX: the single-argument ArgumentException ctor treats its argument as the *message*,
        // so the original `new ArgumentException(nameof(values))` produced the useless message
        // "values" with no param name (CA2208). Use the (message, paramName) overload instead.
        throw new ArgumentException($"{nameof(values)} must not be empty.", nameof(values));
    }

    IComparer<Range<string>> comparer = Range<string>.MinComparer.Instance;
    PartitionKeyRange min = null;
    foreach (PartitionKeyRange value in values)
    {
        // Keep the largest candidate that still compares strictly before minValue.
        if (comparer.Compare(value.ToRange(), minValue.ToRange()) < 0
            && (min == null || comparer.Compare(value.ToRange(), min.ToRange()) > 0))
        {
            min = value;
        }
    }

    return min;
}
/// <summary>
/// Prints the storage usage ("documentsSize") for a single partition key range to the console,
/// and — when the range matches the configured <c>Options.PartitionId</c> — also prints the top
/// partition keys sampled from that range's data.
/// </summary>
/// <param name="collection">The resolved collection the range belongs to.</param>
/// <param name="pkRange">The partition key range whose stats are printed.</param>
private async Task PrintPartitionStatsByPartitionKeyRange(ResourceResponse<DocumentCollection> collection, PartitionKeyRange pkRange)
{
    ResourceResponse<Document> perPartitionResponse = await GetPartitionUsageStats(collection, pkRange);
    if (perPartitionResponse == null)
    {
        // No stats available for this range; report zero rather than failing.
        Console.WriteLine("\tPartition.{0} documentsSize: 0 GB", pkRange.Id);
        return;
    }

    // CurrentResourceQuotaUsage is a "name=value;name=value;..." list.
    string[] perPartitionKeyValuePairs = perPartitionResponse.CurrentResourceQuotaUsage.Split(new char[] { ';' }, StringSplitOptions.RemoveEmptyEntries);
    foreach (string kvp in perPartitionKeyValuePairs)
    {
        // FIX: split each pair once instead of twice, and skip malformed entries (no '=')
        // instead of throwing IndexOutOfRangeException.
        string[] parts = kvp.Split('=');
        if (parts.Length < 2)
        {
            continue;
        }

        string metricName = parts[0];
        string metricValue = parts[1];
        switch (metricName)
        {
            case "documentsSize":
                // Value is divided by 1024^2 to yield GB, which implies the backend reports
                // it in KB — NOTE(review): confirm the unit against the service contract.
                Console.WriteLine("\tPartition.{0} {1}: {2} GB", pkRange.Id, metricName, Math.Round(int.Parse(metricValue) / (1024 * 1024.0), 3));
                break;
            default:
                break;
        }
    }

    if (Options.PartitionId == pkRange.Id)
    {
        await PrintTopPartitionKeysFromSampleData(collection, pkRange);
    }
}
/// <summary>
/// Initializes a new instance of the ItemProducerTree class.
/// </summary>
/// <param name="queryContext">query context.</param>
/// <param name="querySpecForInit">query spec init.</param>
/// <param name="partitionKeyRange">The partition key range.</param>
/// <param name="produceAsyncCompleteCallback">Callback to invoke once a fetch finishes.</param>
/// <param name="itemProducerTreeComparer">Comparer to determine, which tree to produce from.</param>
/// <param name="equalityComparer">Comparer to see if we need to return the continuation token for a partition.</param>
/// <param name="deferFirstPage">Whether or not to defer fetching the first page.</param>
/// <param name="collectionRid">The collection to drain from.</param>
/// <param name="initialPageSize">The initial page size.</param>
/// <param name="initialContinuationToken">The initial continuation token.</param>
/// <exception cref="ArgumentNullException">Thrown when a required reference parameter is null.</exception>
/// <exception cref="ArgumentException">Thrown when <paramref name="collectionRid"/> is null or empty.</exception>
public ItemProducerTree(
    CosmosQueryContext queryContext,
    SqlQuerySpec querySpecForInit,
    PartitionKeyRange partitionKeyRange,
    Action<ItemProducerTree, int, double, QueryMetrics, long, CancellationToken> produceAsyncCompleteCallback,
    IComparer<ItemProducerTree> itemProducerTreeComparer,
    IEqualityComparer<CosmosElement> equalityComparer,
    bool deferFirstPage,
    string collectionRid,
    long initialPageSize = 50,
    string initialContinuationToken = null)
{
    // FIX: `$"{nameof(x)}"` produces exactly the same string as `nameof(x)`; the
    // interpolation was redundant, so the plain nameof is used throughout.
    if (queryContext == null)
    {
        throw new ArgumentNullException(nameof(queryContext));
    }

    if (itemProducerTreeComparer == null)
    {
        throw new ArgumentNullException(nameof(itemProducerTreeComparer));
    }

    if (produceAsyncCompleteCallback == null)
    {
        throw new ArgumentNullException(nameof(produceAsyncCompleteCallback));
    }

    // FIX: removed a second, duplicated null check of itemProducerTreeComparer that appeared
    // here — presumably a copy/paste slip (it may have been intended to validate another
    // parameter such as querySpecForInit; TODO confirm with the original authors).

    if (equalityComparer == null)
    {
        throw new ArgumentNullException(nameof(equalityComparer));
    }

    if (string.IsNullOrEmpty(collectionRid))
    {
        throw new ArgumentException($"{nameof(collectionRid)} can not be null or empty.");
    }

    // The root producer drains this partition directly; its completion callback is re-targeted
    // so that subscribers observe this tree node rather than the leaf producer.
    this.Root = new ItemProducer(
        queryContext,
        querySpecForInit,
        partitionKeyRange,
        (itemProducer, itemsBuffered, resourceUnitUsage, queryMetrics, requestLength, token) => produceAsyncCompleteCallback(this, itemsBuffered, resourceUnitUsage, queryMetrics, requestLength, token),
        equalityComparer,
        initialPageSize,
        initialContinuationToken);

    this.queryClient = queryContext.QueryClient;
    this.children = new PriorityQueue<ItemProducerTree>(itemProducerTreeComparer, true);
    this.deferFirstPage = deferFirstPage;
    this.collectionRid = collectionRid;
    // Child trees (created after a partition split) are built lazily through this callback.
    this.createItemProducerTreeCallback = ItemProducerTree.CreateItemProducerTreeCallback(
        queryContext,
        querySpecForInit,
        produceAsyncCompleteCallback,
        itemProducerTreeComparer,
        equalityComparer,
        deferFirstPage,
        collectionRid,
        initialPageSize);
    // Async-friendly mutual exclusion for the split-proofing execution path.
    this.executeWithSplitProofingSemaphore = new SemaphoreSlim(1, 1);
}
/// <summary>
/// Partitions the supplied ranges into three buckets relative to the range the previous page
/// left off on (the continuation token with the smallest range min): ranges strictly to its
/// left, the target range itself, and ranges to its right. Each bucket is paired with any
/// continuation tokens that match its ranges.
/// </summary>
/// <typeparam name="PartitionedToken">Continuation token type that carries its source range.</typeparam>
/// <param name="partitionKeyRanges">Ranges to map; must be non-empty.</param>
/// <param name="partitionedContinuationTokens">Tokens from the prior page; must be non-empty and no more numerous than the ranges.</param>
/// <returns>
/// The partition mapping, or a failed TryCatch carrying a MalformedContinuationTokenException
/// when the first token's range matches none of the supplied ranges.
/// </returns>
public static TryCatch<PartitionMapping<PartitionedToken>> MonadicGetPartitionMapping<PartitionedToken>(
    IReadOnlyList<PartitionKeyRange> partitionKeyRanges,
    IReadOnlyList<PartitionedToken> partitionedContinuationTokens)
    where PartitionedToken : IPartitionedToken
{
    if (partitionKeyRanges == null)
    {
        throw new ArgumentNullException(nameof(partitionKeyRanges));
    }

    if (partitionedContinuationTokens == null)
    {
        throw new ArgumentNullException(nameof(partitionedContinuationTokens));
    }

    if (partitionKeyRanges.Count < 1)
    {
        throw new ArgumentException($"{nameof(partitionKeyRanges)} must not be empty.", nameof(partitionKeyRanges));
    }

    if (partitionedContinuationTokens.Count < 1)
    {
        // FIX: this branch previously threw ArgumentException(nameof(partitionKeyRanges)) —
        // a copy/paste bug that blamed the wrong parameter (and misused the single-arg ctor,
        // which treats its argument as the message rather than the param name).
        throw new ArgumentException($"{nameof(partitionedContinuationTokens)} must not be empty.", nameof(partitionedContinuationTokens));
    }

    if (partitionedContinuationTokens.Count > partitionKeyRanges.Count)
    {
        throw new ArgumentException($"{nameof(partitionedContinuationTokens)} can not have more elements than {nameof(partitionKeyRanges)}.");
    }

    // Find the continuation token for the partition we left off on:
    PartitionedToken firstContinuationToken = partitionedContinuationTokens
        .OrderBy((partitionedToken) => partitionedToken.Range.Min)
        .First();

    // Segment the ranges based off that:
    ReadOnlyMemory<PartitionKeyRange> sortedRanges = partitionKeyRanges
        .OrderBy((partitionKeyRange) => partitionKeyRange.MinInclusive)
        .ToArray();

    PartitionKeyRange firstContinuationRange = new PartitionKeyRange
    {
        MinInclusive = firstContinuationToken.Range.Min,
        MaxExclusive = firstContinuationToken.Range.Max
    };

    // Binary search matches on MinInclusive only; ranges are assumed disjoint so the min
    // uniquely identifies a range.
    int matchedIndex = sortedRanges.Span.BinarySearch(
        firstContinuationRange,
        Comparer<PartitionKeyRange>.Create((range1, range2) => string.CompareOrdinal(range1.MinInclusive, range2.MinInclusive)));
    if (matchedIndex < 0)
    {
        // The token refers to a range we no longer know about (e.g. a forged or stale token).
        return TryCatch<PartitionMapping<PartitionedToken>>.FromException(
            new MalformedContinuationTokenException(
                $"{RMResources.InvalidContinuationToken} - Could not find continuation token: {firstContinuationToken}"));
    }

    ReadOnlyMemory<PartitionKeyRange> partitionsLeftOfTarget = matchedIndex == 0
        ? ReadOnlyMemory<PartitionKeyRange>.Empty
        : sortedRanges.Slice(start: 0, length: matchedIndex);
    ReadOnlyMemory<PartitionKeyRange> targetPartition = sortedRanges.Slice(start: matchedIndex, length: 1);
    ReadOnlyMemory<PartitionKeyRange> partitionsRightOfTarget = matchedIndex == sortedRanges.Length - 1
        ? ReadOnlyMemory<PartitionKeyRange>.Empty
        : sortedRanges.Slice(start: matchedIndex + 1);

    // Create the continuation token mapping for each region.
    IReadOnlyDictionary<PartitionKeyRange, PartitionedToken> mappingForPartitionsLeftOfTarget = MatchRangesToContinuationTokens(
        partitionsLeftOfTarget,
        partitionedContinuationTokens);
    IReadOnlyDictionary<PartitionKeyRange, PartitionedToken> mappingForTargetPartition = MatchRangesToContinuationTokens(
        targetPartition,
        partitionedContinuationTokens);
    IReadOnlyDictionary<PartitionKeyRange, PartitionedToken> mappingForPartitionsRightOfTarget = MatchRangesToContinuationTokens(
        partitionsRightOfTarget,
        partitionedContinuationTokens);

    return TryCatch<PartitionMapping<PartitionedToken>>.FromResult(
        new PartitionMapping<PartitionedToken>(
            partitionsLeftOfTarget: mappingForPartitionsLeftOfTarget,
            targetPartition: mappingForTargetPartition,
            partitionsRightOfTarget: mappingForPartitionsRightOfTarget));
}