/// <summary>
/// Partitions the supplied EPK ranges into three regions — left of, equal to, and right of
/// the range the query left off on — and matches each region's ranges to their continuation tokens.
/// </summary>
/// <typeparam name="PartitionedToken">The concrete continuation token type.</typeparam>
/// <param name="partitionKeyRanges">The full, non-empty set of target EPK ranges.</param>
/// <param name="partitionedContinuationTokens">The non-empty set of continuation tokens supplied by the caller.</param>
/// <returns>
/// A <see cref="TryCatch{T}"/> holding the partition mapping, or a
/// <see cref="MalformedContinuationTokenException"/> when the token does not align with any range.
/// </returns>
public static TryCatch<PartitionMapping<PartitionedToken>> MonadicGetPartitionMapping<PartitionedToken>(
    IReadOnlyList<FeedRangeEpk> partitionKeyRanges,
    IReadOnlyList<PartitionedToken> partitionedContinuationTokens)
    where PartitionedToken : IPartitionedToken
{
    if (partitionKeyRanges == null)
    {
        throw new ArgumentNullException(nameof(partitionKeyRanges));
    }

    if (partitionedContinuationTokens == null)
    {
        throw new ArgumentNullException(nameof(partitionedContinuationTokens));
    }

    if (partitionKeyRanges.Count < 1)
    {
        throw new ArgumentException(nameof(partitionKeyRanges));
    }

    if (partitionedContinuationTokens.Count < 1)
    {
        // BUGFIX: this guard previously reported nameof(partitionKeyRanges),
        // blaming the wrong parameter.
        throw new ArgumentException(nameof(partitionedContinuationTokens));
    }

    if (partitionedContinuationTokens.Count > partitionKeyRanges.Count)
    {
        throw new ArgumentException($"{nameof(partitionedContinuationTokens)} can not have more elements than {nameof(partitionKeyRanges)}.");
    }

    // Find the continuation token for the partition we left off on:
    PartitionedToken firstContinuationToken = partitionedContinuationTokens
        .OrderBy((partitionedToken) => partitionedToken.Range.Min)
        .First();

    // Segment the ranges based off that:
    ReadOnlyMemory<FeedRangeEpk> sortedRanges = partitionKeyRanges
        .OrderBy((partitionKeyRange) => partitionKeyRange.Range.Min)
        .ToArray();

    FeedRangeEpk firstContinuationRange = new FeedRangeEpk(
        new Documents.Routing.Range<string>(
            min: firstContinuationToken.Range.Min,
            max: firstContinuationToken.Range.Max,
            isMinInclusive: true,
            isMaxInclusive: false));

    // Match on Range.Min only: the continuation token must start exactly where some range starts.
    int matchedIndex = sortedRanges.Span.BinarySearch(
        firstContinuationRange,
        Comparer<FeedRangeEpk>.Create((range1, range2) => string.CompareOrdinal(range1.Range.Min, range2.Range.Min)));

    if (matchedIndex < 0)
    {
        if (partitionKeyRanges.Count != 1)
        {
            return TryCatch<PartitionMapping<PartitionedToken>>.FromException(
                new MalformedContinuationTokenException(
                    $"{RMResources.InvalidContinuationToken} - Could not find continuation token: {firstContinuationToken}"));
        }

        // The user is doing a partition key query that got split,
        // so it no longer aligns with our continuation token.
        matchedIndex = 0;
    }

    ReadOnlyMemory<FeedRangeEpk> partitionsLeftOfTarget = matchedIndex == 0
        ? ReadOnlyMemory<FeedRangeEpk>.Empty
        : sortedRanges.Slice(start: 0, length: matchedIndex);
    ReadOnlyMemory<FeedRangeEpk> targetPartition = sortedRanges.Slice(start: matchedIndex, length: 1);
    ReadOnlyMemory<FeedRangeEpk> partitionsRightOfTarget = matchedIndex == sortedRanges.Length - 1
        ? ReadOnlyMemory<FeedRangeEpk>.Empty
        : sortedRanges.Slice(start: matchedIndex + 1);

    // Create the continuation token mapping for each region.
    IReadOnlyDictionary<FeedRangeEpk, PartitionedToken> mappingForPartitionsLeftOfTarget = MatchRangesToContinuationTokens(
        partitionsLeftOfTarget,
        partitionedContinuationTokens);
    IReadOnlyDictionary<FeedRangeEpk, PartitionedToken> mappingForTargetPartition = MatchRangesToContinuationTokens(
        targetPartition,
        partitionedContinuationTokens);
    IReadOnlyDictionary<FeedRangeEpk, PartitionedToken> mappingForPartitionsRightOfTarget = MatchRangesToContinuationTokens(
        partitionsRightOfTarget,
        partitionedContinuationTokens);

    return TryCatch<PartitionMapping<PartitionedToken>>.FromResult(
        new PartitionMapping<PartitionedToken>(
            partitionsLeftOfTarget: mappingForPartitionsLeftOfTarget,
            targetPartition: mappingForTargetPartition,
            partitionsRightOfTarget: mappingForPartitionsRightOfTarget));
}
/// <summary>
/// Initializes cross partition query execution context by initializing the necessary document producers.
/// </summary>
/// <param name="collectionRid">The collection to drain from.</param>
/// <param name="partitionKeyRanges">The partitions to target.</param>
/// <param name="initialPageSize">The page size to start the document producers off with.</param>
/// <param name="querySpecForInit">The query specification for the rewritten query.</param>
/// <param name="targetRangeToContinuationMap">Map from partition to it's corresponding continuation token.</param>
/// <param name="deferFirstPage">Whether or not we should defer the fetch of the first page from each partition.</param>
/// <param name="filter">The filter to inject in the predicate.</param>
/// <param name="tryFilterAsync">The callback used to filter each partition.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>A task to await on.</returns>
protected async Task<TryCatch> TryInitializeAsync(
    string collectionRid,
    IReadOnlyList<PartitionKeyRange> partitionKeyRanges,
    int initialPageSize,
    SqlQuerySpec querySpecForInit,
    IReadOnlyDictionary<string, string> targetRangeToContinuationMap,
    bool deferFirstPage,
    string filter,
    Func<ItemProducerTree, Task<TryCatch>> tryFilterAsync,
    CancellationToken cancellationToken)
{
    cancellationToken.ThrowIfCancellationRequested();
    List<ItemProducerTree> itemProducerTrees = new List<ItemProducerTree>();
    foreach (PartitionKeyRange partitionKeyRange in partitionKeyRanges)
    {
        // A missing entry (or a null map) means this partition starts from the beginning.
        string initialContinuationToken;
        if (targetRangeToContinuationMap != null)
        {
            if (!targetRangeToContinuationMap.TryGetValue(partitionKeyRange.Id, out initialContinuationToken))
            {
                initialContinuationToken = null;
            }
        }
        else
        {
            initialContinuationToken = null;
        }

        ItemProducerTree itemProducerTree = new ItemProducerTree(
            this.queryContext,
            querySpecForInit,
            partitionKeyRange,
            this.OnItemProducerTreeCompleteFetching,
            this.itemProducerForest.Comparer as IComparer<ItemProducerTree>,
            this.equalityComparer,
            this.testSettings,
            deferFirstPage,
            collectionRid,
            initialPageSize,
            initialContinuationToken)
        {
            Filter = filter
        };

        // Prefetch if necessary, and populate consume queue.
        if (this.CanPrefetch)
        {
            this.TryScheduleFetch(itemProducerTree);
        }

        itemProducerTrees.Add(itemProducerTree);
    }

    // Using loop fission so that we can load the document producers in parallel
    foreach (ItemProducerTree itemProducerTree in itemProducerTrees)
    {
        if (!deferFirstPage)
        {
            // Keep pulling pages until we either hit a failure, run out of pages,
            // or land on a page boundary / a page with at least one document.
            while (true)
            {
                (bool movedToNextPage, QueryResponseCore? failureResponse) = await itemProducerTree.TryMoveNextPageAsync(cancellationToken);

                if (failureResponse.HasValue)
                {
                    // Convert the failed page into a CosmosException so the caller
                    // gets the status/sub-status/charge of the failing backend response.
                    return (TryCatch.FromException(
                        new CosmosException(
                            statusCode: failureResponse.Value.StatusCode,
                            subStatusCode: (int)failureResponse.Value.SubStatusCode.GetValueOrDefault(0),
                            message: failureResponse.Value.ErrorMessage,
                            activityId: failureResponse.Value.ActivityId,
                            requestCharge: failureResponse.Value.RequestCharge)));
                }

                if (!movedToNextPage)
                {
                    break;
                }

                if (itemProducerTree.IsAtBeginningOfPage)
                {
                    break;
                }

                if (itemProducerTree.TryMoveNextDocumentWithinPage())
                {
                    break;
                }
            }
        }

        if (tryFilterAsync != null)
        {
            // Give the caller a chance to veto/adjust this producer (e.g. ORDER BY resume filtering).
            TryCatch tryFilter = await tryFilterAsync(itemProducerTree);
            if (!tryFilter.Succeeded)
            {
                return (tryFilter);
            }
        }

        this.itemProducerForest.Enqueue(itemProducerTree);
    }

    return (TryCatch.FromResult());
}
/// <summary>
/// Attempts to reconstruct a <see cref="ChangeFeedState"/> from its CosmosElement representation.
/// </summary>
/// <param name="cosmosElement">The serialized state; must be a CosmosObject with a string type property.</param>
/// <returns>The parsed state, or a <see cref="FormatException"/> failure for malformed input.</returns>
public static TryCatch<ChangeFeedState> MonadicFromCosmosElement(CosmosElement cosmosElement)
{
    if (cosmosElement == null)
    {
        throw new ArgumentNullException(nameof(cosmosElement));
    }

    if (!(cosmosElement is CosmosObject cosmosObject))
    {
        return TryCatch<ChangeFeedState>.FromException(
            new FormatException(
                $"expected change feed state to be an object: {cosmosElement}"));
    }

    if (!cosmosObject.TryGetValue(TypePropertyName, out CosmosString typePropertyValue))
    {
        return TryCatch<ChangeFeedState>.FromException(
            new FormatException(
                $"expected change feed state to have a string type property: {cosmosElement}"));
    }

    string typeValue = typePropertyValue.Value;
    ChangeFeedState parsedState;
    if (typeValue == BeginningTypeValue)
    {
        parsedState = ChangeFeedState.Beginning();
    }
    else if (typeValue == NowTypeValue)
    {
        parsedState = ChangeFeedState.Now();
    }
    else if (typeValue == TimeTypeValue)
    {
        if (!cosmosObject.TryGetValue(ValuePropertyName, out CosmosString valuePropertyValue))
        {
            return TryCatch<ChangeFeedState>.FromException(
                new FormatException(
                    $"expected change feed state to have a string value property: {cosmosElement}"));
        }

        // The timestamp is stored culture-invariant and normalized to UTC.
        if (!DateTime.TryParse(
            valuePropertyValue.Value,
            CultureInfo.InvariantCulture,
            DateTimeStyles.AssumeUniversal | DateTimeStyles.AdjustToUniversal | DateTimeStyles.AllowWhiteSpaces,
            out DateTime utcStartTime))
        {
            return TryCatch<ChangeFeedState>.FromException(
                new FormatException(
                    $"failed to parse start time value: {cosmosElement}"));
        }

        parsedState = ChangeFeedState.Time(utcStartTime);
    }
    else if (typeValue == ContinuationTypeValue)
    {
        if (!cosmosObject.TryGetValue(ValuePropertyName, out CosmosString valuePropertyValue))
        {
            return TryCatch<ChangeFeedState>.FromException(
                new FormatException(
                    $"expected change feed state to have a string value property: {cosmosElement}"));
        }

        parsedState = ChangeFeedState.Continuation(valuePropertyValue);
    }
    else
    {
        // An unrecognized type value indicates a programming error in the serializer.
        throw new InvalidOperationException();
    }

    return TryCatch<ChangeFeedState>.FromResult(parsedState);
}
/// <summary>
/// Initializes cross partition query execution context by creating one document producer
/// per entry in <paramref name="targetRangeToContinuationMap"/>.
/// </summary>
/// <param name="collectionRid">The collection to drain from.</param>
/// <param name="initialPageSize">The page size to start the document producers off with.</param>
/// <param name="querySpecForInit">The query specification for the rewritten query.</param>
/// <param name="targetRangeToContinuationMap">Map from partition key range to its continuation token (null value = start from the beginning).</param>
/// <param name="deferFirstPage">Whether or not we should defer the fetch of the first page from each partition.</param>
/// <param name="filter">The filter to inject in the predicate.</param>
/// <param name="tryFilterAsync">The callback used to filter each partition.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>A task to await on.</returns>
protected async Task<TryCatch> TryInitializeAsync(
    string collectionRid,
    int initialPageSize,
    SqlQuerySpec querySpecForInit,
    IReadOnlyDictionary<PartitionKeyRange, string> targetRangeToContinuationMap,
    bool deferFirstPage,
    string filter,
    Func<ItemProducerTree, Task<TryCatch>> tryFilterAsync,
    CancellationToken cancellationToken)
{
    if (collectionRid == null)
    {
        throw new ArgumentNullException(nameof(collectionRid));
    }

    if (initialPageSize < 0)
    {
        throw new ArgumentOutOfRangeException(nameof(initialPageSize));
    }

    if (querySpecForInit == null)
    {
        throw new ArgumentNullException(nameof(querySpecForInit));
    }

    if (targetRangeToContinuationMap == null)
    {
        throw new ArgumentNullException(nameof(targetRangeToContinuationMap));
    }

    cancellationToken.ThrowIfCancellationRequested();
    List<ItemProducerTree> itemProducerTrees = new List<ItemProducerTree>();
    foreach (KeyValuePair<PartitionKeyRange, string> rangeAndContinuationToken in targetRangeToContinuationMap)
    {
        PartitionKeyRange partitionKeyRange = rangeAndContinuationToken.Key;
        string continuationToken = rangeAndContinuationToken.Value;
        ItemProducerTree itemProducerTree = new ItemProducerTree(
            this.queryContext,
            querySpecForInit,
            partitionKeyRange,
            this.OnItemProducerTreeCompleteFetching,
            this.itemProducerForest.Comparer as IComparer<ItemProducerTree>,
            this.equalityComparer,
            this.testSettings,
            deferFirstPage,
            collectionRid,
            initialPageSize,
            continuationToken)
        {
            Filter = filter
        };

        // Prefetch if necessary, and populate consume queue.
        if (this.CanPrefetch)
        {
            this.TryScheduleFetch(itemProducerTree);
        }

        itemProducerTrees.Add(itemProducerTree);
    }

    // Using loop fission so that we can load the document producers in parallel
    foreach (ItemProducerTree itemProducerTree in itemProducerTrees)
    {
        if (!deferFirstPage)
        {
            // Keep pulling pages until failure, exhaustion, a page boundary,
            // or a page with at least one document.
            while (true)
            {
                (bool movedToNextPage, QueryResponseCore? failureResponse) = await itemProducerTree.TryMoveNextPageAsync(cancellationToken);

                if (failureResponse.HasValue)
                {
                    // Unlike the older overload, the failed response already carries a CosmosException.
                    return (TryCatch.FromException(
                        failureResponse.Value.CosmosException));
                }

                if (!movedToNextPage)
                {
                    break;
                }

                if (itemProducerTree.IsAtBeginningOfPage)
                {
                    break;
                }

                if (itemProducerTree.TryMoveNextDocumentWithinPage())
                {
                    break;
                }
            }
        }

        if (tryFilterAsync != null)
        {
            // Give the caller a chance to veto/adjust this producer.
            TryCatch tryFilter = await tryFilterAsync(itemProducerTree);
            if (!tryFilter.Succeeded)
            {
                return (tryFilter);
            }
        }

        this.itemProducerForest.Enqueue(itemProducerTree);
    }

    return (TryCatch.FromResult());
}
/// <summary>
/// Advances the GROUP BY stage: fully drains the input stage into the grouping table,
/// then emits aggregated results one page at a time.
/// </summary>
/// <param name="trace">The trace to flow through the input stage.</param>
/// <returns>True while there are pages to emit; false once the last page has been returned.</returns>
public override async ValueTask<bool> MoveNextAsync(ITrace trace)
{
    this.cancellationToken.ThrowIfCancellationRequested();

    if (trace == null)
    {
        throw new ArgumentNullException(nameof(trace));
    }

    if (this.returnedLastPage)
    {
        this.Current = default;
        return false;
    }

    // Draining GROUP BY is broken down into two stages:
    double cumulativeRequestCharge = 0.0;
    long cumulativeResponseLength = 0;

    // Stage 1: drain the groupings fully from all continuations and all partitions.
    while (await this.inputStage.MoveNextAsync(trace))
    {
        this.cancellationToken.ThrowIfCancellationRequested();

        TryCatch<QueryPage> monadicSourcePage = this.inputStage.Current;
        if (monadicSourcePage.Failed)
        {
            // Surface the failure as this stage's current item.
            this.Current = monadicSourcePage;
            return true;
        }

        QueryPage page = monadicSourcePage.Result;
        cumulativeRequestCharge += page.RequestCharge;
        cumulativeResponseLength += page.ResponseLengthInBytes;
        this.AggregateGroupings(page.Documents);
    }

    // Stage 2: emit the results from the grouping table page by page.
    IReadOnlyList<CosmosElement> drainedResults = this.groupingTable.Drain(this.pageSize);
    if (this.groupingTable.Count == 0)
    {
        this.returnedLastPage = true;
    }

    QueryPage emittedPage = new QueryPage(
        documents: drainedResults,
        requestCharge: cumulativeRequestCharge,
        activityId: null,
        responseLengthInBytes: cumulativeResponseLength,
        cosmosQueryExecutionInfo: null,
        disallowContinuationTokenMessage: ClientGroupByQueryPipelineStage.ContinuationTokenNotSupportedWithGroupBy,
        state: null);

    this.Current = TryCatch<QueryPage>.FromResult(emittedPage);
    return true;
}
// In order to maintain the continuation token for the user we must drain with a few constraints
// 1) We fully drain from the left most partition before moving on to the next partition
// 2) We drain only full pages from the document producer so we aren't left with a partial page
//    otherwise we would need to add to the continuation token how many items to skip over on that page.
/// <summary>
/// Advances the cross-partition parallel query stage and projects the backend page
/// (plus a composite continuation token) as this stage's current item.
/// </summary>
/// <param name="trace">The trace to flow to the inner enumerator.</param>
/// <returns>True if a page (success or failure) was produced; false when the source is exhausted.</returns>
public async ValueTask<bool> MoveNextAsync(ITrace trace)
{
    if (trace == null)
    {
        throw new ArgumentNullException(nameof(trace));
    }

    if (!await this.crossPartitionRangePageAsyncEnumerator.MoveNextAsync(trace))
    {
        this.Current = default;
        return false;
    }

    TryCatch<CrossFeedRangePage<QueryPage, QueryState>> currentCrossPartitionPage = this.crossPartitionRangePageAsyncEnumerator.Current;
    if (currentCrossPartitionPage.Failed)
    {
        this.Current = TryCatch<QueryPage>.FromException(currentCrossPartitionPage.Exception);
        return true;
    }

    CrossFeedRangePage<QueryPage, QueryState> crossPartitionPageResult = currentCrossPartitionPage.Result;
    QueryPage backendQueryPage = crossPartitionPageResult.Page;
    CrossFeedRangeState<QueryState> crossPartitionState = crossPartitionPageResult.State;

    QueryState queryState;
    if (crossPartitionState == null)
    {
        queryState = null;
    }
    else
    {
        // Left most and any non null continuations.
        // PERF: materialize the sort once; the previous IOrderedEnumerable was
        // re-sorted on every enumeration (once for First(), again for Skip(1)).
        List<FeedRangeState<QueryState>> feedRangeStates = crossPartitionState
            .Value
            .ToArray()
            .OrderBy(tuple => ((FeedRangeEpk)tuple.FeedRange).Range.Min)
            .ToList();

        List<ParallelContinuationToken> activeParallelContinuationTokens = new List<ParallelContinuationToken>();
        {
            // The left-most range is always recorded, even with a null state,
            // so the continuation remembers where draining left off.
            FeedRangeState<QueryState> firstState = feedRangeStates.First();
            ParallelContinuationToken firstParallelContinuationToken = new ParallelContinuationToken(
                token: firstState.State != null ? ((CosmosString)firstState.State.Value).Value : null,
                range: ((FeedRangeEpk)firstState.FeedRange).Range);

            activeParallelContinuationTokens.Add(firstParallelContinuationToken);
        }

        foreach (FeedRangeState<QueryState> feedRangeState in feedRangeStates.Skip(1))
        {
            this.cancellationToken.ThrowIfCancellationRequested();

            if (feedRangeState.State != null)
            {
                // CLEANUP: the guard above already proves State is non null,
                // so the old "State != null ? ... : null" ternary was dead code.
                ParallelContinuationToken parallelContinuationToken = new ParallelContinuationToken(
                    token: ((CosmosString)feedRangeState.State.Value).Value,
                    range: ((FeedRangeEpk)feedRangeState.FeedRange).Range);

                activeParallelContinuationTokens.Add(parallelContinuationToken);
            }
        }

        IEnumerable<CosmosElement> cosmosElementContinuationTokens = activeParallelContinuationTokens
            .Select(token => ParallelContinuationToken.ToCosmosElement(token));
        CosmosArray cosmosElementParallelContinuationTokens = CosmosArray.Create(cosmosElementContinuationTokens);

        queryState = new QueryState(cosmosElementParallelContinuationTokens);
    }

    QueryPage crossPartitionQueryPage = new QueryPage(
        backendQueryPage.Documents,
        backendQueryPage.RequestCharge,
        backendQueryPage.ActivityId,
        backendQueryPage.ResponseLengthInBytes,
        backendQueryPage.CosmosQueryExecutionInfo,
        backendQueryPage.DisallowContinuationTokenMessage,
        backendQueryPage.AdditionalHeaders,
        queryState);

    this.Current = TryCatch<QueryPage>.FromResult(crossPartitionQueryPage);
    return true;
}
// Test helper: ignores the continuation token and hands back a successful
// TryCatch wrapping a mocked execution component.
private static Task<TryCatch<IDocumentQueryExecutionComponent>> CreateSource(CosmosElement continuationToken)
{
    IDocumentQueryExecutionComponent stubComponent = new Mock<IDocumentQueryExecutionComponent>().Object;
    TryCatch<IDocumentQueryExecutionComponent> monadicComponent = TryCatch<IDocumentQueryExecutionComponent>.FromResult(stubComponent);
    return Task.FromResult(monadicComponent);
}
/// <summary>
/// Creates an <see cref="UnorderdDistinctMap"/> from an optional continuation token.
/// </summary>
/// <param name="continuationToken">Continuation token from a previous distinct map, or null to start fresh.</param>
/// <returns>The distinct map, or a <see cref="MalformedContinuationTokenException"/> failure for malformed tokens.</returns>
public static TryCatch<DistinctMap> TryCreate(CosmosElement continuationToken)
{
    HashSet<Number64> numbers = new HashSet<Number64>();
    HashSet<uint> stringsLength4 = new HashSet<uint>();
    HashSet<ulong> stringsLength8 = new HashSet<ulong>();
    HashSet<UInt128> stringsLength16 = new HashSet<UInt128>();
    HashSet<UInt128> stringsLength16Plus = new HashSet<UInt128>();
    HashSet<UInt128> arrays = new HashSet<UInt128>();
    HashSet<UInt128> objects = new HashSet<UInt128>();
    SimpleValues simpleValues = SimpleValues.None;

    if (continuationToken != null)
    {
        if (!(continuationToken is CosmosObject hashDictionary))
        {
            return TryCatch<DistinctMap>.FromException(
                new MalformedContinuationTokenException(
                    $"{nameof(UnorderdDistinctMap)} continuation token was malformed."));
        }

        // Numbers
        if (!hashDictionary.TryGetValue(UnorderdDistinctMap.PropertyNames.Numbers, out CosmosArray numbersArray))
        {
            return TryCatch<DistinctMap>.FromException(
                new MalformedContinuationTokenException(
                    $"{nameof(UnorderdDistinctMap)} continuation token was malformed."));
        }

        foreach (CosmosElement rawNumber in numbersArray)
        {
            if (!(rawNumber is CosmosNumber64 number))
            {
                return TryCatch<DistinctMap>.FromException(
                    new MalformedContinuationTokenException(
                        $"{nameof(UnorderdDistinctMap)} continuation token was malformed."));
            }

            numbers.Add(number.GetValue());
        }

        // Strings Length 4
        if (!hashDictionary.TryGetValue(UnorderdDistinctMap.PropertyNames.StringsLength4, out CosmosArray stringsLength4Array))
        {
            return TryCatch<DistinctMap>.FromException(
                new MalformedContinuationTokenException(
                    $"{nameof(UnorderdDistinctMap)} continuation token was malformed."));
        }

        foreach (CosmosElement rawStringLength4 in stringsLength4Array)
        {
            if (!(rawStringLength4 is CosmosUInt32 stringlength4))
            {
                return TryCatch<DistinctMap>.FromException(
                    new MalformedContinuationTokenException(
                        $"{nameof(UnorderdDistinctMap)} continuation token was malformed."));
            }

            stringsLength4.Add(stringlength4.GetValue());
        }

        // Strings Length 8
        if (!hashDictionary.TryGetValue(UnorderdDistinctMap.PropertyNames.StringsLength8, out CosmosArray stringsLength8Array))
        {
            return TryCatch<DistinctMap>.FromException(
                new MalformedContinuationTokenException(
                    $"{nameof(UnorderdDistinctMap)} continuation token was malformed."));
        }

        foreach (CosmosElement rawStringLength8 in stringsLength8Array)
        {
            if (!(rawStringLength8 is CosmosInt64 stringlength8))
            {
                return TryCatch<DistinctMap>.FromException(
                    new MalformedContinuationTokenException(
                        $"{nameof(UnorderdDistinctMap)} continuation token was malformed."));
            }

            // 8-byte hashes round-trip through the signed Int64 wire type; reinterpret the bits.
            stringsLength8.Add((ulong)stringlength8.GetValue());
        }

        // Strings Length 16
        stringsLength16 = Parse128BitHashes(hashDictionary, UnorderdDistinctMap.PropertyNames.StringsLength16);

        // Strings Length 24
        stringsLength16Plus = Parse128BitHashes(hashDictionary, UnorderdDistinctMap.PropertyNames.StringsLength16Plus);

        // Array
        arrays = Parse128BitHashes(hashDictionary, UnorderdDistinctMap.PropertyNames.Arrays);

        // Object
        objects = Parse128BitHashes(hashDictionary, UnorderdDistinctMap.PropertyNames.Object);

        // Simple Values
        // BUGFIX: previously read via the indexer and a separate type check; a token
        // missing this property escaped as an unhandled lookup exception instead of
        // the malformed-token failure every other property produces. TryGetValue
        // covers both "missing" and "wrong type" uniformly.
        if (!hashDictionary.TryGetValue(UnorderdDistinctMap.PropertyNames.SimpleValues, out CosmosString simpleValuesString))
        {
            return TryCatch<DistinctMap>.FromException(
                new MalformedContinuationTokenException(
                    $"{nameof(UnorderdDistinctMap)} continuation token was malformed."));
        }

        if (!Enum.TryParse<SimpleValues>(simpleValuesString.Value, out simpleValues))
        {
            return TryCatch<DistinctMap>.FromException(
                new MalformedContinuationTokenException(
                    $"{nameof(UnorderdDistinctMap)} continuation token was malformed."));
        }
    }

    return TryCatch<DistinctMap>.FromResult(new UnorderdDistinctMap(
        numbers,
        stringsLength4,
        stringsLength8,
        stringsLength16,
        stringsLength16Plus,
        arrays,
        objects,
        simpleValues));
}
/// <summary>
/// Creates a change feed iterator. The cross-partition enumerator is built lazily
/// because construction may require async work: parsing (and, for V1 tokens,
/// migrating) the continuation token and fetching the collection resource id.
/// </summary>
/// <param name="documentContainer">The container to read the change feed from.</param>
/// <param name="changeFeedRequestOptions">Request options; a default instance is used when null.</param>
/// <param name="changeFeedStartFrom">Where to start reading the change feed from.</param>
public ChangeFeedIteratorCore(
    IDocumentContainer documentContainer,
    ChangeFeedRequestOptions changeFeedRequestOptions,
    ChangeFeedStartFrom changeFeedStartFrom)
{
    if (changeFeedStartFrom == null)
    {
        throw new ArgumentNullException(nameof(changeFeedStartFrom));
    }

    this.documentContainer = documentContainer ?? throw new ArgumentNullException(nameof(documentContainer));
    this.changeFeedRequestOptions = changeFeedRequestOptions ?? new ChangeFeedRequestOptions();
    this.lazyMonadicEnumerator = new AsyncLazy<TryCatch<CrossPartitionChangeFeedAsyncEnumerator>>(
        valueFactory: async (cancellationToken) =>
        {
            if (changeFeedStartFrom is ChangeFeedStartFromContinuation startFromContinuation)
            {
                // Parse the raw continuation string into a CosmosElement.
                TryCatch<CosmosElement> monadicParsedToken = CosmosElement.Monadic.Parse(startFromContinuation.Continuation);
                if (monadicParsedToken.Failed)
                {
                    return (TryCatch<CrossPartitionChangeFeedAsyncEnumerator>.FromException(
                        new MalformedChangeFeedContinuationTokenException(
                            message: $"Failed to parse continuation token: {startFromContinuation.Continuation}.",
                            innerException: monadicParsedToken.Exception)));
                }

                TryCatch<VersionedAndRidCheckedCompositeToken> monadicVersionedToken = VersionedAndRidCheckedCompositeToken
                    .MonadicCreateFromCosmosElement(monadicParsedToken.Result);
                if (monadicVersionedToken.Failed)
                {
                    return (TryCatch<CrossPartitionChangeFeedAsyncEnumerator>.FromException(
                        new MalformedChangeFeedContinuationTokenException(
                            message: $"Failed to parse continuation token: {startFromContinuation.Continuation}.",
                            innerException: monadicVersionedToken.Exception)));
                }

                VersionedAndRidCheckedCompositeToken versionedAndRidCheckedCompositeToken = monadicVersionedToken.Result;
                if (versionedAndRidCheckedCompositeToken.VersionNumber == VersionedAndRidCheckedCompositeToken.Version.V1)
                {
                    // Need to migrate continuation token
                    // V1 tokens are an array of { min, max, token } objects; each entry is
                    // rewritten into the V2 feed-range-state representation below.
                    if (!(versionedAndRidCheckedCompositeToken.ContinuationToken is CosmosArray cosmosArray))
                    {
                        return (TryCatch<CrossPartitionChangeFeedAsyncEnumerator>.FromException(
                            new MalformedChangeFeedContinuationTokenException(
                                message: $"Failed to parse get array continuation token: {startFromContinuation.Continuation}.")));
                    }

                    List<CosmosElement> changeFeedTokensV2 = new List<CosmosElement>();
                    foreach (CosmosElement arrayItem in cosmosArray)
                    {
                        if (!(arrayItem is CosmosObject cosmosObject))
                        {
                            return (TryCatch<CrossPartitionChangeFeedAsyncEnumerator>.FromException(
                                new MalformedChangeFeedContinuationTokenException(
                                    message: $"Failed to parse get object in composite continuation: {startFromContinuation.Continuation}.")));
                        }

                        if (!cosmosObject.TryGetValue("min", out CosmosString min))
                        {
                            return (TryCatch<CrossPartitionChangeFeedAsyncEnumerator>.FromException(
                                new MalformedChangeFeedContinuationTokenException(
                                    message: $"Failed to parse start of range: {cosmosObject}.")));
                        }

                        if (!cosmosObject.TryGetValue("max", out CosmosString max))
                        {
                            return (TryCatch<CrossPartitionChangeFeedAsyncEnumerator>.FromException(
                                new MalformedChangeFeedContinuationTokenException(
                                    message: $"Failed to parse end of range: {cosmosObject}.")));
                        }

                        if (!cosmosObject.TryGetValue("token", out CosmosElement token))
                        {
                            return (TryCatch<CrossPartitionChangeFeedAsyncEnumerator>.FromException(
                                new MalformedChangeFeedContinuationTokenException(
                                    message: $"Failed to parse token: {cosmosObject}.")));
                        }

                        FeedRangeEpk feedRangeEpk = new FeedRangeEpk(new Documents.Routing.Range<string>(
                            min: min.Value,
                            max: max.Value,
                            isMinInclusive: true,
                            isMaxInclusive: false));

                        // A null token means this range has not been drained yet; start from the beginning.
                        ChangeFeedState state = token is CosmosNull ? ChangeFeedState.Beginning() : ChangeFeedStateContinuation.Continuation(token);
                        FeedRangeState<ChangeFeedState> feedRangeState = new FeedRangeState<ChangeFeedState>(feedRangeEpk, state);
                        changeFeedTokensV2.Add(ChangeFeedFeedRangeStateSerializer.ToCosmosElement(feedRangeState));
                    }

                    CosmosArray changeFeedTokensArrayV2 = CosmosArray.Create(changeFeedTokensV2);
                    versionedAndRidCheckedCompositeToken = new VersionedAndRidCheckedCompositeToken(
                        VersionedAndRidCheckedCompositeToken.Version.V2,
                        changeFeedTokensArrayV2,
                        versionedAndRidCheckedCompositeToken.Rid);
                }

                if (versionedAndRidCheckedCompositeToken.VersionNumber != VersionedAndRidCheckedCompositeToken.Version.V2)
                {
                    return (TryCatch<CrossPartitionChangeFeedAsyncEnumerator>.FromException(
                        new MalformedChangeFeedContinuationTokenException(
                            message: $"Wrong version number: {versionedAndRidCheckedCompositeToken.VersionNumber}.")));
                }

                // Reject tokens that were minted for a different collection.
                string collectionRid = await documentContainer.GetResourceIdentifierAsync(cancellationToken);
                if (versionedAndRidCheckedCompositeToken.Rid != collectionRid)
                {
                    return (TryCatch<CrossPartitionChangeFeedAsyncEnumerator>.FromException(
                        new MalformedChangeFeedContinuationTokenException(
                            message: $"rids mismatched. Expected: {collectionRid} but got {versionedAndRidCheckedCompositeToken.Rid}.")));
                }

                // Replace the start position with the (possibly migrated) V2 token.
                changeFeedStartFrom = ChangeFeedStartFrom.ContinuationToken(versionedAndRidCheckedCompositeToken.ContinuationToken.ToString());
            }

            TryCatch<ChangeFeedCrossFeedRangeState> monadicChangeFeedCrossFeedRangeState = changeFeedStartFrom.Accept(ChangeFeedStateFromToChangeFeedCrossFeedRangeState.Singleton);
            if (monadicChangeFeedCrossFeedRangeState.Failed)
            {
                return (TryCatch<CrossPartitionChangeFeedAsyncEnumerator>.FromException(
                    new MalformedChangeFeedContinuationTokenException(
                        message: $"Could not convert to {nameof(ChangeFeedCrossFeedRangeState)}.",
                        innerException: monadicChangeFeedCrossFeedRangeState.Exception)));
            }

            CrossPartitionChangeFeedAsyncEnumerator enumerator = CrossPartitionChangeFeedAsyncEnumerator.Create(
                documentContainer,
                changeFeedRequestOptions,
                new CrossFeedRangeState<ChangeFeedState>(monadicChangeFeedCrossFeedRangeState.Result.FeedRangeStates),
                cancellationToken: default);

            TryCatch<CrossPartitionChangeFeedAsyncEnumerator> monadicEnumerator = TryCatch<CrossPartitionChangeFeedAsyncEnumerator>.FromResult(enumerator);
            return (monadicEnumerator);
        });
    this.hasMoreResults = true;
}
// In order to maintain the continuation token for the user we must drain with a few constraints
// 1) We fully drain from the left most partition before moving on to the next partition
// 2) We drain only full pages from the document producer so we aren't left with a partial page
// otherwise we would need to add to the continuation token how many items to skip over on that page.
public async ValueTask<bool> MoveNextAsync()
{
    this.cancellationToken.ThrowIfCancellationRequested();

    if (!await this.crossPartitionRangePageAsyncEnumerator.MoveNextAsync())
    {
        // Source is exhausted; nothing more to emit.
        this.Current = default;
        return (false);
    }

    TryCatch<CrossPartitionPage<QueryPage, QueryState>> currentCrossPartitionPage = this.crossPartitionRangePageAsyncEnumerator.Current;
    if (currentCrossPartitionPage.Failed)
    {
        // Propagate the failure as this stage's current item.
        this.Current = TryCatch<QueryPage>.FromException(currentCrossPartitionPage.Exception);
        return (true);
    }

    CrossPartitionPage<QueryPage, QueryState> crossPartitionPageResult = currentCrossPartitionPage.Result;
    QueryPage backendQueryPage = crossPartitionPageResult.Page;
    CrossPartitionState<QueryState> crossPartitionState = crossPartitionPageResult.State;

    QueryState queryState;
    if (crossPartitionState == null)
    {
        queryState = null;
    }
    else
    {
        // Left most and any non null continuations
        List<(PartitionKeyRange, QueryState)> rangesAndStates = crossPartitionState.Value.OrderBy(tuple => tuple.Item1, PartitionKeyRangeComparer.Singleton).ToList();
        List<ParallelContinuationToken> activeParallelContinuationTokens = new List<ParallelContinuationToken>();
        for (int i = 0; i < rangesAndStates.Count; i++)
        {
            this.cancellationToken.ThrowIfCancellationRequested();

            (PartitionKeyRange range, QueryState state) = rangesAndStates[i];
            // NOTE: the left-most range (i == 0) is always recorded — even when its state
            // is null — so the continuation remembers where draining left off; the ternary
            // below is therefore NOT redundant for that first entry.
            if ((i == 0) || (state != null))
            {
                ParallelContinuationToken parallelContinuationToken = new ParallelContinuationToken(
                    token: state != null ? ((CosmosString)state.Value).Value : null,
                    range: range.ToRange());

                activeParallelContinuationTokens.Add(parallelContinuationToken);
            }
        }

        IEnumerable<CosmosElement> cosmosElementContinuationTokens = activeParallelContinuationTokens
            .Select(token => ParallelContinuationToken.ToCosmosElement(token));
        CosmosArray cosmosElementParallelContinuationTokens = CosmosArray.Create(cosmosElementContinuationTokens);

        queryState = new QueryState(cosmosElementParallelContinuationTokens);
    }

    // Re-wrap the backend page with the composite continuation state attached.
    QueryPage crossPartitionQueryPage = new QueryPage(
        backendQueryPage.Documents,
        backendQueryPage.RequestCharge,
        backendQueryPage.ActivityId,
        backendQueryPage.ResponseLengthInBytes,
        backendQueryPage.CosmosQueryExecutionInfo,
        backendQueryPage.DisallowContinuationTokenMessage,
        queryState);

    this.Current = TryCatch<QueryPage>.FromResult(crossPartitionQueryPage);
    return (true);
}
/// <summary>
/// Initializes cross partition query execution context by initializing the necessary document producers.
/// </summary>
/// <param name="collectionRid">The collection to drain from.</param>
/// <param name="partitionKeyRanges">The partitions to target.</param>
/// <param name="initialPageSize">The page size to start the document producers off with.</param>
/// <param name="querySpecForInit">The query specification for the rewritten query.</param>
/// <param name="targetRangeToContinuationMap">Map from partition to it's corresponding continuation token.</param>
/// <param name="deferFirstPage">Whether or not we should defer the fetch of the first page from each partition.</param>
/// <param name="filter">The filter to inject in the predicate.</param>
/// <param name="tryFilterAsync">The callback used to filter each partition.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>A task to await on.</returns>
protected async Task<TryCatch<bool>> TryInitializeAsync(
    string collectionRid,
    IReadOnlyList<PartitionKeyRange> partitionKeyRanges,
    int initialPageSize,
    SqlQuerySpec querySpecForInit,
    IReadOnlyDictionary<string, string> targetRangeToContinuationMap,
    bool deferFirstPage,
    string filter,
    Func<ItemProducerTree, Task<TryCatch<bool>>> tryFilterAsync,
    CancellationToken cancellationToken)
{
    cancellationToken.ThrowIfCancellationRequested();
    List<ItemProducerTree> itemProducerTrees = new List<ItemProducerTree>();
    foreach (PartitionKeyRange partitionKeyRange in partitionKeyRanges)
    {
        // A missing entry (or a null map) means this partition starts from the beginning.
        string initialContinuationToken;
        if (targetRangeToContinuationMap != null)
        {
            if (!targetRangeToContinuationMap.TryGetValue(partitionKeyRange.Id, out initialContinuationToken))
            {
                initialContinuationToken = null;
            }
        }
        else
        {
            initialContinuationToken = null;
        }

        ItemProducerTree itemProducerTree = new ItemProducerTree(
            this.queryContext,
            querySpecForInit,
            partitionKeyRange,
            this.OnItemProducerTreeCompleteFetching,
            this.itemProducerForest.Comparer as IComparer<ItemProducerTree>,
            this.equalityComparer,
            deferFirstPage,
            collectionRid,
            initialPageSize,
            initialContinuationToken)
        {
            Filter = filter
        };

        // Prefetch if necessary, and populate consume queue.
        if (this.CanPrefetch)
        {
            this.TryScheduleFetch(itemProducerTree);
        }

        itemProducerTrees.Add(itemProducerTree);
    }

    // Using loop fission so that we can load the document producers in parallel
    foreach (ItemProducerTree itemProducerTree in itemProducerTrees)
    {
        if (!deferFirstPage)
        {
            (bool successfullyMovedNext, QueryResponseCore? failureResponse) = await itemProducerTree.MoveNextIfNotSplitAsync(cancellationToken);
            if (failureResponse != null)
            {
                // Set the failure so on drain it can be returned.
                this.FailureResponse = failureResponse;

                // No reason to enqueue the rest of the itemProducerTrees since there is a failure.
                break;
            }
        }

        if (tryFilterAsync != null)
        {
            // Give the caller a chance to veto/adjust this producer.
            TryCatch<bool> tryFilter = await tryFilterAsync(itemProducerTree);
            if (!tryFilter.Succeeded)
            {
                return tryFilter;
            }
        }

        // Only exhausted producers are dropped; the rest enter the drain forest.
        if (itemProducerTree.HasMoreResults)
        {
            this.itemProducerForest.Enqueue(itemProducerTree);
        }
    }

    return TryCatch<bool>.FromResult(true);
}
/// <summary>
/// Advances the DISTINCT stage: pulls the next source page, filters out documents
/// already seen by the distinct map, and re-emits the page with a combined continuation state.
/// </summary>
/// <param name="trace">The trace to flow through the input stage.</param>
/// <returns>True if a page (success or failure) was produced; false when the source is exhausted.</returns>
public override async ValueTask<bool> MoveNextAsync(ITrace trace)
{
    if (trace == null)
    {
        throw new ArgumentNullException(nameof(trace));
    }

    if (!await this.inputStage.MoveNextAsync(trace))
    {
        this.Current = default;
        return false;
    }

    TryCatch<QueryPage> monadicSourcePage = this.inputStage.Current;
    if (monadicSourcePage.Failed)
    {
        // Propagate the failure as this stage's current item.
        this.Current = monadicSourcePage;
        return true;
    }

    QueryPage sourcePage = monadicSourcePage.Result;

    // Keep only documents the distinct map has not seen before.
    List<CosmosElement> deduplicatedDocuments = new List<CosmosElement>();
    foreach (CosmosElement candidate in sourcePage.Documents)
    {
        bool firstTimeSeen = this.distinctMap.Add(candidate, out UInt128 _);
        if (firstTimeSeen)
        {
            deduplicatedDocuments.Add(candidate);
        }
    }

    // Only carry a state forward when the source still has one; it is paired
    // with the distinct map's own continuation so dedup can resume.
    QueryState continuationState = null;
    if (sourcePage.State != null)
    {
        DistinctContinuationToken distinctContinuationToken = new DistinctContinuationToken(
            sourceToken: sourcePage.State.Value,
            distinctMapToken: this.distinctMap.GetCosmosElementContinuationToken());
        continuationState = new QueryState(DistinctContinuationToken.ToCosmosElement(distinctContinuationToken));
    }

    QueryPage pageWithDistinctResults = new QueryPage(
        documents: deduplicatedDocuments,
        requestCharge: sourcePage.RequestCharge,
        activityId: sourcePage.ActivityId,
        responseLengthInBytes: sourcePage.ResponseLengthInBytes,
        cosmosQueryExecutionInfo: sourcePage.CosmosQueryExecutionInfo,
        disallowContinuationTokenMessage: ComputeDistinctQueryPipelineStage.UseTryGetContinuationTokenMessage,
        additionalHeaders: sourcePage.AdditionalHeaders,
        state: continuationState);

    this.Current = TryCatch<QueryPage>.FromResult(pageWithDistinctResults);
    return true;
}
/// <summary>
/// Builds a SelectListAggregateValues from the per-alias aggregate operators, resuming each
/// alias's aggregate value from the (optional) continuation token.
/// </summary>
/// <param name="aggregateAliasToAggregateType">Maps each select-list alias to its aggregate operator (null for non-aggregate projections).</param>
/// <param name="orderedAliases">The aliases in projection order.</param>
/// <param name="continuationToken">Optional continuation; when present it must be an object keyed by alias.</param>
/// <returns>The aggregator, or a MalformedContinuationTokenException on a bad token.</returns>
public static TryCatch<SingleGroupAggregator> TryCreate(
    IReadOnlyDictionary<string, AggregateOperator?> aggregateAliasToAggregateType,
    IReadOnlyList<string> orderedAliases,
    CosmosElement continuationToken)
{
    if (aggregateAliasToAggregateType == null)
    {
        throw new ArgumentNullException(nameof(aggregateAliasToAggregateType));
    }

    if (orderedAliases == null)
    {
        throw new ArgumentNullException(nameof(orderedAliases));
    }

    // A non-null continuation must be an object keyed by alias.
    CosmosObject perAliasTokens = null;
    if (continuationToken != null)
    {
        if (!(continuationToken is CosmosObject asObject))
        {
            return TryCatch<SingleGroupAggregator>.FromException(
                new MalformedContinuationTokenException(
                    $"{nameof(SelectListAggregateValues)} continuation token is malformed: {continuationToken}."));
        }

        perAliasTokens = asObject;
    }

    Dictionary<string, AggregateValue> groupingTable = new Dictionary<string, AggregateValue>();
    foreach (KeyValuePair<string, AggregateOperator?> pair in aggregateAliasToAggregateType)
    {
        string alias = pair.Key;

        // Resume this alias from its slice of the continuation, if any.
        CosmosElement aliasToken = perAliasTokens != null ? perAliasTokens[alias] : null;

        TryCatch<AggregateValue> aggregateValueMonad = AggregateValue.TryCreate(pair.Value, aliasToken);
        if (!aggregateValueMonad.Succeeded)
        {
            return TryCatch<SingleGroupAggregator>.FromException(aggregateValueMonad.Exception);
        }

        groupingTable[alias] = aggregateValueMonad.Result;
    }

    return TryCatch<SingleGroupAggregator>.FromResult(new SelectListAggregateValues(groupingTable, orderedAliases));
}
/// <summary>
/// Moves the cross-partition enumerator to the next page by draining the per-partition
/// enumerator at the head of the priority queue, splicing in child enumerators on split.
/// </summary>
/// <returns>
/// False when every per-partition enumerator is exhausted; otherwise true, with Current set
/// to either the next page or a failure the caller may choose to retry.
/// </returns>
public async ValueTask<bool> MoveNextAsync()
{
    this.cancellationToken.ThrowIfCancellationRequested();

    // Per-partition enumerators are created lazily on first use.
    PriorityQueue<PartitionRangePageAsyncEnumerator<TPage, TState>> enumerators = await this.lazyEnumerators.GetValueAsync(cancellationToken: this.cancellationToken);
    if (enumerators.Count == 0)
    {
        return false;
    }

    PartitionRangePageAsyncEnumerator<TPage, TState> currentPaginator = enumerators.Dequeue();
    if (!await currentPaginator.MoveNextAsync())
    {
        // Current enumerator is empty,
        // so recursively retry on the next enumerator.
        return await this.MoveNextAsync();
    }

    if (currentPaginator.Current.Failed)
    {
        // Check if it's a retryable exception: unwrap to the innermost cause first.
        Exception exception = currentPaginator.Current.Exception;
        while (exception.InnerException != null)
        {
            exception = exception.InnerException;
        }

        if (IsSplitException(exception))
        {
            // Handle split: replace the parent enumerator with one child per child range,
            // each resuming from the parent's state.
            IEnumerable<PartitionKeyRange> childRanges = await this.feedRangeProvider.GetChildRangeAsync(
                currentPaginator.Range,
                cancellationToken: this.cancellationToken);
            foreach (PartitionKeyRange childRange in childRanges)
            {
                PartitionRangePageAsyncEnumerator<TPage, TState> childPaginator = this.createPartitionRangeEnumerator(
                    childRange,
                    currentPaginator.State);
                enumerators.Enqueue(childPaginator);
            }

            // Recursively retry
            return await this.MoveNextAsync();
        }

        if (IsMergeException(exception))
        {
            // Merge is not handled by this enumerator.
            throw new NotImplementedException();
        }

        // Just enqueue the paginator and the user can decide if they want to retry.
        enumerators.Enqueue(currentPaginator);
        this.Current = TryCatch<CrossPartitionPage<TPage, TState>>.FromException(currentPaginator.Current.Exception);
        return true;
    }

    if (currentPaginator.State != default)
    {
        // Re-queue only paginators that still have state to resume from.
        // Don't enqueue the paginator otherwise it's an infinite loop.
        enumerators.Enqueue(currentPaginator);
    }

    // Snapshot the (range, state) pairs of all pending enumerators so the page carries a
    // resumable cross-partition continuation; null means everything is fully drained.
    CrossPartitionState<TState> crossPartitionState;
    if (enumerators.Count == 0)
    {
        crossPartitionState = null;
    }
    else
    {
        List<(PartitionKeyRange, TState)> feedRangeAndStates = new List<(PartitionKeyRange, TState)>(enumerators.Count);
        foreach (PartitionRangePageAsyncEnumerator<TPage, TState> enumerator in enumerators)
        {
            feedRangeAndStates.Add((enumerator.Range, enumerator.State));
        }

        crossPartitionState = new CrossPartitionState<TState>(feedRangeAndStates);
    }

    this.Current = TryCatch<CrossPartitionPage<TPage, TState>>.FromResult(
        new CrossPartitionPage<TPage, TState>(currentPaginator.Current.Result, crossPartitionState));
    return true;
}
/// <summary>
/// Converts a raw query ResponseMessage into a TryCatch of QueryPage, emitting per-page
/// diagnostics for both success and failure paths. The response message is disposed here.
/// </summary>
private static TryCatch<QueryPage> GetCosmosElementResponse(
    Guid clientQueryCorrelationId,
    QueryRequestOptions requestOptions,
    ResourceType resourceType,
    ResponseMessage cosmosResponseMessage,
    PartitionKeyRangeIdentity partitionKeyRangeIdentity,
    Action<QueryPageDiagnostics> queryPageDiagnostics)
{
    using (cosmosResponseMessage)
    {
        // Diagnostics are recorded unconditionally, before the success check.
        QueryPageDiagnostics queryPage = new QueryPageDiagnostics(
            clientQueryCorrelationId: clientQueryCorrelationId,
            partitionKeyRangeId: partitionKeyRangeIdentity.PartitionKeyRangeId,
            queryMetricText: cosmosResponseMessage.Headers.QueryMetricsText,
            indexUtilizationText: cosmosResponseMessage.Headers[HttpConstants.HttpHeaders.IndexUtilization],
            diagnosticsContext: cosmosResponseMessage.DiagnosticsContext);
        queryPageDiagnostics(queryPage);

        if (!cosmosResponseMessage.IsSuccessStatusCode)
        {
            // Prefer the typed exception already on the message; otherwise synthesize one
            // from the status code and headers.
            CosmosException exception;
            if (cosmosResponseMessage.CosmosException != null)
            {
                exception = cosmosResponseMessage.CosmosException;
            }
            else
            {
                exception = new CosmosException(
                    cosmosResponseMessage.ErrorMessage,
                    cosmosResponseMessage.StatusCode,
                    (int)cosmosResponseMessage.Headers.SubStatusCode,
                    cosmosResponseMessage.Headers.ActivityId,
                    cosmosResponseMessage.Headers.RequestCharge);
            }

            return TryCatch<QueryPage>.FromException(exception);
        }

        // The parser needs a MemoryStream; buffer any other stream type into one.
        if (!(cosmosResponseMessage.Content is MemoryStream memoryStream))
        {
            memoryStream = new MemoryStream();
            cosmosResponseMessage.Content.CopyTo(memoryStream);
        }
        // NOTE(review): after CopyTo the stream position is at the end — presumably
        // ParseElementsFromRestStream reads the underlying buffer or rewinds; confirm.

        long responseLengthBytes = memoryStream.Length;
        CosmosArray documents = CosmosQueryClientCore.ParseElementsFromRestStream(
            memoryStream,
            resourceType,
            requestOptions.CosmosSerializationFormatOptions);

        // Optional header carrying backend execution info.
        CosmosQueryExecutionInfo cosmosQueryExecutionInfo;
        if (cosmosResponseMessage.Headers.TryGetValue(QueryExecutionInfoHeader, out string queryExecutionInfoString))
        {
            cosmosQueryExecutionInfo = JsonConvert.DeserializeObject<CosmosQueryExecutionInfo>(queryExecutionInfoString);
        }
        else
        {
            cosmosQueryExecutionInfo = default;
        }

        // A null continuation header means the backend has no more pages for this request.
        QueryState queryState;
        if (cosmosResponseMessage.Headers.ContinuationToken != null)
        {
            queryState = new QueryState(CosmosString.Create(cosmosResponseMessage.Headers.ContinuationToken));
        }
        else
        {
            queryState = default;
        }

        QueryPage response = new QueryPage(
            documents,
            cosmosResponseMessage.Headers.RequestCharge,
            cosmosResponseMessage.Headers.ActivityId,
            responseLengthBytes,
            cosmosQueryExecutionInfo,
            disallowContinuationTokenMessage: null,
            queryState);

        return TryCatch<QueryPage>.FromResult(response);
    }
}
/// <summary>Maps a "start from now" change feed position onto its cross-feed-range state.</summary>
public override TryCatch<ChangeFeedCrossFeedRangeState> Visit(ChangeFeedStartFromNow startFromNow)
{
    ChangeFeedCrossFeedRangeState state = ChangeFeedCrossFeedRangeState.CreateFromNow(startFromNow.FeedRange);
    return TryCatch<ChangeFeedCrossFeedRangeState>.FromResult(state);
}
/// <summary>
/// Extracts the cross-feed-range query state out of a user-supplied continuation token for
/// the parallel (unordered) cross-partition pipeline.
/// </summary>
/// <param name="continuationToken">The continuation token, or null to start from scratch.</param>
/// <param name="ranges">All EPK ranges the query spans.</param>
/// <returns>The extracted state, or a MalformedContinuationTokenException.</returns>
private static TryCatch<CrossFeedRangeState<QueryState>> MonadicExtractState(
    CosmosElement continuationToken,
    IReadOnlyList<FeedRangeEpk> ranges)
{
    if (continuationToken == null)
    {
        // Full fan out to the ranges with null continuations
        CrossFeedRangeState<QueryState> fullFanOutState = new CrossFeedRangeState<QueryState>(ranges.Select(range => new FeedRangeState<QueryState>(range, (QueryState)null)).ToArray());
        return TryCatch<CrossFeedRangeState<QueryState>>.FromResult(fullFanOutState);
    }

    // A non-null token must be a non-empty array of per-range parallel continuation tokens.
    if (!(continuationToken is CosmosArray parallelContinuationTokenListRaw))
    {
        return TryCatch<CrossFeedRangeState<QueryState>>.FromException(
            new MalformedContinuationTokenException(
                $"Invalid format for continuation token {continuationToken} for {nameof(ParallelCrossPartitionQueryPipelineStage)}"));
    }

    if (parallelContinuationTokenListRaw.Count == 0)
    {
        return TryCatch<CrossFeedRangeState<QueryState>>.FromException(
            new MalformedContinuationTokenException(
                $"Invalid format for continuation token {continuationToken} for {nameof(ParallelCrossPartitionQueryPipelineStage)}"));
    }

    List<ParallelContinuationToken> parallelContinuationTokens = new List<ParallelContinuationToken>();
    foreach (CosmosElement parallelContinuationTokenRaw in parallelContinuationTokenListRaw)
    {
        TryCatch<ParallelContinuationToken> tryCreateParallelContinuationToken = ParallelContinuationToken.TryCreateFromCosmosElement(parallelContinuationTokenRaw);
        if (tryCreateParallelContinuationToken.Failed)
        {
            return TryCatch<CrossFeedRangeState<QueryState>>.FromException(
                tryCreateParallelContinuationToken.Exception);
        }

        parallelContinuationTokens.Add(tryCreateParallelContinuationToken.Result);
    }

    // Map each token onto the current ranges (covers splits since the token was issued).
    TryCatch<PartitionMapping<ParallelContinuationToken>> partitionMappingMonad = PartitionMapper.MonadicGetPartitionMapping(
        ranges,
        parallelContinuationTokens);
    if (partitionMappingMonad.Failed)
    {
        return TryCatch<CrossFeedRangeState<QueryState>>.FromException(
            partitionMappingMonad.Exception);
    }

    PartitionMapping<ParallelContinuationToken> partitionMapping = partitionMappingMonad.Result;
    List<FeedRangeState<QueryState>> feedRangeStates = new List<FeedRangeState<QueryState>>();

    List<IReadOnlyDictionary<FeedRangeEpk, ParallelContinuationToken>> rangesToInitialize = new List<IReadOnlyDictionary<FeedRangeEpk, ParallelContinuationToken>>()
    {
        // Skip all the partitions left of the target range, since they have already been drained fully.
        partitionMapping.TargetMapping,
        partitionMapping.MappingRightOfTarget,
    };

    foreach (IReadOnlyDictionary<FeedRangeEpk, ParallelContinuationToken> rangeToInitalize in rangesToInitialize)
    {
        foreach (KeyValuePair<FeedRangeEpk, ParallelContinuationToken> kvp in rangeToInitalize)
        {
            // A null token (or null token payload) means "start this range from the beginning".
            FeedRangeState<QueryState> feedRangeState = new FeedRangeState<QueryState>(kvp.Key, kvp.Value?.Token != null ? new QueryState(CosmosString.Create(kvp.Value.Token)) : null);
            feedRangeStates.Add(feedRangeState);
        }
    }

    CrossFeedRangeState<QueryState> crossPartitionState = new CrossFeedRangeState<QueryState>(feedRangeStates.ToArray());
    return TryCatch<CrossFeedRangeState<QueryState>>.FromResult(crossPartitionState);
}
/// <summary>Maps a "start from time" change feed position onto its cross-feed-range state.</summary>
public override TryCatch<ChangeFeedCrossFeedRangeState> Visit(ChangeFeedStartFromTime startFromTime)
{
    ChangeFeedCrossFeedRangeState state = ChangeFeedCrossFeedRangeState.CreateFromTime(startFromTime.StartTime, startFromTime.FeedRange);
    return TryCatch<ChangeFeedCrossFeedRangeState>.FromResult(state);
}
/// <summary>
/// Moves the change feed enumerator forward. NotModified (304) pages are drained across
/// partitions until a real page arrives, the sweep loops back to the range it started on,
/// or a failure occurs; the request charges from all drained pages are aggregated into the
/// page that is finally returned.
/// </summary>
public async ValueTask<bool> MoveNextAsync(ITrace trace)
{
    this.cancellationToken.ThrowIfCancellationRequested();

    if (trace == null)
    {
        throw new ArgumentNullException(nameof(trace));
    }

    using (ITrace changeFeedMoveNextTrace = trace.StartChild("ChangeFeed MoveNextAsync", TraceComponent.ChangeFeed, TraceLevel.Info))
    {
        // A failure buffered by a previous drain (see below) is returned before advancing again.
        if (this.bufferedException.HasValue)
        {
            this.Current = this.bufferedException.Value;
            this.bufferedException = null;
            return true;
        }

        if (!await this.crossPartitionEnumerator.MoveNextAsync(changeFeedMoveNextTrace))
        {
            throw new InvalidOperationException("ChangeFeed should always have a next page.");
        }

        TryCatch<CrossFeedRangePage<ChangeFeedPage, ChangeFeedState>> monadicCrossPartitionPage = this.crossPartitionEnumerator.Current;
        if (monadicCrossPartitionPage.Failed)
        {
            this.Current = TryCatch<CrossFeedRangePage<ChangeFeedPage, ChangeFeedState>>.FromException(monadicCrossPartitionPage.Exception);
            return true;
        }

        CrossFeedRangePage<ChangeFeedPage, ChangeFeedState> crossFeedRangePage = monadicCrossPartitionPage.Result;
        ChangeFeedPage backendPage = crossFeedRangePage.Page;
        if (backendPage is ChangeFeedNotModifiedPage)
        {
            using (ITrace drainNotModifedPages = changeFeedMoveNextTrace.StartChild("Drain NotModified Pages", TraceComponent.ChangeFeed, TraceLevel.Info))
            {
                // Keep draining the cross partition enumerator until
                // We get a non 304 page or we loop back to the same range or run into an exception
                FeedRangeInternal originalRange = this.crossPartitionEnumerator.CurrentRange;
                double totalRequestCharge = backendPage.RequestCharge;
                do
                {
                    if (!await this.crossPartitionEnumerator.MoveNextAsync(drainNotModifedPages))
                    {
                        throw new InvalidOperationException("ChangeFeed should always have a next page.");
                    }

                    monadicCrossPartitionPage = this.crossPartitionEnumerator.Current;
                    if (monadicCrossPartitionPage.Failed)
                    {
                        // Buffer the exception, since we need to return the request charge so far.
                        this.bufferedException = TryCatch<CrossFeedRangePage<ChangeFeedPage, ChangeFeedState>>.FromException(monadicCrossPartitionPage.Exception);
                    }
                    else
                    {
                        crossFeedRangePage = monadicCrossPartitionPage.Result;
                        backendPage = crossFeedRangePage.Page;
                        totalRequestCharge += backendPage.RequestCharge;
                    }
                }
                while (!(backendPage is ChangeFeedSuccessPage
                    || this.crossPartitionEnumerator.CurrentRange.Equals(originalRange)
                    || this.bufferedException.HasValue));

                // Create a page with the aggregated request charge
                if (backendPage is ChangeFeedSuccessPage changeFeedSuccessPage)
                {
                    backendPage = new ChangeFeedSuccessPage(
                        changeFeedSuccessPage.Content,
                        totalRequestCharge,
                        changeFeedSuccessPage.ActivityId,
                        changeFeedSuccessPage.State);
                }
                else
                {
                    backendPage = new ChangeFeedNotModifiedPage(
                        totalRequestCharge,
                        backendPage.ActivityId,
                        backendPage.State);
                }
            }
        }

        // Re-wrap the (possibly charge-adjusted) page with the latest cross-partition state.
        crossFeedRangePage = new CrossFeedRangePage<ChangeFeedPage, ChangeFeedState>(
            backendPage,
            crossFeedRangePage.State);

        this.Current = TryCatch<CrossFeedRangePage<ChangeFeedPage, ChangeFeedState>>.FromResult(crossFeedRangePage);
        return true;
    }
}
/// <summary>Maps a "start from beginning" change feed position onto its cross-feed-range state.</summary>
public override TryCatch<ChangeFeedCrossFeedRangeState> Visit(ChangeFeedStartFromBeginning startFromBeginning)
{
    ChangeFeedCrossFeedRangeState state = ChangeFeedCrossFeedRangeState.CreateFromBeginning(startFromBeginning.FeedRange);
    return TryCatch<ChangeFeedCrossFeedRangeState>.FromResult(state);
}
/// <summary>
/// Moves the cross-feed-range enumerator to the next page by draining the enumerator at the
/// head of the queue. 410/1002 (split/merge) failures are handled by re-resolving child
/// ranges and retrying; other failures are surfaced to the caller, who may retry.
/// </summary>
public async ValueTask<bool> MoveNextAsync(ITrace trace)
{
    if (trace == null)
    {
        throw new ArgumentNullException(nameof(trace));
    }

    using (ITrace childTrace = trace.StartChild(name: nameof(MoveNextAsync), component: TraceComponent.Pagination, level: TraceLevel.Info))
    {
        IQueue<PartitionRangePageAsyncEnumerator<TPage, TState>> enumerators = await this.lazyEnumerators.GetValueAsync(
            childTrace,
            cancellationToken: this.cancellationToken);
        if (enumerators.Count == 0)
        {
            // Everything has been drained.
            this.Current = default;
            this.CurrentRange = default;
            this.nextState = default;
            return false;
        }

        PartitionRangePageAsyncEnumerator<TPage, TState> currentPaginator = enumerators.Dequeue();
        currentPaginator.SetCancellationToken(this.cancellationToken);
        bool moveNextResult = false;
        try
        {
            moveNextResult = await currentPaginator.MoveNextAsync(childTrace);
        }
        catch
        {
            // Re-queue the enumerator to avoid emptying the queue
            enumerators.Enqueue(currentPaginator);
            throw;
        }

        if (!moveNextResult)
        {
            // Current enumerator is empty,
            // so recursively retry on the next enumerator.
            return await this.MoveNextAsync(childTrace);
        }

        if (currentPaginator.Current.Failed)
        {
            // Check if it's a retryable exception: unwrap to the innermost cause first.
            Exception exception = currentPaginator.Current.Exception;
            while (exception.InnerException != null)
            {
                exception = exception.InnerException;
            }

            if (IsSplitException(exception))
            {
                // Handle split
                List<FeedRangeEpk> childRanges = await this.feedRangeProvider.GetChildRangeAsync(
                    currentPaginator.FeedRangeState.FeedRange,
                    childTrace,
                    this.cancellationToken);
                if (childRanges.Count <= 1)
                {
                    // We optimistically assumed that the cache is not stale.
                    // In the event that it is (where we only get back one child / the partition that we think got split)
                    // Then we need to refresh the cache
                    await this.feedRangeProvider.RefreshProviderAsync(childTrace, this.cancellationToken);
                    childRanges = await this.feedRangeProvider.GetChildRangeAsync(
                        currentPaginator.FeedRangeState.FeedRange,
                        childTrace,
                        this.cancellationToken);
                }

                if (childRanges.Count < 1)
                {
                    string errorMessage = "SDK invariant violated 4795CC37: Must have at least one EPK range in a cross partition enumerator";
                    throw Resource.CosmosExceptions.CosmosExceptionFactory.CreateInternalServerErrorException(
                        message: errorMessage,
                        headers: null,
                        stackTrace: null,
                        trace: childTrace,
                        error: new Microsoft.Azure.Documents.Error { Code = "SDK_invariant_violated_4795CC37", Message = errorMessage });
                }

                if (childRanges.Count == 1)
                {
                    // On a merge, the 410/1002 results in a single parent
                    // We maintain the current enumerator's range and let the RequestInvokerHandler logic kick in
                    enumerators.Enqueue(currentPaginator);
                }
                else
                {
                    // Split: fan out to one child enumerator per child range, each resuming
                    // from the parent's state.
                    foreach (FeedRangeInternal childRange in childRanges)
                    {
                        PartitionRangePageAsyncEnumerator<TPage, TState> childPaginator = this.createPartitionRangeEnumerator(
                            new FeedRangeState<TState>(childRange, currentPaginator.FeedRangeState.State));
                        enumerators.Enqueue(childPaginator);
                    }
                }

                // Recursively retry
                return await this.MoveNextAsync(childTrace);
            }

            // Just enqueue the paginator and the user can decide if they want to retry.
            enumerators.Enqueue(currentPaginator);

            this.Current = TryCatch<CrossFeedRangePage<TPage, TState>>.FromException(currentPaginator.Current.Exception);
            this.CurrentRange = currentPaginator.FeedRangeState.FeedRange;
            this.nextState = CrossPartitionRangePageAsyncEnumerator<TPage, TState>.GetNextRange(enumerators);
            return true;
        }

        if (currentPaginator.FeedRangeState.State != default)
        {
            // Re-queue only paginators that still have state to resume from.
            // Don't enqueue the paginator otherwise it's an infinite loop.
            enumerators.Enqueue(currentPaginator);
        }

        // Snapshot the remaining enumerators' feed-range states into the page's
        // cross-partition continuation; null means everything is fully drained.
        CrossFeedRangeState<TState> crossPartitionState;
        if (enumerators.Count == 0)
        {
            crossPartitionState = null;
        }
        else
        {
            FeedRangeState<TState>[] feedRangeAndStates = new FeedRangeState<TState>[enumerators.Count];
            int i = 0;
            foreach (PartitionRangePageAsyncEnumerator<TPage, TState> enumerator in enumerators)
            {
                feedRangeAndStates[i++] = enumerator.FeedRangeState;
            }

            crossPartitionState = new CrossFeedRangeState<TState>(feedRangeAndStates);
        }

        this.Current = TryCatch<CrossFeedRangePage<TPage, TState>>.FromResult(
            new CrossFeedRangePage<TPage, TState>(currentPaginator.Current.Result, crossPartitionState));
        this.CurrentRange = currentPaginator.FeedRangeState.FeedRange;
        this.nextState = CrossPartitionRangePageAsyncEnumerator<TPage, TState>.GetNextRange(enumerators);
        return true;
    }
}
/// <summary>
/// Advances the input stage one page and filters out documents the distinct map has already
/// seen. Only ordered-distinct queries emit a resumable continuation token on the client;
/// all other distinct flavors disallow continuation tokens.
/// </summary>
public override async ValueTask<bool> MoveNextAsync(ITrace trace)
{
    this.cancellationToken.ThrowIfCancellationRequested();

    if (trace == null)
    {
        throw new ArgumentNullException(nameof(trace));
    }

    if (!await this.inputStage.MoveNextAsync(trace))
    {
        this.Current = default;
        return false;
    }

    TryCatch<QueryPage> tryGetSourcePage = this.inputStage.Current;
    if (tryGetSourcePage.Failed)
    {
        // Surface the failed page unchanged.
        this.Current = tryGetSourcePage;
        return true;
    }

    QueryPage sourcePage = tryGetSourcePage.Result;

    // Keep only documents the distinct map has not seen before.
    List<CosmosElement> distinctResults = new List<CosmosElement>();
    foreach (CosmosElement document in sourcePage.Documents)
    {
        this.cancellationToken.ThrowIfCancellationRequested();

        if (this.distinctMap.Add(document, out UInt128 _))
        {
            distinctResults.Add(document);
        }
    }

    // For clients we write out the continuation token if it's a streaming query.
    QueryPage queryPage;
    if (this.distinctQueryType == DistinctQueryType.Ordered)
    {
        // Combine the source continuation with the distinct map's token so the
        // deduplication state survives a resume.
        QueryState state;
        if (sourcePage.State != null)
        {
            string updatedContinuationToken = new DistinctContinuationToken(
                sourceToken: sourcePage.State.Value.ToString(),
                distinctMapToken: this.distinctMap.GetContinuationToken()).ToString();
            state = new QueryState(CosmosElement.Parse(updatedContinuationToken));
        }
        else
        {
            state = null;
        }

        queryPage = new QueryPage(
            documents: distinctResults,
            requestCharge: sourcePage.RequestCharge,
            activityId: sourcePage.ActivityId,
            responseLengthInBytes: sourcePage.ResponseLengthInBytes,
            cosmosQueryExecutionInfo: sourcePage.CosmosQueryExecutionInfo,
            disallowContinuationTokenMessage: sourcePage.DisallowContinuationTokenMessage,
            additionalHeaders: sourcePage.AdditionalHeaders,
            state: state);
    }
    else
    {
        // Non-ordered distinct: no resumable token; carry a disallow message instead.
        queryPage = new QueryPage(
            documents: distinctResults,
            requestCharge: sourcePage.RequestCharge,
            activityId: sourcePage.ActivityId,
            responseLengthInBytes: sourcePage.ResponseLengthInBytes,
            cosmosQueryExecutionInfo: sourcePage.CosmosQueryExecutionInfo,
            disallowContinuationTokenMessage: ClientDistinctQueryPipelineStage.DisallowContinuationTokenMessage,
            additionalHeaders: sourcePage.AdditionalHeaders,
            state: null);
    }

    this.Current = TryCatch<QueryPage>.FromResult(queryPage);
    return true;
}
/// <summary>
/// Partitions <paramref name="partitionKeyRanges"/> into the ranges left of, at, and right of
/// the range the supplied continuation tokens left off on, pairing each range with the
/// continuation token (if any) that resumes it.
/// </summary>
/// <typeparam name="PartitionedToken">Continuation token type exposing the range it covers.</typeparam>
/// <param name="partitionKeyRanges">The ranges to map continuation tokens onto; must be non-empty.</param>
/// <param name="partitionedContinuationTokens">Tokens from the previous request; must be non-empty and no larger than the range list.</param>
/// <returns>
/// The partition mapping, or a MalformedContinuationTokenException when no range's MinInclusive
/// matches the first token's range.
/// </returns>
public static TryCatch<PartitionMapping<PartitionedToken>> TryGetInitializationInfo<PartitionedToken>(
    IReadOnlyList<PartitionKeyRange> partitionKeyRanges,
    IReadOnlyList<PartitionedToken> partitionedContinuationTokens)
    where PartitionedToken : IPartitionedToken
{
    if (partitionKeyRanges == null)
    {
        throw new ArgumentNullException(nameof(partitionKeyRanges));
    }

    if (partitionedContinuationTokens == null)
    {
        throw new ArgumentNullException(nameof(partitionedContinuationTokens));
    }

    if (partitionKeyRanges.Count < 1)
    {
        throw new ArgumentException(nameof(partitionKeyRanges));
    }

    if (partitionedContinuationTokens.Count < 1)
    {
        // Fixed: this guard previously reported nameof(partitionKeyRanges),
        // blaming the wrong argument.
        throw new ArgumentException(nameof(partitionedContinuationTokens));
    }

    if (partitionedContinuationTokens.Count > partitionKeyRanges.Count)
    {
        throw new ArgumentException($"{nameof(partitionedContinuationTokens)} can not have more elements than {nameof(partitionKeyRanges)}.");
    }

    // Find the continuation token for the partition we left off on:
    PartitionedToken firstContinuationToken = partitionedContinuationTokens
        .OrderBy((partitionedToken) => partitionedToken.PartitionRange.Min)
        .First();

    // Segment the ranges based off that:
    ReadOnlyMemory<PartitionKeyRange> sortedRanges = partitionKeyRanges
        .OrderBy((partitionKeyRange) => partitionKeyRange.MinInclusive)
        .ToArray();

    PartitionKeyRange firstContinuationRange = new PartitionKeyRange
    {
        MinInclusive = firstContinuationToken.PartitionRange.Min,
        MaxExclusive = firstContinuationToken.PartitionRange.Max,
    };

    // Binary search on MinInclusive only: the token's range must start exactly where
    // some current range starts.
    int matchedIndex = sortedRanges.Span.BinarySearch(
        firstContinuationRange,
        Comparer<PartitionKeyRange>.Create((range1, range2) => string.CompareOrdinal(range1.MinInclusive, range2.MinInclusive)));
    if (matchedIndex < 0)
    {
        return TryCatch<PartitionMapping<PartitionedToken>>.FromException(
            new MalformedContinuationTokenException(
                $"{RMResources.InvalidContinuationToken} - Could not find continuation token: {firstContinuationToken}"));
    }

    // Ranges left of the target were already drained; ranges right of it are untouched.
    ReadOnlyMemory<PartitionKeyRange> partitionsLeftOfTarget = matchedIndex == 0
        ? ReadOnlyMemory<PartitionKeyRange>.Empty
        : sortedRanges.Slice(start: 0, length: matchedIndex);
    ReadOnlyMemory<PartitionKeyRange> targetPartition = sortedRanges.Slice(start: matchedIndex, length: 1);
    ReadOnlyMemory<PartitionKeyRange> partitionsRightOfTarget = matchedIndex == sortedRanges.Length - 1
        ? ReadOnlyMemory<PartitionKeyRange>.Empty
        : sortedRanges.Slice(start: matchedIndex + 1);

    // Create the continuation token mapping for each region.
    IReadOnlyDictionary<PartitionKeyRange, PartitionedToken> mappingForPartitionsLeftOfTarget = MatchRangesToContinuationTokens(
        partitionsLeftOfTarget,
        partitionedContinuationTokens);
    IReadOnlyDictionary<PartitionKeyRange, PartitionedToken> mappingForTargetPartition = MatchRangesToContinuationTokens(
        targetPartition,
        partitionedContinuationTokens);
    IReadOnlyDictionary<PartitionKeyRange, PartitionedToken> mappingForPartitionsRightOfTarget = MatchRangesToContinuationTokens(
        partitionsRightOfTarget,
        partitionedContinuationTokens);

    return TryCatch<PartitionMapping<PartitionedToken>>.FromResult(
        new PartitionMapping<PartitionedToken>(
            partitionsLeftOfTarget: mappingForPartitionsLeftOfTarget,
            targetPartition: mappingForTargetPartition,
            partitionsRightOfTarget: mappingForPartitionsRightOfTarget));
}
/// <summary>
/// Builds the client-side DISTINCT pipeline stage: parses the (optional) distinct
/// continuation token, restores the distinct map from it, and creates the source stage
/// resumed from the embedded source token.
/// </summary>
public static TryCatch<IQueryPipelineStage> MonadicCreate(
    CosmosElement requestContinuation,
    CancellationToken cancellationToken,
    MonadicCreatePipelineStage monadicCreatePipelineStage,
    DistinctQueryType distinctQueryType)
{
    if (monadicCreatePipelineStage == null)
    {
        throw new ArgumentNullException(nameof(monadicCreatePipelineStage));
    }

    DistinctContinuationToken distinctContinuationToken;
    if (requestContinuation != null)
    {
        if (!DistinctContinuationToken.TryParse(requestContinuation, out distinctContinuationToken))
        {
            return TryCatch<IQueryPipelineStage>.FromException(
                new MalformedContinuationTokenException(
                    $"Invalid {nameof(DistinctContinuationToken)}: {requestContinuation}"));
        }
    }
    else
    {
        // No continuation: start fresh with empty source and distinct-map tokens.
        distinctContinuationToken = new DistinctContinuationToken(
            sourceToken: null,
            distinctMapToken: null);
    }

    // Rehydrate the distinct map from its token (if any).
    CosmosElement distinctMapToken = distinctContinuationToken.DistinctMapToken != null
        ? CosmosString.Create(distinctContinuationToken.DistinctMapToken)
        : null;
    TryCatch<DistinctMap> tryCreateDistinctMap = DistinctMap.TryCreate(
        distinctQueryType,
        distinctMapToken);
    if (!tryCreateDistinctMap.Succeeded)
    {
        return TryCatch<IQueryPipelineStage>.FromException(tryCreateDistinctMap.Exception);
    }

    // The nested source token is stored as a string; parse it back to a CosmosElement
    // before handing it to the source stage factory.
    CosmosElement sourceToken;
    if (distinctContinuationToken.SourceToken != null)
    {
        TryCatch<CosmosElement> tryParse = CosmosElement.Monadic.Parse(distinctContinuationToken.SourceToken);
        if (tryParse.Failed)
        {
            return TryCatch<IQueryPipelineStage>.FromException(
                new MalformedContinuationTokenException(
                    message: $"Invalid Source Token: {distinctContinuationToken.SourceToken}",
                    innerException: tryParse.Exception));
        }

        sourceToken = tryParse.Result;
    }
    else
    {
        sourceToken = null;
    }

    TryCatch<IQueryPipelineStage> tryCreateSource = monadicCreatePipelineStage(sourceToken, cancellationToken);
    if (!tryCreateSource.Succeeded)
    {
        return TryCatch<IQueryPipelineStage>.FromException(tryCreateSource.Exception);
    }

    return TryCatch<IQueryPipelineStage>.FromResult(
        new ClientDistinctQueryPipelineStage(
            distinctQueryType,
            tryCreateDistinctMap.Result,
            tryCreateSource.Result,
            cancellationToken));
}
/// <summary>
/// Parses an OrderByContinuationToken out of its CosmosElement form, validating that every
/// required field (composite token, order-by items, rid, skip count, filter) is present.
/// </summary>
/// <param name="cosmosElement">The raw continuation element; must be a CosmosObject.</param>
/// <returns>The parsed token, or a MalformedContinuationTokenException describing the missing/bad field.</returns>
public static TryCatch<OrderByContinuationToken> TryCreateFromCosmosElement(CosmosElement cosmosElement)
{
    if (!(cosmosElement is CosmosObject cosmosObject))
    {
        return TryCatch<OrderByContinuationToken>.FromException(
            new MalformedContinuationTokenException($"{nameof(OrderByContinuationToken)} is not an object: {cosmosElement}"));
    }

    if (!cosmosObject.TryGetValue(PropertyNames.CompositeToken, out CosmosElement compositeTokenElement))
    {
        return TryCatch<OrderByContinuationToken>.FromException(
            new MalformedContinuationTokenException($"{nameof(OrderByContinuationToken)} is missing field: '{PropertyNames.CompositeToken}': {cosmosElement}"));
    }

    // The nested composite token is itself monadically parsed; bubble up its failure.
    TryCatch<ParallelContinuationToken> compositeTokenMonad =
        ParallelContinuationToken.TryCreateFromCosmosElement(compositeTokenElement);
    if (!compositeTokenMonad.Succeeded)
    {
        return TryCatch<OrderByContinuationToken>.FromException(compositeTokenMonad.Exception);
    }

    if (!cosmosObject.TryGetValue(PropertyNames.OrderByItems, out CosmosArray orderByItemsRaw))
    {
        return TryCatch<OrderByContinuationToken>.FromException(
            new MalformedContinuationTokenException($"{nameof(OrderByContinuationToken)} is missing field: '{PropertyNames.OrderByItems}': {cosmosElement}"));
    }

    List<OrderByItem> orderByItems = orderByItemsRaw.Select(item => OrderByItem.FromCosmosElement(item)).ToList();

    if (!cosmosObject.TryGetValue(PropertyNames.Rid, out CosmosString ridRaw))
    {
        return TryCatch<OrderByContinuationToken>.FromException(
            new MalformedContinuationTokenException($"{nameof(OrderByContinuationToken)} is missing field: '{PropertyNames.Rid}': {cosmosElement}"));
    }

    if (!cosmosObject.TryGetValue(PropertyNames.SkipCount, out CosmosNumber64 skipCountRaw))
    {
        return TryCatch<OrderByContinuationToken>.FromException(
            new MalformedContinuationTokenException($"{nameof(OrderByContinuationToken)} is missing field: '{PropertyNames.SkipCount}': {cosmosElement}"));
    }

    if (!cosmosObject.TryGetValue(PropertyNames.Filter, out CosmosElement filterRaw))
    {
        return TryCatch<OrderByContinuationToken>.FromException(
            new MalformedContinuationTokenException($"{nameof(OrderByContinuationToken)} is missing field: '{PropertyNames.Filter}': {cosmosElement}"));
    }

    // The filter is optional in value: any non-string element means "no filter".
    string filter = filterRaw is CosmosString filterString ? filterString.Value : null;

    return TryCatch<OrderByContinuationToken>.FromResult(
        new OrderByContinuationToken(
            compositeTokenMonad.Result,
            orderByItems,
            ridRaw.Value,
            (int)Number64.ToLong(skipCountRaw.GetValue()),
            filter));
}
/// <summary>
/// Builds a mock base execution component whose DrainAsync always yields a failed
/// QueryResponseCore, plus that failure response, for failure-path tests.
/// </summary>
/// <returns>A factory callback producing the mocked component, and the canned failure response.</returns>
private (Func<string, Task<TryCatch<IDocumentQueryExecutionComponent>>>, QueryResponseCore) SetupBaseContextToVerifyFailureScenario()
{
    IReadOnlyCollection<QueryPageDiagnostics> diagnostics = new List<QueryPageDiagnostics>()
    {
        new QueryPageDiagnostics(
            "0",
            "SomeQueryMetricText",
            "SomeIndexUtilText",
            new PointOperationStatistics(
                Guid.NewGuid().ToString(),
                System.Net.HttpStatusCode.Unauthorized,
                subStatusCode: SubStatusCodes.PartitionKeyMismatch,
                requestCharge: 4,
                errorMessage: null,
                method: HttpMethod.Post,
                requestUri: new Uri("http://localhost.com"),
                requestSessionToken: null,
                responseSessionToken: null,
                clientSideRequestStatistics: null),
            new SchedulingStopwatch())
    };

    QueryResponseCore failureResponse = QueryResponseCore.CreateFailure(
        System.Net.HttpStatusCode.Unauthorized,
        SubStatusCodes.PartitionKeyMismatch,
        "Random error message",
        42.89,
        "TestActivityId",
        diagnostics);

    // Any drain on the mocked component returns the canned failure.
    Mock<IDocumentQueryExecutionComponent> mockComponent = new Mock<IDocumentQueryExecutionComponent>();
    mockComponent
        .Setup(component => component.DrainAsync(It.IsAny<int>(), It.IsAny<CancellationToken>()))
        .Returns(Task.FromResult<QueryResponseCore>(failureResponse));

    Func<string, Task<TryCatch<IDocumentQueryExecutionComponent>>> createComponentCallback =
        continuation => Task.FromResult<TryCatch<IDocumentQueryExecutionComponent>>(
            TryCatch<IDocumentQueryExecutionComponent>.FromResult(mockComponent.Object));

    return (createComponentCallback, failureResponse);
}
/// <summary>
/// <para>
/// If a query encounters split up resuming using continuation, we need to regenerate the continuation tokens.
/// Specifically, since after split we will have new set of ranges, we need to remove continuation token for the
/// parent partition and introduce continuation token for the child partitions.
/// </para>
/// <para>
/// This function does that. Also in that process, we also check validity of the input continuation tokens. For example,
/// even after split the boundary ranges of the child partitions should match with the parent partitions. If the Min and Max
/// range of a target partition in the continuation token was Min1 and Max1. Then the Min and Max range info for the two
/// corresponding child partitions C1Min, C1Max, C2Min, and C2Max should follow the constrain below:
/// PMax = C2Max > C2Min > C1Max > C1Min = PMin.
/// </para>
/// </summary>
/// <param name="partitionKeyRanges">The partition key ranges to extract continuation tokens for.</param>
/// <param name="suppliedContinuationTokens">The continuation token that the user supplied.</param>
/// <typeparam name="TContinuationToken">The type of continuation token to generate.</typeparam>
/// <Remarks>
/// The code assumes that merge doesn't happen.
/// </Remarks>
/// <returns>The index of the partition whose MinInclusive is equal to the suppliedContinuationTokens along with the continuation tokens.</returns>
public static TryCatch<InitInfo<TContinuationToken>> TryFindTargetRangeAndExtractContinuationTokens<TContinuationToken>(
    List<PartitionKeyRange> partitionKeyRanges,
    IEnumerable<Tuple<TContinuationToken, Documents.Routing.Range<string>>> suppliedContinuationTokens)
{
    if (partitionKeyRanges == null)
    {
        throw new ArgumentNullException(nameof(partitionKeyRanges));
    }

    if (partitionKeyRanges.Count < 1)
    {
        throw new ArgumentException(nameof(partitionKeyRanges));
    }

    foreach (PartitionKeyRange partitionKeyRange in partitionKeyRanges)
    {
        if (partitionKeyRange == null)
        {
            throw new ArgumentException(nameof(partitionKeyRanges));
        }
    }

    if (suppliedContinuationTokens == null)
    {
        throw new ArgumentNullException(nameof(suppliedContinuationTokens));
    }

    // Materialize once: the parameter is an IEnumerable and was previously enumerated
    // several times (Count() twice, OrderBy, and the foreach below).
    List<Tuple<TContinuationToken, Documents.Routing.Range<string>>> continuationTokens = suppliedContinuationTokens.ToList();

    if (continuationTokens.Count < 1)
    {
        throw new ArgumentException(nameof(suppliedContinuationTokens));
    }

    if (continuationTokens.Count > partitionKeyRanges.Count)
    {
        throw new ArgumentException($"{nameof(suppliedContinuationTokens)} can not have more elements than {nameof(partitionKeyRanges)}.");
    }

    Dictionary<string, TContinuationToken> targetRangeToContinuationTokenMap = new Dictionary<string, TContinuationToken>();

    // Find the minimum index (the continuation for the range we left off on).
    Tuple<TContinuationToken, Documents.Routing.Range<string>> firstContinuationTokenAndRange = continuationTokens
        .OrderBy((tuple) => tuple.Item2.Min)
        .First();
    TContinuationToken firstContinuationToken = firstContinuationTokenAndRange.Item1;
    PartitionKeyRange firstContinuationRange = new PartitionKeyRange
    {
        MinInclusive = firstContinuationTokenAndRange.Item2.Min,
        MaxExclusive = firstContinuationTokenAndRange.Item2.Max,
    };

    int minIndex = partitionKeyRanges.BinarySearch(
        firstContinuationRange,
        Comparer<PartitionKeyRange>.Create((range1, range2) => string.CompareOrdinal(range1.MinInclusive, range2.MinInclusive)));
    if (minIndex < 0)
    {
        return TryCatch<InitInfo<TContinuationToken>>.FromException(
            new MalformedContinuationTokenException(
                $"{RMResources.InvalidContinuationToken} - Could not find continuation token: {firstContinuationToken}"));
    }

    foreach (Tuple<TContinuationToken, Documents.Routing.Range<string>> suppledContinuationToken in continuationTokens)
    {
        // find what ranges make up the supplied continuation token
        TContinuationToken continuationToken = suppledContinuationToken.Item1;
        Documents.Routing.Range<string> range = suppledContinuationToken.Item2;

        // Materialize the child ranges once; previously this deferred LINQ query was
        // re-executed on every Count()/First()/Last() call below.
        List<PartitionKeyRange> replacementRanges = partitionKeyRanges
            .Where((partitionKeyRange) =>
                string.CompareOrdinal(range.Min, partitionKeyRange.MinInclusive) <= 0 &&
                string.CompareOrdinal(range.Max, partitionKeyRange.MaxExclusive) >= 0)
            .OrderBy((partitionKeyRange) => partitionKeyRange.MinInclusive)
            .ToList();

        // Could not find the child ranges
        if (replacementRanges.Count == 0)
        {
            return TryCatch<InitInfo<TContinuationToken>>.FromException(
                new MalformedContinuationTokenException(
                    $"{RMResources.InvalidContinuationToken} - Could not find continuation token: {continuationToken}"));
        }

        // PMax = C2Max > C2Min > C1Max > C1Min = PMin.
        string parentMax = range.Max;
        string child2Max = replacementRanges[replacementRanges.Count - 1].MaxExclusive;
        string child2Min = replacementRanges[replacementRanges.Count - 1].MinInclusive;
        string child1Max = replacementRanges[0].MaxExclusive;
        string child1Min = replacementRanges[0].MinInclusive;
        string parentMin = range.Min;

        if (!(parentMax == child2Max &&
            string.CompareOrdinal(child2Max, child2Min) >= 0 &&
            (replacementRanges.Count == 1 ? true : string.CompareOrdinal(child2Min, child1Max) >= 0) &&
            string.CompareOrdinal(child1Max, child1Min) >= 0 &&
            child1Min == parentMin))
        {
            return TryCatch<InitInfo<TContinuationToken>>.FromException(
                new MalformedContinuationTokenException(
                    $"{RMResources.InvalidContinuationToken} - PMax = C2Max > C2Min > C1Max > C1Min = PMin: {continuationToken}"));
        }

        // Every child range resumes from the parent's continuation token.
        foreach (PartitionKeyRange partitionKeyRange in replacementRanges)
        {
            targetRangeToContinuationTokenMap.Add(partitionKeyRange.Id, continuationToken);
        }
    }

    return TryCatch<InitInfo<TContinuationToken>>.FromResult(
        new InitInfo<TContinuationToken>(
            minIndex,
            targetRangeToContinuationTokenMap));
}
/// <summary>
/// Builds a read-feed iterator, normalizing any caller-supplied continuation token into the
/// current "array of feed-range states" format before handing it to the cross-partition enumerator.
/// Two legacy token formats are supported: a parsable <c>FeedRangeContinuation</c>, and an even
/// older raw token that gets wrapped in a full-range composite continuation.
/// </summary>
/// <param name="documentContainer">Container abstraction the enumerator pulls pages from.</param>
/// <param name="continuationToken">Token from a previous iteration; null/empty starts from the beginning.</param>
/// <param name="readFeedPaginationOptions">Pagination options; replaced with the default when null.</param>
/// <param name="queryRequestOptions">Optional request options; may carry a partition key or feed range that scopes the read.</param>
/// <param name="cancellationToken">Token flowed into the underlying enumerator.</param>
public ReadFeedIteratorCore(
    IDocumentContainer documentContainer,
    string continuationToken,
    ReadFeedPaginationOptions readFeedPaginationOptions,
    QueryRequestOptions queryRequestOptions,
    CancellationToken cancellationToken)
{
    this.queryRequestOptions = queryRequestOptions;
    readFeedPaginationOptions ??= ReadFeedPaginationOptions.Default;
    if (!string.IsNullOrEmpty(continuationToken))
    {
        // Current format is a JSON array ("[...]"); anything else is one of the two older formats
        // and must be migrated below.
        bool isNewArrayFormat = (continuationToken.Length >= 2)
            && (continuationToken[0] == '[')
            && (continuationToken[continuationToken.Length - 1] == ']');
        if (!isNewArrayFormat)
        {
            // One of the two older formats
            if (!FeedRangeContinuation.TryParse(continuationToken, out FeedRangeContinuation feedRangeContinuation))
            {
                // Backward compatible with old format: treat the raw token as a single
                // full-range composite continuation.
                feedRangeContinuation = new FeedRangeCompositeContinuation(
                    containerRid: string.Empty,
                    FeedRangeEpk.FullRange,
                    new List<Documents.Routing.Range<string>>()
                    {
                        new Documents.Routing.Range<string>(
                            Documents.Routing.PartitionKeyInternal.MinimumInclusiveEffectivePartitionKey,
                            Documents.Routing.PartitionKeyInternal.MaximumExclusiveEffectivePartitionKey,
                            isMinInclusive: true,
                            isMaxInclusive: false)
                    },
                    continuationToken);
            }

            // need to massage it a little: re-serialize the legacy continuation as a list of
            // per-feed-range states in the new format.
            List<CosmosElement> feedRangeStates = new List<CosmosElement>();
            string oldContinuationFormat = feedRangeContinuation.ToString();
            if (feedRangeContinuation.FeedRange is FeedRangePartitionKey feedRangePartitionKey)
            {
                // Partition-key-scoped read: exactly one continuation entry is expected.
                CosmosObject cosmosObject = CosmosObject.Parse(oldContinuationFormat);
                CosmosArray continuations = (CosmosArray)cosmosObject["Continuation"];
                if (continuations.Count != 1)
                {
                    throw new InvalidOperationException("Expected only one continuation for partition key queries");
                }

                CosmosElement continuation = continuations[0];
                CosmosObject continuationObject = (CosmosObject)continuation;
                CosmosElement token = continuationObject["token"];
                ReadFeedState state;
                if (token is CosmosNull)
                {
                    // A null token in the legacy format means "not started yet".
                    state = ReadFeedState.Beginning();
                }
                else
                {
                    CosmosString tokenAsString = (CosmosString)token;
                    state = ReadFeedState.Continuation(CosmosElement.Parse(tokenAsString.Value));
                }

                FeedRangeState<ReadFeedState> feedRangeState = new FeedRangeState<ReadFeedState>(feedRangePartitionKey, state);
                feedRangeStates.Add(ReadFeedFeedRangeStateSerializer.ToCosmosElement(feedRangeState));
            }
            else
            {
                // EPK-range read: one entry per range; min/max are lifted from each legacy
                // continuation's embedded "range" object.
                CosmosObject cosmosObject = CosmosObject.Parse(oldContinuationFormat);
                CosmosArray continuations = (CosmosArray)cosmosObject["Continuation"];
                foreach (CosmosElement continuation in continuations)
                {
                    CosmosObject continuationObject = (CosmosObject)continuation;
                    CosmosObject rangeObject = (CosmosObject)continuationObject["range"];
                    string min = ((CosmosString)rangeObject["min"]).Value;
                    string max = ((CosmosString)rangeObject["max"]).Value;
                    CosmosElement token = continuationObject["token"];
                    FeedRangeInternal feedRange = new FeedRangeEpk(new Documents.Routing.Range<string>(min, max, isMinInclusive: true, isMaxInclusive: false));
                    ReadFeedState state;
                    if (token is CosmosNull)
                    {
                        state = ReadFeedState.Beginning();
                    }
                    else
                    {
                        CosmosString tokenAsString = (CosmosString)token;
                        state = ReadFeedState.Continuation(CosmosElement.Parse(tokenAsString.Value));
                    }

                    FeedRangeState<ReadFeedState> feedRangeState = new FeedRangeState<ReadFeedState>(feedRange, state);
                    feedRangeStates.Add(ReadFeedFeedRangeStateSerializer.ToCosmosElement(feedRangeState));
                }
            }

            // From here on, continuationToken is always in the new array format.
            CosmosArray cosmosArrayContinuationTokens = CosmosArray.Create(feedRangeStates);
            continuationToken = cosmosArrayContinuationTokens.ToString();
        }
    }

    TryCatch<ReadFeedCrossFeedRangeState> monadicReadFeedState;
    if (continuationToken == null)
    {
        // No continuation: derive the starting feed range from the request options
        // (partition key wins over an explicit feed range; otherwise read everything).
        FeedRange feedRange;
        if ((this.queryRequestOptions != null) && this.queryRequestOptions.PartitionKey.HasValue)
        {
            feedRange = new FeedRangePartitionKey(this.queryRequestOptions.PartitionKey.Value);
        }
        else if ((this.queryRequestOptions != null) && (this.queryRequestOptions.FeedRange != null))
        {
            feedRange = this.queryRequestOptions.FeedRange;
        }
        else
        {
            feedRange = FeedRangeEpk.FullRange;
        }

        monadicReadFeedState = TryCatch<ReadFeedCrossFeedRangeState>.FromResult(ReadFeedCrossFeedRangeState.CreateFromBeginning(feedRange));
    }
    else
    {
        monadicReadFeedState = ReadFeedCrossFeedRangeState.Monadic.Parse(continuationToken);
    }

    // A malformed continuation is surfaced lazily: the failed TryCatch is stored and the
    // exception materializes when the enumerator is first used.
    if (monadicReadFeedState.Failed)
    {
        this.monadicEnumerator = TryCatch<CrossPartitionReadFeedAsyncEnumerator>.FromException(monadicReadFeedState.Exception);
    }
    else
    {
        this.monadicEnumerator = TryCatch<CrossPartitionReadFeedAsyncEnumerator>.FromResult(
            CrossPartitionReadFeedAsyncEnumerator.Create(
                documentContainer,
                new CrossFeedRangeState<ReadFeedState>(monadicReadFeedState.Result.FeedRangeStates),
                readFeedPaginationOptions,
                cancellationToken));
    }

    this.hasMoreResults = true;
}
/// <summary>
/// Translates a raw <c>ResponseMessage</c> into a typed <c>QueryPage</c>, or a failed
/// <c>TryCatch</c> carrying the service exception. The response message is disposed before
/// returning (the <c>using</c> below takes ownership).
/// </summary>
/// <param name="requestOptions">Request options; its serialization format is passed to the parser.
/// NOTE(review): dereferenced unconditionally — assumes callers never pass null; confirm.</param>
/// <param name="resourceType">Resource type used when parsing elements out of the REST payload.</param>
/// <param name="cosmosResponseMessage">Raw response; disposed by this method.</param>
/// <param name="trace">Trace to which query metrics and a child scope are attached.</param>
private static TryCatch<QueryPage> GetCosmosElementResponse(
    QueryRequestOptions requestOptions,
    ResourceType resourceType,
    ResponseMessage cosmosResponseMessage,
    ITrace trace)
{
    using (ITrace getCosmosElementResponse = trace.StartChild("Get Cosmos Element Response", TraceComponent.Json, Tracing.TraceLevel.Info))
    {
        using (cosmosResponseMessage)
        {
            // Attach query metrics to the trace when the backend supplied them.
            // Lazy so the metrics text is only parsed if the trace is actually inspected.
            if (cosmosResponseMessage.Headers.QueryMetricsText != null)
            {
                QueryMetricsTraceDatum datum = new QueryMetricsTraceDatum(
                    new Lazy<QueryMetrics>(() => new QueryMetrics(
                        cosmosResponseMessage.Headers.QueryMetricsText,
                        IndexUtilizationInfo.Empty,
                        ClientSideMetrics.Empty)));
                trace.AddDatum("Query Metrics", datum);
            }

            // Non-success responses become a failed TryCatch; prefer the already-built
            // CosmosException, otherwise synthesize one from the headers.
            if (!cosmosResponseMessage.IsSuccessStatusCode)
            {
                CosmosException exception = cosmosResponseMessage.CosmosException ?? new CosmosException(
                    cosmosResponseMessage.ErrorMessage,
                    cosmosResponseMessage.StatusCode,
                    (int)cosmosResponseMessage.Headers.SubStatusCode,
                    cosmosResponseMessage.Headers.ActivityId,
                    cosmosResponseMessage.Headers.RequestCharge);
                return (TryCatch<QueryPage>.FromException(exception));
            }

            // Avoid an extra copy when the content already is a MemoryStream.
            // NOTE(review): CopyTo leaves Position at the end of the copied stream —
            // presumably ParseElementsFromRestStream reads the underlying buffer rather
            // than from Position; confirm.
            if (!(cosmosResponseMessage.Content is MemoryStream memoryStream))
            {
                memoryStream = new MemoryStream();
                cosmosResponseMessage.Content.CopyTo(memoryStream);
            }

            long responseLengthBytes = memoryStream.Length;
            CosmosArray documents = CosmosQueryClientCore.ParseElementsFromRestStream(
                memoryStream,
                resourceType,
                requestOptions.CosmosSerializationFormatOptions);

            // A continuation header becomes the next page's QueryState; absence means last page.
            QueryState queryState;
            if (cosmosResponseMessage.Headers.ContinuationToken != null)
            {
                queryState = new QueryState(CosmosString.Create(cosmosResponseMessage.Headers.ContinuationToken));
            }
            else
            {
                queryState = default;
            }

            // Forward every response header except those explicitly banned from pages.
            Dictionary<string, string> additionalHeaders = new Dictionary<string, string>();
            foreach (string key in cosmosResponseMessage.Headers)
            {
                if (!QueryPage.BannedHeaders.Contains(key))
                {
                    additionalHeaders[key] = cosmosResponseMessage.Headers[key];
                }
            }

            // Execution info header is optional; deserialize lazily on first access.
            Lazy<CosmosQueryExecutionInfo> cosmosQueryExecutionInfo = default;
            if (cosmosResponseMessage.Headers.TryGetValue(QueryExecutionInfoHeader, out string queryExecutionInfoString))
            {
                cosmosQueryExecutionInfo = new Lazy<CosmosQueryExecutionInfo>(
                    () => JsonConvert.DeserializeObject<CosmosQueryExecutionInfo>(queryExecutionInfoString));
            }

            QueryPage response = new QueryPage(
                documents,
                cosmosResponseMessage.Headers.RequestCharge,
                cosmosResponseMessage.Headers.ActivityId,
                responseLengthBytes,
                cosmosQueryExecutionInfo,
                disallowContinuationTokenMessage: null,
                additionalHeaders,
                queryState);
            return (TryCatch<QueryPage>.FromResult(response));
        }
    }
}
/// <summary>
/// Creates the distinct-query execution component: parses the (optional) continuation into a
/// <c>DistinctContinuationToken</c>, rebuilds the distinct map from its token, rebuilds the
/// source component from its token, and composes the three into the final component.
/// Any malformed token yields a failed <c>TryCatch</c> rather than a thrown exception.
/// </summary>
/// <param name="requestContinuation">Continuation from a prior execution, or null to start fresh.</param>
/// <param name="tryCreateSourceAsync">Factory that builds the upstream component from its continuation.</param>
/// <param name="distinctQueryType">Kind of distinct semantics the query requires.</param>
public static async Task<TryCatch<IDocumentQueryExecutionComponent>> TryCreateAsync(
    CosmosElement requestContinuation,
    Func<CosmosElement, Task<TryCatch<IDocumentQueryExecutionComponent>>> tryCreateSourceAsync,
    DistinctQueryType distinctQueryType)
{
    if (tryCreateSourceAsync == null)
    {
        throw new ArgumentNullException(nameof(tryCreateSourceAsync));
    }

    // No continuation => start with an empty token pair; otherwise the continuation must parse.
    DistinctContinuationToken distinctContinuationToken;
    if (requestContinuation == null)
    {
        distinctContinuationToken = new DistinctContinuationToken(
            sourceToken: null,
            distinctMapToken: null);
    }
    else if (!DistinctContinuationToken.TryParse(requestContinuation, out distinctContinuationToken))
    {
        return TryCatch<IDocumentQueryExecutionComponent>.FromException(
            new MalformedContinuationTokenException(
                $"Invalid {nameof(DistinctContinuationToken)}: {requestContinuation}"));
    }

    // Rehydrate the distinct map (a null map token means a brand-new map).
    CosmosElement distinctMapToken = distinctContinuationToken.DistinctMapToken != null
        ? CosmosString.Create(distinctContinuationToken.DistinctMapToken)
        : null;
    TryCatch<DistinctMap> tryCreateDistinctMap = DistinctMap.TryCreate(
        distinctQueryType,
        distinctMapToken);
    if (!tryCreateDistinctMap.Succeeded)
    {
        return TryCatch<IDocumentQueryExecutionComponent>.FromException(tryCreateDistinctMap.Exception);
    }

    // Rehydrate the source component's continuation; a present-but-unparsable token is malformed.
    CosmosElement sourceToken = null;
    if ((distinctContinuationToken.SourceToken != null)
        && !CosmosElement.TryParse(distinctContinuationToken.SourceToken, out sourceToken))
    {
        return TryCatch<IDocumentQueryExecutionComponent>.FromException(
            new MalformedContinuationTokenException(
                $"Invalid Source Token: {distinctContinuationToken.SourceToken}"));
    }

    TryCatch<IDocumentQueryExecutionComponent> tryCreateSource = await tryCreateSourceAsync(sourceToken);
    if (!tryCreateSource.Succeeded)
    {
        return TryCatch<IDocumentQueryExecutionComponent>.FromException(tryCreateSource.Exception);
    }

    return TryCatch<IDocumentQueryExecutionComponent>.FromResult(
        new ClientDistinctDocumentQueryExecutionComponent(
            distinctQueryType,
            tryCreateDistinctMap.Result,
            tryCreateSource.Result));
}