/// <summary>
/// Internal method to rebalance data across a different number of partitions.
/// </summary>
/// <param name="newPartitionCount">The target number of partitions.</param>
/// <returns>A Task yielding the number of documents moved to a different collection.</returns>
private async Task<int> RepartitionData(int newPartitionCount)
{
    // No-op, just delete the last collection.
    if (newPartitionCount == 0)
    {
        return 0;
    }

    ManagedHashPartitionResolver currentResolver =
        (ManagedHashPartitionResolver)this.Client.PartitionResolvers[this.Database.SelfLink];

    var nextPartitionResolver = new ManagedHashPartitionResolver(
        currentResolver.PartitionKeyExtractor,
        this.Client,
        this.Database,
        newPartitionCount,
        collectionIdPrefix: collectionIdPrefix);

    // While documents are in flight between the old and new layouts, route requests
    // through a transitional resolver that is aware of both.
    TransitionHashPartitionResolver transitionaryResolver = new TransitionHashPartitionResolver(
        currentResolver,
        nextPartitionResolver,
        this.ReadMode);

    this.Client.PartitionResolvers[this.Database.SelfLink] = transitionaryResolver;

    // Move data between partitions. Here it's one by one, but you can change this to implement inserts
    // in bulk using stored procedures (bulkImport and bulkDelete), or run them in parallel. Another
    // improvement to this would be push down the check for partitioning function down to the individual
    // collections as a LINQ/SQL query.
    int numberOfMovedDocuments = 0;
    foreach (string collectionLink in currentResolver.CollectionLinks)
    {
        ResourceFeedReader<Document> feedReader = this.Client.CreateDocumentFeedReader(collectionLink);
        while (feedReader.HasMoreResults)
        {
            foreach (Document document in await feedReader.ExecuteNextAsync())
            {
                object partitionKey = nextPartitionResolver.GetPartitionKey(document);
                string newCollectionLink = nextPartitionResolver.ResolveForCreate(partitionKey);
                if (newCollectionLink != collectionLink)
                {
                    numberOfMovedDocuments++;

                    // Create the copy in the target collection BEFORE deleting the original:
                    // if the create fails mid-move, the document still exists in its old
                    // collection and the repartition can be retried. (Deleting first would
                    // lose the document permanently on a failed create. The cost is a brief
                    // window where the document exists in both collections.)
                    await this.Client.CreateDocumentAsync(newCollectionLink, document);
                    await this.Client.DeleteDocumentAsync(document.SelfLink);
                }
            }
        }
    }

    // Cutover complete: retire the transitional resolver.
    this.Client.PartitionResolvers[this.Database.SelfLink] = nextPartitionResolver;
    return numberOfMovedDocuments;
}
/// <summary>
/// Internal method to rebalance data across a different number of partitions.
/// </summary>
/// <param name="newPartitionCount">The target number of partitions.</param>
/// <returns>A Task yielding the number of documents moved to a different collection.</returns>
private async Task<int> RepartitionData(int newPartitionCount)
{
    // No-op, just delete the last collection.
    if (newPartitionCount == 0)
    {
        return 0;
    }

    ManagedHashPartitionResolver currentResolver =
        (ManagedHashPartitionResolver)this.Client.PartitionResolvers[this.Database.SelfLink];

    var nextPartitionResolver = new ManagedHashPartitionResolver(
        currentResolver.PartitionKeyExtractor,
        this.Client,
        this.Database,
        newPartitionCount,
        collectionIdPrefix: collectionIdPrefix);

    // While documents are in flight between the old and new layouts, route requests
    // through a transitional resolver that is aware of both.
    TransitionHashPartitionResolver transitionaryResolver = new TransitionHashPartitionResolver(
        currentResolver,
        nextPartitionResolver,
        this.ReadMode);

    this.Client.PartitionResolvers[this.Database.SelfLink] = transitionaryResolver;

    // Move data between partitions. Here it's one by one, but you can change this to implement inserts
    // in bulk using stored procedures (bulkImport and bulkDelete), or run them in parallel. Another
    // improvement to this would be push down the check for partitioning function down to the individual
    // collections as a LINQ/SQL query.
    int numberOfMovedDocuments = 0;
    foreach (string collectionLink in currentResolver.CollectionLinks)
    {
        ResourceFeedReader<Document> feedReader = this.Client.CreateDocumentFeedReader(collectionLink);
        while (feedReader.HasMoreResults)
        {
            foreach (Document document in await feedReader.ExecuteNextAsync())
            {
                object partitionKey = nextPartitionResolver.GetPartitionKey(document);
                string newCollectionLink = nextPartitionResolver.ResolveForCreate(partitionKey);
                if (newCollectionLink != collectionLink)
                {
                    numberOfMovedDocuments++;

                    // Create the copy in the target collection BEFORE deleting the original:
                    // if the create fails mid-move, the document still exists in its old
                    // collection and the repartition can be retried. (Deleting first would
                    // lose the document permanently on a failed create. The cost is a brief
                    // window where the document exists in both collections.)
                    await this.Client.CreateDocumentAsync(newCollectionLink, document);
                    await this.Client.DeleteDocumentAsync(document.SelfLink);
                }
            }
        }
    }

    // Cutover complete: retire the transitional resolver.
    this.Client.PartitionResolvers[this.Database.SelfLink] = nextPartitionResolver;
    return numberOfMovedDocuments;
}