/// <summary>
/// Creates a "managed" HashPartitionResolver that also takes care of creating collections, and cloning collection
/// properties like stored procedures, offer type and indexing policy, then registers it with the client.
/// </summary>
/// <param name="database">The database to run the samples on.</param>
/// <returns>The created HashPartitionResolver.</returns>
private ManagedHashPartitionResolver InitializeManagedHashResolver(Database database)
{
    // Collections created by the resolver are provisioned at the S2 offer level.
    var collectionSpec = new DocumentCollectionSpec { OfferType = "S2" };

    var resolver = new ManagedHashPartitionResolver(
        u => ((UserProfile)u).UserId,
        this.client,
        database,
        3,
        null,
        collectionSpec);

    this.client.PartitionResolvers[database.SelfLink] = resolver;
    return resolver;
}
        /// <summary>
        /// Internal method to rebalance data across a different number of partitions.
        /// Routes traffic through a transitionary resolver while documents are moved,
        /// then installs the new resolver once the migration completes.
        /// </summary>
        /// <param name="newPartitionCount">The target number of partitions. Zero is treated as a no-op.</param>
        /// <returns>The Task object for the asynchronous execution, yielding the number of documents moved.</returns>
        private async Task<int> RepartitionData(int newPartitionCount)
        {
            // Nothing to rebalance towards; leave the current resolver untouched.
            if (newPartitionCount == 0)
            {
                return 0;
            }

            ManagedHashPartitionResolver currentResolver = (ManagedHashPartitionResolver)this.Client.PartitionResolvers[this.Database.SelfLink];

            var nextPartitionResolver = new ManagedHashPartitionResolver(
                currentResolver.PartitionKeyExtractor,
                this.Client,
                this.Database,
                newPartitionCount);

            // While the migration is in flight, serve reads/writes through a resolver that
            // understands both the old and the new partitioning schemes.
            TransitionHashPartitionResolver transitionaryResolver = new TransitionHashPartitionResolver(
                currentResolver, 
                nextPartitionResolver,
                this.ReadMode);
            
            this.Client.PartitionResolvers[this.Database.SelfLink] = transitionaryResolver;

            // Move data between partitions. Here it's one by one, but you can change this to implement inserts
            // in bulk using stored procedures (bulkImport and bulkDelete), or run them in parallel. Another 
            // improvement to this would be push down the check for partitioning function down to the individual
            // collections as a LINQ/SQL query.
            int numberOfMovedDocuments = 0;
            foreach (string collectionLink in currentResolver.CollectionLinks)
            {
                ResourceFeedReader<Document> feedReader = this.Client.CreateDocumentFeedReader(collectionLink);

                while (feedReader.HasMoreResults)
                {
                    foreach (Document document in await feedReader.ExecuteNextAsync())
                    {
                        object partitionKey = nextPartitionResolver.GetPartitionKey(document);
                        string newCollectionLink = nextPartitionResolver.ResolveForCreate(partitionKey);

                        if (newCollectionLink != collectionLink)
                        {
                            numberOfMovedDocuments++;

                            // Create the copy in the target collection BEFORE deleting the original:
                            // if the process fails between the two calls, we are left with a
                            // recoverable duplicate instead of losing the document entirely.
                            await this.Client.CreateDocumentAsync(newCollectionLink, document);
                            await this.Client.DeleteDocumentAsync(document.SelfLink);
                        }
                    }
                }
            }

            this.Client.PartitionResolvers[this.Database.SelfLink] = nextPartitionResolver;
            return numberOfMovedDocuments;
        }