/// <summary>
/// Internal method to rebalance data across a different number of partitions.
/// </summary>
/// <param name="newPartitionCount">The target number of partitions.</param>
/// <returns>A Task that completes with the number of documents moved between partitions.</returns>
private async Task<int> RepartitionData(int newPartitionCount)
{
    // No-op, just delete the last collection.
    if (newPartitionCount == 0)
    {
        return 0;
    }

    ManagedHashPartitionResolver currentResolver =
        (ManagedHashPartitionResolver)this.Client.PartitionResolvers[this.Database.SelfLink];

    var nextPartitionResolver = new ManagedHashPartitionResolver(
        currentResolver.PartitionKeyExtractor,
        this.Client,
        this.Database,
        newPartitionCount,
        collectionIdPrefix: collectionIdPrefix);

    // While documents are in flight, route requests through a transitionary resolver that
    // knows about both the old and the new layout, so every document stays reachable.
    TransitionHashPartitionResolver transitionaryResolver = new TransitionHashPartitionResolver(
        currentResolver,
        nextPartitionResolver,
        this.ReadMode);

    this.Client.PartitionResolvers[this.Database.SelfLink] = transitionaryResolver;

    // Move data between partitions. Here it's one by one, but you can change this to implement inserts
    // in bulk using stored procedures (bulkImport and bulkDelete), or run them in parallel. Another
    // improvement to this would be push down the check for partitioning function down to the individual
    // collections as a LINQ/SQL query.
    int numberOfMovedDocuments = 0;
    foreach (string collectionLink in currentResolver.CollectionLinks)
    {
        ResourceFeedReader<Document> feedReader = this.Client.CreateDocumentFeedReader(collectionLink);
        while (feedReader.HasMoreResults)
        {
            foreach (Document document in await feedReader.ExecuteNextAsync())
            {
                object partitionKey = nextPartitionResolver.GetPartitionKey(document);
                string newCollectionLink = nextPartitionResolver.ResolveForCreate(partitionKey);
                if (newCollectionLink != collectionLink)
                {
                    numberOfMovedDocuments++;

                    // BUGFIX: create the copy in the target collection BEFORE deleting the
                    // original. The previous order (delete, then create) would permanently
                    // lose the document if the create failed mid-move.
                    await this.Client.CreateDocumentAsync(newCollectionLink, document);
                    await this.Client.DeleteDocumentAsync(document.SelfLink);
                }
            }
        }
    }

    // Migration complete: swap in the resolver for the new partition layout.
    this.Client.PartitionResolvers[this.Database.SelfLink] = nextPartitionResolver;
    return numberOfMovedDocuments;
}
/// <summary>
/// Verifies retry behavior when the store model fails with "read session not available":
/// a transient failure is retried and the request succeeds (2 store calls), while a
/// persistent failure is retried exactly once more and then surfaces as NotFound.
/// NOTE(review): the Times.Exactly counts on the second mock are cumulative across the
/// read, query, and feed-reader scenarios (2, then 4, then 6 total calls).
/// </summary>
public async Task RetryOnReadSessionNotAvailableMockTestAsync()
{
    ConnectionPolicy connectionPolicy = new ConnectionPolicy
    {
        ConnectionMode = ConnectionMode.Direct,
        ConnectionProtocol = Protocol.Tcp,
        PreferredLocations = { "West US" },
    };

    DocumentClient client = new DocumentClient(
        new Uri(ConfigurationManager.AppSettings["GatewayEndpoint"]),
        ConfigurationManager.AppSettings["MasterKey"],
        connectionPolicy,
        ConsistencyLevel.Session);

    // Resolve the database account before the store model is replaced with a mock.
    await client.GetDatabaseAccountAsync();

    // Set up the mock to throw exception on first call, test retry happens and request succeeds.
    Mock<IStoreModel> mockStoreModel = new Mock<IStoreModel>();
    mockStoreModel.Setup(model => model.ProcessMessageAsync(It.IsAny<DocumentServiceRequest>(), default(CancellationToken)))
        .Returns<DocumentServiceRequest, CancellationToken>((r, cancellationToken) => this.ProcessMessageForRead(client, r));
    client.StoreModel = mockStoreModel.Object;
    client.GatewayStoreModel = mockStoreModel.Object;

    ResourceResponse<CosmosDatabaseSettings> dbResponse = await client.ReadDatabaseAsync("/dbs/id1");
    Assert.IsNotNull(dbResponse);

    // One failed attempt plus one successful retry = exactly two store calls.
    mockStoreModel.Verify(model => model.ProcessMessageAsync(It.IsAny<DocumentServiceRequest>(), default(CancellationToken)), Times.Exactly(2));

    // Set up the mock to always throw exception, test retry happens only twice and request fails.
    mockStoreModel = new Mock<IStoreModel>();
    mockStoreModel.Setup(model => model.ProcessMessageAsync(It.IsAny<DocumentServiceRequest>(), default(CancellationToken)))
        .Throws(this.CreateReadSessionNotAvailableException());
    client.StoreModel = mockStoreModel.Object;
    client.GatewayStoreModel = mockStoreModel.Object;

    bool failed = false;
    try
    {
        dbResponse = await client.ReadDatabaseAsync("/dbs/id1");
        // Not expected to be reached; fails the test if no exception was thrown.
        Assert.IsNull(dbResponse);
    }
    catch (DocumentClientException e)
    {
        failed = true;
        Assert.AreEqual(HttpStatusCode.NotFound, e.StatusCode);
    }

    mockStoreModel.Verify(model => model.ProcessMessageAsync(It.IsAny<DocumentServiceRequest>(), default(CancellationToken)), Times.Exactly(2));
    Assert.IsTrue(failed);

    failed = false;
    try
    {
        // Synchronous LINQ enumeration surfaces the failure wrapped in an AggregateException.
        IQueryable<dynamic> dbIdQuery = client.CreateDatabaseQuery(@"select * from root r").AsQueryable();
        Assert.AreEqual(0, dbIdQuery.AsEnumerable().Count());
    }
    catch (AggregateException e)
    {
        DocumentClientException docExp = e.InnerExceptions[0] as DocumentClientException;
        Assert.IsNotNull(docExp);
        Assert.AreEqual(HttpStatusCode.NotFound, docExp.StatusCode);
        failed = true;
    }

    // Two more store calls: cumulative total of 4 on this mock instance.
    mockStoreModel.Verify(model => model.ProcessMessageAsync(It.IsAny<DocumentServiceRequest>(), default(CancellationToken)), Times.Exactly(4));
    Assert.IsTrue(failed);

    failed = false;
    try
    {
        ResourceFeedReader<CosmosDatabaseSettings> dbFeed = client.CreateDatabaseFeedReader();
        FeedResponse<CosmosDatabaseSettings> response = await dbFeed.ExecuteNextAsync();
        // Not expected to be reached; fails the test if no exception was thrown.
        Assert.AreEqual(1, response.Count);
        Assert.AreEqual(false, dbFeed.HasMoreResults);
    }
    catch (DocumentClientException docExp)
    {
        Assert.IsNotNull(docExp);
        Assert.AreEqual(HttpStatusCode.NotFound, docExp.StatusCode);
        failed = true;
    }

    // Two more store calls: cumulative total of 6.
    mockStoreModel.Verify(model => model.ProcessMessageAsync(It.IsAny<DocumentServiceRequest>(), default(CancellationToken)), Times.Exactly(6));
    Assert.IsTrue(failed);
}
/// <summary>
/// Entry point: repartitions documents in a DocumentDB database from
/// options.CurrentCollections hash partitions to options.NewCollections partitions,
/// logging the document distribution before and after the move.
/// </summary>
/// <param name="args">Command-line arguments parsed into an Options instance.</param>
/// <returns>0 on success; 1 for invalid arguments, 2 for a missing database, 3 for too few collections.</returns>
static int Main(string[] args)
{
    Options options = new Options();
    if (!CommandLine.Parser.Default.ParseArguments(args, options))
    {
        Console.WriteLine("Invalid arguments");
        return 1;
    }

    using (DocumentClient client = new DocumentClient(
        new Uri(options.Endpoint),
        options.AuthKey,
        new ConnectionPolicy { ConnectionMode = ConnectionMode.Direct, ConnectionProtocol = Protocol.Tcp }))
    {
        Database database = client.CreateDatabaseQuery()
            .Where(d => d.Id == options.Database)
            .AsEnumerable()
            .FirstOrDefault();
        if (database == null)
        {
            Console.WriteLine("Cannot find database " + options.Database);
            return 2;
        }

        List<DocumentCollection> collections = client.ReadDocumentCollectionFeedAsync(database.SelfLink).Result.ToList();

        // Both the source and target layouts must fit inside the pre-created collections.
        int minimumRequiredCollections = Math.Max(options.NewCollections, options.CurrentCollections);
        if (collections.Count < minimumRequiredCollections)
        {
            Console.WriteLine("At least {0} collections must be pre-created", minimumRequiredCollections);
            return 3;
        }

        Console.WriteLine("Current distribution of documents across collections:");
        LogDocumentCountsPerCollection(client, database).Wait();
        Console.WriteLine();

        HashPartitionResolver currentPartitionResolver = new HashPartitionResolver(
            options.PartitionKeyName,
            collections.Take(options.CurrentCollections).Select(c => c.SelfLink));
        HashPartitionResolver nextPartitionResolver = new HashPartitionResolver(
            options.PartitionKeyName,
            collections.Take(options.NewCollections).Select(c => c.SelfLink));

        int numberOfMovedDocuments = 0;

        // Walk each source collection in parallel; any document whose partition key now
        // hashes to a different collection is copied there and removed from the source.
        Parallel.ForEach(currentPartitionResolver.CollectionLinks, collectionLink =>
        {
            ResourceFeedReader<Document> feedReader = client.CreateDocumentFeedReader(
                collectionLink, new FeedOptions { MaxItemCount = -1 });
            while (feedReader.HasMoreResults)
            {
                foreach (Document document in DocumentClientHelper.ExecuteWithRetryAsync<FeedResponse<Document>>(
                    () => feedReader.ExecuteNextAsync()).Result)
                {
                    object partitionKey = nextPartitionResolver.GetPartitionKey(document);
                    string newCollectionLink = nextPartitionResolver.ResolveForCreate(partitionKey);
                    if (newCollectionLink != collectionLink)
                    {
                        int count = Interlocked.Increment(ref numberOfMovedDocuments);

                        // BUGFIX: create the copy in the target collection BEFORE deleting the
                        // original. The previous order (delete, then create) would permanently
                        // lose the document if the create failed mid-move.
                        DocumentClientHelper.ExecuteWithRetryAsync(() => client.CreateDocumentAsync(newCollectionLink, document)).Wait();
                        DocumentClientHelper.ExecuteWithRetryAsync(() => client.DeleteDocumentAsync(document.SelfLink)).Wait();

                        if (count % 100 == 0)
                        {
                            // Report the locally captured count; re-reading the shared counter
                            // here would race with the other parallel workers.
                            Console.WriteLine("Moved {0} documents between partitions", count);
                        }
                    }
                }
            }
        });

        Console.WriteLine();
        Console.WriteLine("Moved {0} documents between partitions.", numberOfMovedDocuments);
        Console.WriteLine();
        Console.WriteLine("Current distribution of documents across collections:");
        LogDocumentCountsPerCollection(client, database).Wait();
        Console.WriteLine();
    }

    return 0;
}