//check if short url existed
/// <summary>
/// Returns true when a record with the given short url already exists in the
/// "timestamp" database / "url" container.
/// </summary>
/// <param name="input">The short url value to look up (user-supplied).</param>
/// <returns>True if at least one matching record exists, otherwise false.</returns>
private static async Task<bool> ShortenExistAsync(string input)
{
    //create endpoint
    string cosmos_endpoint = Environment.GetEnvironmentVariable("cosmos_endpoint");
    string cosmos_key = Environment.GetEnvironmentVariable("cosmos_key");

    // NOTE(review): CosmosClient is designed to be a process-wide singleton;
    // creating one per call is expensive. It is at least disposed here —
    // consider hoisting it to a static field.
    using (CosmosClient cosmosClient = new CosmosClient(cosmos_endpoint, cosmos_key))
    {
        Container container = cosmosClient.GetContainer("timestamp", "url");

        //query against cosmos DB
        // Parameterized query: never concatenate user input into the query text
        // (the original string.Format was injectable).
        QueryDefinition queryDefinition = new QueryDefinition(
                "SELECT c.url,c.shorturl,c.dateGenerated FROM c WHERE c.shorturl = @shorturl")
            .WithParameter("@shorturl", input);

        using (FeedIterator<GeneratedResult> queryResultSetIterator =
            container.GetItemQueryIterator<GeneratedResult>(queryDefinition))
        {
            //check number of records
            // Return as soon as any page contains a match. The original code
            // overwrote the count on every page, so only the final (often empty)
            // page was actually inspected.
            while (queryResultSetIterator.HasMoreResults)
            {
                FeedResponse<GeneratedResult> currentRecord = await queryResultSetIterator.ReadNextAsync();
                if (currentRecord.Count > 0)
                {
                    return true;
                }
            }
        }
    }

    return false;
}
// </ItemOperationsWithValidPartitionKeyValue>
/// <summary>
/// The function demonstrates migrating documents that were inserted without a value for partition key, and those inserted
/// pre-migration to other logical partitions, those with a value for partition key.
/// </summary>
// <MigratedItemsFromNonePartitionKeyToValidPartitionKeyValue>
private static async Task MigratedItemsFromNonePartitionKeyToValidPartitionKeyValue(Container container)
{
    // Pre-create a few items in the container to demo the migration
    const int ItemsToCreate = 4;

    // Insert a few items with no Partition Key
    for (int i = 0; i < ItemsToCreate; i++)
    {
        string itemid = Guid.NewGuid().ToString();
        DeviceInformationItem itemWithoutPK = GetDeviceWithNoPartitionKey(itemid);
        // Response is intentionally discarded; the demo only needs the item created.
        await container.CreateItemAsync<DeviceInformationItem>(
            partitionKey: PartitionKey.None,
            item: itemWithoutPK);
    }

    // Query items on the container that have no partition key value by supplying NonePartitionKeyValue
    // The operation is made in batches to not lose work in case of partial execution
    int resultsFetched = 0;
    QueryDefinition sql = new QueryDefinition("select * from r");
    using (FeedIterator<DeviceInformationItem> setIterator = container.GetItemQueryIterator<DeviceInformationItem>(
        sql,
        requestOptions: new QueryRequestOptions()
        {
            PartitionKey = PartitionKey.None,
            MaxItemCount = 2
        }))
    {
        while (setIterator.HasMoreResults)
        {
            FeedResponse<DeviceInformationItem> queryResponse = await setIterator.ReadNextAsync();
            resultsFetched += queryResponse.Count;

            // For the items returned with NonePartitionKeyValue
            foreach (DeviceInformationItem item in queryResponse)
            {
                if (item.DeviceId != null)
                {
                    // Using existing deviceID for partition key
                    item.PartitionKey = item.DeviceId;
                    Console.WriteLine("Migrating item {0} to Partition {1}", item.Id, item.DeviceId);

                    // Re-Insert into container with a partition key
                    // This could result in exception if the same item was inserted in a previous run of the program on existing container
                    // and the program stopped before the delete.
                    await container.CreateItemAsync<DeviceInformationItem>(
                        partitionKey: new PartitionKey(item.PartitionKey),
                        item: item);

                    // Deleting item from fixed container with CosmosContainerSettings.NonePartitionKeyValue.
                    await container.DeleteItemAsync<DeviceInformationItem>(
                        partitionKey: PartitionKey.None,
                        id: item.Id);
                }
            }
        }
    }
}
/// <summary>
/// Verifies that a query pinned to a single partition key value completes in a
/// single round trip (no continuation token), both when it matches a document
/// and when it matches nothing.
/// </summary>
public async Task TestQuerySinglePartitionKeyAsync()
{
    string[] inputDocs = new[]
    {
        @"{""pk"":""doc1""}",
        @"{""pk"":""doc2""}",
        @"{""pk"":""doc3""}",
        @"{""pk"":""doc4""}",
        @"{""pk"":""doc5""}",
        @"{""pk"":""doc6""}",
    };

    await this.CreateIngestQueryDeleteAsync(
        ConnectionModes.Direct | ConnectionModes.Gateway,
        CollectionTypes.SinglePartition | CollectionTypes.MultiPartition,
        inputDocs,
        ImplementationAsync,
        partitionKey: "/pk");

    async Task ImplementationAsync(Container container, IReadOnlyList<CosmosObject> documents)
    {
        // Query with partition key should be done in one round trip.
        // FIX: FeedIterator is IDisposable; the original leaked both iterators.
        using (FeedIterator<dynamic> resultSetIterator = container.GetItemQueryIterator<dynamic>(
            "SELECT * FROM c WHERE c.pk = 'doc5'"))
        {
            FeedResponse<dynamic> response = await resultSetIterator.ReadNextAsync();
            Assert.AreEqual(1, response.Count());
            Assert.IsNull(response.ContinuationToken);
        }

        // A miss on the partition key value must likewise finish in one round trip.
        using (FeedIterator<dynamic> resultSetIterator = container.GetItemQueryIterator<dynamic>(
            "SELECT * FROM c WHERE c.pk = 'doc10'"))
        {
            FeedResponse<dynamic> response = await resultSetIterator.ReadNextAsync();
            Assert.AreEqual(0, response.Count());
            Assert.IsNull(response.ContinuationToken);
        }
    }
}
/// <summary>
/// Validates that a continuation token captured from a LINQ-generated paged
/// query resumes correctly, both with an async FeedIterator and with blocking
/// synchronous LINQ execution.
/// </summary>
public async Task ItemLINQQueryWithContinuationTokenTest()
{
    //Creating items for query.
    IList<ToDoActivity> itemList = await ToDoActivity.CreateRandomItems(container: this.Container, pkCount: 10, perPKItemCount: 1, randomPartitionKey: true);

    QueryRequestOptions requestOptions = new QueryRequestOptions
    {
        MaxConcurrency = 1,
        MaxItemCount = 5,
    };

    IOrderedQueryable<ToDoActivity> linqQueryable = this.Container.GetItemLinqQueryable<ToDoActivity>(requestOptions: requestOptions);
    FeedIterator<ToDoActivity> iterator = linqQueryable.Where(item => (item.taskNum < 100)).ToFeedIterator();

    // Read pages until the first non-empty one, remembering its continuation token.
    int initialCount = 0;
    string continuation = null;
    while (iterator.HasMoreResults)
    {
        FeedResponse<ToDoActivity> page = await iterator.ReadNextAsync();
        initialCount = page.Count();
        continuation = page.ContinuationToken;
        if (initialCount > 0)
        {
            break;
        }
    }

    //Test continuationToken with LINQ query generation and asynchronous feedIterator execution.
    linqQueryable = this.Container.GetItemLinqQueryable<ToDoActivity>(continuationToken: continuation, requestOptions: requestOptions);
    iterator = linqQueryable.Where(item => (item.taskNum < 100)).ToFeedIterator();

    int remainingCount = 0;
    while (iterator.HasMoreResults)
    {
        FeedResponse<ToDoActivity> page = await iterator.ReadNextAsync();
        remainingCount += page.Count();
    }
    Assert.AreEqual(10 - initialCount, remainingCount);

    //Test continuationToken with blocking LINQ execution
    linqQueryable = this.Container.GetItemLinqQueryable<ToDoActivity>(allowSynchronousQueryExecution: true, continuationToken: continuation, requestOptions: requestOptions);
    int syncExecutionCount = linqQueryable.Where(item => (item.taskNum < 100)).Count();
    Assert.AreEqual(10 - initialCount, syncExecutionCount);
}
/// <summary>
/// Count all elements from the container.
/// </summary>
/// <returns>The total number of items in the container.</returns>
public double CountSync()
{
    int result = 0;
    // FIX: the original used `if` instead of `while`, so only the first page
    // was counted and multi-page containers were under-reported. The iterator
    // is also IDisposable and was leaked.
    using (var queryResult = _container.GetItemQueryIterator<T>())
    {
        while (queryResult.HasMoreResults)
        {
            // GetAwaiter().GetResult() instead of .Result so failures surface as
            // their real exception type, not AggregateException. This method's
            // contract is synchronous; prefer the async Count() where possible.
            FeedResponse<T> resultSet = queryResult.ReadNextAsync().GetAwaiter().GetResult();
            result += resultSet.Count();
        }
    }
    return result;
}
/// <summary>
/// Count all elements from the container.
/// </summary>
/// <returns>The total number of items in the container.</returns>
public async Task<double> Count()
{
    int result = 0;
    // FIX: the original used `if` instead of `while`, so only the first page
    // was counted and multi-page containers were under-reported. The iterator
    // is also IDisposable and was leaked.
    using (var queryResult = _container.GetItemQueryIterator<T>())
    {
        while (queryResult.HasMoreResults)
        {
            FeedResponse<T> resultSet = await queryResult.ReadNextAsync().ConfigureAwait(false);
            result += resultSet.Count();
        }
    }
    return result;
}
/// <summary>
/// Verifies that the conflicts feed iterator issues a ReadFeed request against the
/// Conflict resource type and correctly deserializes a conflict record from the
/// mocked response payload.
/// </summary>
public async Task CosmosConflictsIteratorBuildsSettings()
{
    // Canned service response: a single conflict ('Conflict1'), a Replace
    // operation on a trigger resource.
    string conflictResponsePayload = @"{ 'Data':[{ id: 'Conflict1', operationType: 'Replace', resourceType: 'trigger' }]}";

    CosmosClient mockClient = MockCosmosUtil.CreateMockCosmosClient(
        (cosmosClientBuilder) => cosmosClientBuilder.WithConnectionModeDirect());
    Container container = mockClient.GetContainer("database", "container");
    FeedIterator<ConflictProperties> feedIterator = container.Conflicts.GetConflictQueryIterator<ConflictProperties>();

    // Intercept the outgoing request, assert its shape, and hand back the canned payload.
    TestHandler testHandler = new TestHandler((request, cancellationToken) =>
    {
        // The conflicts iterator must translate into a partitioned ReadFeed on Conflict.
        Assert.IsTrue(request.IsPartitionedFeedOperation);
        Assert.AreEqual(OperationType.ReadFeed, request.OperationType);
        Assert.AreEqual(ResourceType.Conflict, request.ResourceType);

        ResponseMessage handlerResponse = TestHandler.ReturnSuccess().Result;
        // The stream (and its writer) are deliberately NOT disposed here:
        // the stream becomes the response content and must stay open for the
        // SDK to read it. Flush + rewind so the payload is readable from position 0.
        MemoryStream stream = new MemoryStream();
        StreamWriter writer = new StreamWriter(stream);
        writer.Write(conflictResponsePayload);
        writer.Flush();
        stream.Position = 0;

        handlerResponse.Content = stream;
        return (Task.FromResult(handlerResponse));
    });
    mockClient.RequestHandler.InnerHandler = testHandler;

    FeedResponse<ConflictProperties> response = await feedIterator.ReadNextAsync();
    Assert.AreEqual(1, response.Count());

    // The payload's string fields must be mapped onto the typed conflict settings.
    ConflictProperties responseSettings = response.FirstOrDefault();
    Assert.IsNotNull(responseSettings);
    Assert.AreEqual("Conflict1", responseSettings.Id);
    Assert.AreEqual(Cosmos.OperationKind.Replace, responseSettings.OperationKind);
    Assert.AreEqual(typeof(TriggerProperties), responseSettings.ResourceType);
}
/// <summary>
/// Validates that a continuation token captured from the first non-empty page of a
/// LINQ-generated paged query resumes correctly: the remaining result count must
/// equal the filtered total minus the first page, for both asynchronous
/// FeedIterator execution and blocking synchronous LINQ execution.
/// </summary>
public async Task ItemLINQQueryWithContinuationTokenTest()
{
    // Creating items for query.
    IList <ToDoActivity> itemList = await ToDoActivity.CreateRandomItems(
        container : this.Container,
        pkCount : 10,
        perPKItemCount : 1,
        randomPartitionKey : true);

    // Expected total is computed client-side with the same predicate used in the query.
    IList <ToDoActivity> filteredList = itemList.Where(item => item.taskNum < 100).ToList();
    int filteredDocumentCount = filteredList.Count();
    Console.WriteLine($"Filtered List: {JsonConvert.SerializeObject(filteredList)}.");

    // MaxItemCount = 5 forces paging so a continuation token is actually produced.
    QueryRequestOptions queryRequestOptions = new QueryRequestOptions();
    queryRequestOptions.MaxConcurrency = 1;
    queryRequestOptions.MaxItemCount = 5;

    IOrderedQueryable <ToDoActivity> linqQueryable = this.Container.GetItemLinqQueryable <ToDoActivity>(requestOptions: queryRequestOptions);
    IQueryable <ToDoActivity> queriable = linqQueryable.Where(item => item.taskNum < 100);
    FeedIterator <ToDoActivity> feedIterator = queriable.ToFeedIterator();

    // Read pages until the first non-empty one, remembering its continuation token.
    int firstItemSet = 0;
    string continuationToken = null;
    while (feedIterator.HasMoreResults)
    {
        FeedResponse <ToDoActivity> feedResponse = await feedIterator.ReadNextAsync();
        firstItemSet = feedResponse.Count();
        continuationToken = feedResponse.ContinuationToken;
        Console.WriteLine($"First page: {JsonConvert.SerializeObject(feedResponse.Resource)}.");
        if (firstItemSet > 0)
        {
            break;
        }
    }

    // Rebuild the same LINQ query, this time resuming from the captured token.
    linqQueryable = this.Container.GetItemLinqQueryable <ToDoActivity>(
        continuationToken: continuationToken,
        requestOptions: queryRequestOptions);
    queriable = linqQueryable.Where(item => item.taskNum < 100);
    feedIterator = queriable.ToFeedIterator();

    // Test continuationToken with LINQ query generation and asynchronous feedIterator execution.
    int secondItemSet = 0;
    while (feedIterator.HasMoreResults)
    {
        FeedResponse <ToDoActivity> feedResponse = await feedIterator.ReadNextAsync();
        secondItemSet += feedResponse.Count();
        Console.WriteLine($"Second Async page: {JsonConvert.SerializeObject(feedResponse.Resource)}.");
    }

    Assert.AreEqual(
        filteredDocumentCount - firstItemSet,
        secondItemSet,
        "Failed to resume execution for async iterator.");

    // Test continuationToken with blocking LINQ execution
    linqQueryable = this.Container.GetItemLinqQueryable <ToDoActivity>(
        allowSynchronousQueryExecution: true,
        continuationToken: continuationToken,
        requestOptions: queryRequestOptions);
    List <ToDoActivity> secondSyncPage = linqQueryable.Where(item => item.taskNum < 100).ToList();
    Console.WriteLine($"Second Sync page: {JsonConvert.SerializeObject(secondSyncPage)}.");
    int linqExecutionItemCount = secondSyncPage.Count();

    Assert.AreEqual(
        filteredDocumentCount - firstItemSet,
        linqExecutionItemCount,
        "Failed to resume execution for sync iterator");
}
/// <summary>
/// Exercises LINQ query generation and execution: async FeedIterator execution
/// from a generated QueryDefinition, then synchronous LINQ execution without a
/// partition key, with a non-matching partition key, and with the correct one.
/// </summary>
public async Task ItemLINQQueryTest()
{
    //Creating items for query.
    IList<ToDoActivity> itemList = await ToDoActivity.CreateRandomItems(container: this.Container, pkCount: 2, perPKItemCount: 1, randomPartitionKey: true);

    IOrderedQueryable<ToDoActivity> linqQueryable = this.Container.GetItemLinqQueryable<ToDoActivity>();
    IQueryable<ToDoActivity> queriable = linqQueryable.Where(item => item.taskNum < 100);

    //V3 Asynchronous query execution with LINQ query generation sql text.
    FeedIterator<ToDoActivity> setIterator = this.Container.GetItemQueryIterator<ToDoActivity>(
        queriable.ToQueryDefinition(),
        requestOptions: new QueryRequestOptions() { MaxConcurrency = 2 });

    int resultsFetched = 0;
    while (setIterator.HasMoreResults)
    {
        FeedResponse<ToDoActivity> queryResponse = await setIterator.ReadNextAsync();
        resultsFetched += queryResponse.Count();

        foreach (ToDoActivity activity in queryResponse)
        {
            Assert.AreEqual(42, activity.taskNum);
        }
    }
    // FIX: this total-count assertion was inside the paging loop, so it could fire
    // after a first page holding fewer than 2 results and fail spuriously.
    Assert.AreEqual(2, resultsFetched);

    //LINQ query execution without partition key.
    linqQueryable = this.Container.GetItemLinqQueryable<ToDoActivity>(allowSynchronousQueryExecution: true);
    queriable = linqQueryable.Where(item => item.taskNum < 100);

    Assert.AreEqual(2, queriable.Count());
    // Materialize once instead of executing the (synchronous, blocking) query twice.
    List<ToDoActivity> results = queriable.ToList();
    Assert.AreEqual(itemList[0].id, results[0].id);
    Assert.AreEqual(itemList[1].id, results[1].id);

    //LINQ query execution with wrong partition key.
    linqQueryable = this.Container.GetItemLinqQueryable<ToDoActivity>(
        allowSynchronousQueryExecution: true,
        requestOptions: new QueryRequestOptions() { PartitionKey = new Cosmos.PartitionKey("test") });
    queriable = linqQueryable.Where(item => item.taskNum < 100);
    Assert.AreEqual(0, queriable.Count());

    //LINQ query execution with correct partition key.
    linqQueryable = this.Container.GetItemLinqQueryable<ToDoActivity>(
        allowSynchronousQueryExecution: true,
        requestOptions: new QueryRequestOptions
        {
            ConsistencyLevel = Cosmos.ConsistencyLevel.Eventual,
            PartitionKey = new Cosmos.PartitionKey(itemList[1].pk)
        });
    queriable = linqQueryable.Where(item => item.taskNum < 100);
    Assert.AreEqual(1, queriable.Count());
    Assert.AreEqual(itemList[1].id, queriable.ToList()[0].id);
}
/// <summary>
/// Verifies that a cross-partition TOP query never returns more documents than the
/// TOP count, across DISTINCT/non-DISTINCT variants, several TOP counts, and page
/// sizes, on both single- and multi-partition collections.
/// </summary>
public async Task TestQueryCrossPartitionTopAsync()
{
    // Seed derived from wall-clock so each run gets different documents but
    // the seed can be recovered from logs for reproduction.
    int seed = (int)(DateTime.UtcNow - new DateTime(1970, 1, 1)).TotalSeconds;
    uint numDocuments = 100;
    string partitionKey = "field_0";

    QueryOracleUtil util = new QueryOracle2(seed);
    IEnumerable<string> documentsToInsert = util.GetDocuments(numDocuments);

    await this.CreateIngestQueryDeleteAsync(
        ConnectionModes.Direct,
        CollectionTypes.SinglePartition | CollectionTypes.MultiPartition,
        documentsToInsert,
        ImplementationAsync,
        "/" + partitionKey);

    async Task ImplementationAsync(Container container, IReadOnlyList<CosmosObject> documents)
    {
        List<string> queryFormats = new List<string>()
        {
            "SELECT {0} TOP {1} * FROM c",
            // Can't do order by since order by needs to look at all partitions before returning a single document =>
            // thus we can't tell how many documents the SDK needs to recieve.
            //"SELECT {0} TOP {1} * FROM c ORDER BY c._ts",

            // Can't do aggregates since that also retrieves more documents than the user sees
            //"SELECT {0} TOP {1} VALUE AVG(c._ts) FROM c",
        };

        // Cross product of query shape x DISTINCT x TOP count x page size.
        foreach (string queryFormat in queryFormats)
        {
            foreach (bool useDistinct in new bool[] { true, false })
            {
                foreach (int topCount in new int[] { 0, 1, 10 })
                {
                    foreach (int pageSize in new int[] { 1, 10 })
                    {
                        // Run the query and use the query metrics to make sure the query didn't grab more documents
                        // than needed.
                        string query = string.Format(queryFormat, useDistinct ? "DISTINCT" : string.Empty, topCount);
                        // NOTE(review): feedOptions is built but not passed to the
                        // iterator below — presumably a leftover from the v2 SDK
                        // version of this test; confirm before removing.
                        FeedOptions feedOptions = new FeedOptions
                        {
                            MaxBufferedItemCount = 1000,
                        };

                        // Max DOP needs to be 0 since the query needs to run in serial =>
                        // otherwise the parallel code will prefetch from other partitions,
                        // since the first N-1 partitions might be empty.
                        using (FeedIterator<dynamic> documentQuery = container.GetItemQueryIterator<dynamic>(
                            query,
                            requestOptions: new QueryRequestOptions()
                            {
                                MaxConcurrency = 0,
                                MaxItemCount = pageSize
                            }))
                        {
                            //QueryMetrics aggregatedQueryMetrics = QueryMetrics.Zero;
                            int numberOfDocuments = 0;
                            while (documentQuery.HasMoreResults)
                            {
                                FeedResponse<dynamic> cosmosQueryResponse = await documentQuery.ReadNextAsync();

                                numberOfDocuments += cosmosQueryResponse.Count();
                                //foreach (QueryMetrics queryMetrics in cosmosQueryResponse.QueryMetrics.Values)
                                //{
                                //    aggregatedQueryMetrics += queryMetrics;
                                //}
                            }

                            // Core invariant: TOP must cap the total documents received.
                            Assert.IsTrue(
                                numberOfDocuments <= topCount,
                                $"Received {numberOfDocuments} documents with query: {query} and pageSize: {pageSize}");
                        }

                        //if (!useDistinct)
                        //{
                        //    Assert.IsTrue(
                        //        aggregatedQueryMetrics.OutputDocumentCount <= topCount,
                        //        $"Received {aggregatedQueryMetrics.OutputDocumentCount} documents query: {query} and pageSize: {pageSize}");
                        //}
                    }
                }
            }
        }
    }
}
// </RunSimpleScript>
/// <summary>
/// Import many documents using stored procedure.
/// </summary>
/// <param name="container">Container in which to create the stored procedure and documents.</param>
// <RunBulkImport>
private static async Task RunBulkImport(Container container)
{
    string inputDirectory = @".\Data\";
    string inputFileMask = "*.json";
    int maxFiles = 2000;
    int maxScriptSize = 50000;

    // 1. Get the files.
    // (The original also built a DirectoryInfo/FileInfo[] from the same directory
    // but never used them; that redundant second scan has been removed.)
    string[] fileNames = Directory.GetFiles(inputDirectory, inputFileMask);

    // 2. Prepare for import.
    int currentCount = 0;
    int fileCount = maxFiles != 0 ? Math.Min(maxFiles, fileNames.Length) : fileNames.Length;

    // 3. Create stored procedure for this script.
    string scriptId = "BulkImport";
    string body = File.ReadAllText(@".\JS\BulkImport.js");

    await TryDeleteStoredProcedure(container, scriptId);
    Scripts cosmosScripts = container.Scripts;
    // Response is not needed; only the side effect of creating the sproc matters.
    await cosmosScripts.CreateStoredProcedureAsync(new StoredProcedureProperties(scriptId, body));

    // 4. Create a batch of docs (MAX is limited by request size (2M) and to script for execution.
    // We send batches of documents to create to script.
    // Each batch size is determined by MaxScriptSize.
    // MaxScriptSize should be so that:
    // -- it fits into one request (MAX request size is 16Kb).
    // -- it doesn't cause the script to time out.
    // -- it is possible to experiment with MaxScriptSize to get best performance given number of throttles, etc.
    while (currentCount < fileCount)
    {
        // 5. Create args for current batch.
        //    Note that we could send a string with serialized JSON and JSON.parse it on the script side,
        //    but that would cause script to run longer. Since script has timeout, unload the script as much
        //    as we can and do the parsing by client and framework. The script will get JavaScript objects.
        string argsJson = CreateBulkInsertScriptArguments(fileNames, currentCount, fileCount, maxScriptSize);
        dynamic[] args = new dynamic[] { JsonConvert.DeserializeObject<dynamic>(argsJson) };

        // 6. execute the batch.
        StoredProcedureExecuteResponse<int> scriptResult = await cosmosScripts.ExecuteStoredProcedureAsync<int>(
            scriptId,
            new PartitionKey("Andersen"),
            args);

        // 7. Prepare for next batch.
        int currentlyInserted = scriptResult.Resource;
        currentCount += currentlyInserted;
    }

    // 8. Validate by counting everything now in the container.
    int numDocs = 0;
    using (FeedIterator<dynamic> setIterator = container.GetItemQueryIterator<dynamic>())
    {
        while (setIterator.HasMoreResults)
        {
            FeedResponse<dynamic> response = await setIterator.ReadNextAsync();
            numDocs += response.Count();
        }
    }

    Console.WriteLine("Found {0} documents in the collection. There were originally {1} files in the Data directory\r\n", numDocs, fileCount);
}