public static async Task<bool> SaveAsync(CloudTable table, IEnumerable<BaseEntity> items) { TableBatchOperation batch = new TableBatchOperation(); foreach (var item in items) { item.SetKeys(); batch.InsertOrMerge(item); } bool retry = true; bool createTable = false; do { try { if (createTable) { // Try to create the table await table.CreateIfNotExistsAsync(); createTable = false; } // Execute the batch var result = await table.ExecuteBatchAsync(batch); // Check the status codes (success is 200-299) return(result.All(o => o.HttpStatusCode >= 200 && o.HttpStatusCode < 300)); } catch (AggregateException ex) { if (ex.InnerExceptions.Any(o => o is StorageException)) { // Table doesn't exist // Set our value to loop around and try again, creating the table first // Don't do it if we were already trying to create the table if (retry) { createTable = true; retry = false; } else { // We must have generated an exception after trying to create the table, // so don't try again. createTable = false; retry = false; } } } catch (StorageException ex) { // Log the exception for diagnostics System.Diagnostics.Debug.WriteLine(ex); // Table doesn't exist // Set our value to loop around and try again, creating the table first // Don't do it if we were already trying to create the table if (retry) { createTable = true; retry = false; } else { // We must have generated an exception after trying to create the table, // so don't try again. createTable = false; retry = false; } } } while (createTable); return(false); }
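The SaveAsync snippet above submits every item in a single TableBatchOperation, but an Azure Table entity group transaction accepts at most 100 operations and all entities must share one partition key, so large or mixed-partition inputs will fail. A minimal sketch of pre-chunking before ExecuteBatchAsync, assuming BaseEntity exposes the usual ITableEntity.PartitionKey (the helper name is illustrative):

public static async Task SaveChunkedAsync(CloudTable table, IEnumerable<BaseEntity> items)
{
    var keyed = items.ToList();
    keyed.ForEach(i => i.SetKeys());                      // assign keys first, as SaveAsync does
    foreach (var partition in keyed.GroupBy(i => i.PartitionKey))
    {
        // A batch cannot span partitions, so build one per partition key.
        var batch = new TableBatchOperation();
        foreach (var entity in partition)
        {
            batch.InsertOrMerge(entity);
            if (batch.Count == 100)                       // max operations per batch
            {
                await table.ExecuteBatchAsync(batch);
                batch = new TableBatchOperation();
            }
        }
        if (batch.Count > 0)
        {
            await table.ExecuteBatchAsync(batch);         // flush the remainder
        }
    }
}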
/// <summary> /// Removes a batch of items from the table. /// </summary> /// <param name="table">CloudTable instance.</param> /// <param name="tbo">TableBatchOperation instance.</param> internal async Task<int> RemoveItemsAsync(CloudTable table, TableBatchOperation tbo) { if (null == table) { throw new ArgumentNullException(nameof(table), "Argument is null"); } if (null == tbo) { throw new ArgumentNullException(nameof(tbo), "Argument is null"); } int count = 0; const int maxRetryCount = 5; int retry = maxRetryCount; do { // Reset count in case the while was retried. count = 0; try { TableRequestOptions tro = new TableRequestOptions() { MaximumExecutionTime = TimeSpan.FromSeconds(60), ServerTimeout = TimeSpan.FromSeconds(5), RetryPolicy = new ExponentialRetry(TimeSpan.FromSeconds(1), 3) }; // Ensure that the batch isn't empty; if it is, return the count. if (0 == tbo.Count) { break; } // Execute the batch operations. IList<TableResult> results = await table.ExecuteBatchAsync(tbo, tro, null, this._token); if ((null != results) && (results.Count > 0)) { int itemCount = 0, failureCount = 0; foreach (TableResult result in results) { itemCount++; if (false == ((HttpStatusCode)result.HttpStatusCode).IsSuccessCode()) { failureCount++; } } ServiceEventSource.Current.Trace($"Removed {itemCount - failureCount} of {itemCount} items from {table.Name}."); count = itemCount - failureCount; } } catch (StorageException ex) { // ResourceNotFound is returned when one of the batch items isn't found. Need to remove it and try again. if (ex.RequestInformation?.ExtendedErrorInformation?.ErrorCode?.Contains("ResourceNotFound") ?? false) { // Get the index of the item within the batch. if (false == int.TryParse( ex.RequestInformation?.ExtendedErrorInformation?.ErrorMessage?.Split(':')[0], out int index)) { ServiceEventSource.Current.Trace("Unknown index, setting to 0", table.Name); index = 0; } if (index < tbo.Count) { ServiceEventSource.Current.Trace($"StorageException: ResourceNotFound for item {index}", table.Name); await Task.Delay(500); tbo.RemoveAt(index); retry--; } else { ServiceEventSource.Current.Trace("Abandoning batch.", table.Name); break; } } else { ServiceEventSource.Current.Exception(ex.Message, ex.GetType().Name, ex.StackTrace); break; } } } while ((retry > 0) && (retry < maxRetryCount)); // Loop again only while retries remain and at least one retryable exception has been hit. return(count); }
public async Task CreateAsync(T user) { if (user == null) { throw new ArgumentNullException(nameof(user)); } user.SetPartitionAndRowKey(); var userNameIndex = new UserNameIndexEntity(user.UserName.ToSha256(), user.Id); TableOperation indexOperation = TableOperation.Insert(userNameIndex); try { await _userNameIndexTable.ExecuteAsync(indexOperation); } catch (StorageException ex) { if (ex.RequestInformation.HttpStatusCode == 409) { throw new DuplicateUsernameException(); } throw; } if (!String.IsNullOrWhiteSpace(user.Email)) { var userEmailIndexEntity = new UserEmailIndexEntity(user.Email.ToSha256(), user.Id); TableOperation emailIndexOperation = TableOperation.Insert(userEmailIndexEntity); try { await _userEmailIndexTable.ExecuteAsync(emailIndexOperation); } catch (StorageException ex) { try { userNameIndex.ETag = "*"; TableOperation deleteOperation = TableOperation.Delete(userNameIndex); await _userNameIndexTable.ExecuteAsync(deleteOperation); } catch (Exception) { // if we can't delete the index item, fall through and rethrow the original exception below } if (ex.RequestInformation.HttpStatusCode == 409) { throw new DuplicateEmailException(); } throw; } } try { if (user.LockoutEndDate < _minTableStoreDate) { user.LockoutEndDate = _minTableStoreDate; } TableOperation operation = TableOperation.InsertOrReplace(user); await _userTable.ExecuteAsync(operation); if (user.Logins.Any()) { var batch = new TableBatchOperation(); var loginIndexItems = new List<UserLoginProviderKeyIndexEntity>(); foreach (UserLoginEntity login in user.Logins) { login.UserId = user.Id; login.SetPartitionKeyRowKey(); batch.InsertOrReplace(login); var loginIndexItem = new UserLoginProviderKeyIndexEntity(user.Id, login.ProviderKey, login.LoginProvider); loginIndexItems.Add(loginIndexItem); } await _userLoginTable.ExecuteBatchAsync(batch); // can't batch the index items as they have different partition keys foreach (UserLoginProviderKeyIndexEntity loginIndexItem in loginIndexItems) { await _userLoginProviderKeyIndexTable.ExecuteAsync(TableOperation.InsertOrReplace(loginIndexItem)); } } } catch (Exception) { // attempt to delete the index item - needs work userNameIndex.ETag = "*"; TableOperation deleteOperation = TableOperation.Delete(userNameIndex); await _userNameIndexTable.ExecuteAsync(deleteOperation); throw; } }
public static async Task Run( [TimerTrigger("0 0 0 * * *")] TimerInfo timer, [Table("%Storage:ContractTable%", Connection = "Storage:Connection")] CloudTable contractTable, [Table("%Storage:StatTable%", Connection = "Storage:Connection")] CloudTable statTable, ILogger log, ExecutionContext context) { IConfigurationRoot config = new ConfigurationBuilder() .SetBasePath(context.FunctionAppDirectory) .AddJsonFile("local.settings.json", optional: true, reloadOnChange: true) .AddEnvironmentVariables() .Build(); var stat = new Dictionary<ProcessingStatFields, long> { { ProcessingStatFields.Processed, 0 }, { ProcessingStatFields.Failed, 0 }, { ProcessingStatFields.Finished, 0 }, { ProcessingStatFields.LowSeverity, 0 }, { ProcessingStatFields.MediumSeverity, 0 }, { ProcessingStatFields.HighSeverity, 0 }, { ProcessingStatFields.NoIssues, 0 }, }; TableContinuationToken token = null; do { var query = new TableQuery<ContractEntity> { TakeCount = 100 }; // await instead of .Result to avoid blocking inside an async function var segment = await contractTable.ExecuteQuerySegmentedAsync(query, token); foreach (var entry in segment.Results) { stat[ProcessingStatFields.Processed] += 1; if (entry.AnalysisStatus == "Error") { stat[ProcessingStatFields.Failed] += 1; } else { stat[ProcessingStatFields.Finished] += 1; } switch (entry.Severity) { case "Low": stat[ProcessingStatFields.LowSeverity] += 1; break; case "Medium": stat[ProcessingStatFields.MediumSeverity] += 1; break; case "High": stat[ProcessingStatFields.HighSeverity] += 1; break; default: stat[ProcessingStatFields.NoIssues] += 1; break; } } token = segment.ContinuationToken; } while (token != null); var batchOperation = new TableBatchOperation(); foreach (var key in stat.Keys) { var entry = new StatEntity { PartitionKey = "ProcessingStat", RowKey = key.ToString(), Count = stat[key] }; batchOperation.InsertOrReplace(entry); } await statTable.ExecuteBatchAsync(batchOperation); }
public static async Task<IActionResult> createWordBulk( [HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = null)] HttpRequest req, //The binding automatically supplies a correctly configured table object [Table("categorization")] CloudTable tabCats, ILogger log) { List<bulkWordData> allBW; //Bulk data - array of objects with name, langu, desc as attributes string requestBody = await new StreamReader(req.Body).ReadToEndAsync(); if (string.IsNullOrEmpty(requestBody)) { return(null); } allBW = JsonConvert.DeserializeObject<List<bulkWordData>>(requestBody); mainWordData MWD = new mainWordData(); TableOperation query = TableOperation.Retrieve<mainWordData>("mainWordData", "count"); TableResult tabRes = await tabCats.ExecuteAsync(query); if (tabRes.Result != null) { MWD = (mainWordData)tabRes.Result; } MWD.wordCount++; lastBulkWord = MWD.wordCount; bulkTO = new TableBatchOperation(); bulkTO2 = new TableBatchOperation(); tmpAllW2NI = await intGetAllWordNames(tabCats); int bCount = 0; List<Task<IList<TableResult>>> batchTasks = new List<Task<IList<TableResult>>>(); foreach (bulkWordData oneWord in allBW) { bCount++; await intCreateWord(tabCats, oneWord.n, true); if (bCount == 99) { // Flush just before the 100-operation batch limit if (bulkTO.Count > 0) { batchTasks.Add(tabCats.ExecuteBatchAsync(bulkTO)); } if (bulkTO2.Count > 0) { batchTasks.Add(tabCats.ExecuteBatchAsync(bulkTO2)); } bCount = 0; // Start fresh batches rather than clearing ones already handed to ExecuteBatchAsync bulkTO = new TableBatchOperation(); bulkTO2 = new TableBatchOperation(); } } MWD.wordCount = lastBulkWord; TableOperation writeBack = TableOperation.InsertOrReplace(MWD); await tabCats.ExecuteAsync(writeBack); if (bulkTO.Count > 0) { batchTasks.Add(tabCats.ExecuteBatchAsync(bulkTO)); } if (bulkTO2.Count > 0) { batchTasks.Add(tabCats.ExecuteBatchAsync(bulkTO2)); } try { // Wait for all outstanding batches so failures surface before we return await Task.WhenAll(batchTasks); } catch (System.Exception ex) { log.LogInformation(ex.Message); throw; } return(new OkObjectResult("")); }
public async Task StoreAsync<T>(nStoreOperation storeOperationType, IEnumerable<T> models) where T : new() { try { // notify delegate if (_delegate != null) { _delegate.OnStoring(typeof(T), storeOperationType); } // Retrieve a reference to the table. CloudTable table = GetTableReference(GetTableName<T>()); // Create the batch operation. List<TableBatchOperation> batchOperations = new List<TableBatchOperation>(); // Create the first batch var currentBatch = new TableBatchOperation(); batchOperations.Add(currentBatch); // lookup the entity mapper var entityMapper = _entityMapperRegistry[typeof(T)]; // define the model counter int modelCounter = 0; // Add all items foreach (var model in models) { switch (storeOperationType) { case nStoreOperation.insertOperation: currentBatch.Insert(new DynamicTableEntity<T>(model, entityMapper)); break; case nStoreOperation.insertOrReplaceOperation: currentBatch.InsertOrReplace(new DynamicTableEntity<T>(model, entityMapper)); break; case nStoreOperation.mergeOperation: currentBatch.Merge(new DynamicTableEntity<T>(model, entityMapper)); break; case nStoreOperation.mergeOrInserOperation: currentBatch.InsertOrMerge(new DynamicTableEntity<T>(model, entityMapper)); break; case nStoreOperation.delete: currentBatch.Delete(new DynamicTableEntity<T>(model, entityMapper)); break; } modelCounter++; // Start a new batch after every 100 operations (the batch size limit) if (modelCounter % 100 == 0) { currentBatch = new TableBatchOperation(); batchOperations.Add(currentBatch); } } // execute foreach (var createdBatch in batchOperations) { if (createdBatch.Count > 0) { await table.ExecuteBatchAsync(createdBatch); // notify delegate if (_delegate != null) { _delegate.OnStored(typeof(T), storeOperationType, createdBatch.Count, null); } } } } catch (StorageException ex) { // check the exception if (_autoCreateTable && ex.Message.StartsWith("0:The table specified does not exist", StringComparison.CurrentCulture)) { // try to create the table await CreateTableAsync<T>(); // retry await StoreAsync<T>(storeOperationType, models); } else { // notify delegate if (_delegate != null) { _delegate.OnStored(typeof(T), storeOperationType, 0, ex); } // rethrow without resetting the stack trace throw; } } }
public static async Task InsertOrReplaceAsync <T>(this CloudTable table, IEnumerable <T> entitiesToReplace) where T : ITableEntity, new() { await table.ExecuteBatchAsync(entitiesToReplace, (batch, entity) => batch.InsertOrReplace(entity)); }
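This InsertOrReplaceAsync helper (and the RemoveAsync further down) delegate to an ExecuteBatchAsync overload that takes an IEnumerable and a delegate, which is not part of the storage SDK. A plausible sketch of that project-local extension, inferred from its call sites and assuming it chunks to the 100-operation batch limit:

public static async Task ExecuteBatchAsync<T>(
    this CloudTable table,
    IEnumerable<T> entities,
    Action<TableBatchOperation, T> addOperation) where T : ITableEntity, new()
{
    var batch = new TableBatchOperation();
    foreach (var entity in entities)
    {
        addOperation(batch, entity);             // e.g. batch.InsertOrReplace(entity)
        if (batch.Count == 100)                  // stay under the batch size limit
        {
            await table.ExecuteBatchAsync(batch);
            batch = new TableBatchOperation();
        }
    }
    if (batch.Count > 0)
    {
        await table.ExecuteBatchAsync(batch);    // flush the final partial batch
    }
}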
public async Task <ActionResult> Edit(string partitionKey, string rowKey, string listName, string emailAddress, Subscriber editedSubscriber) { // Since MailingList and UpdateModel are in the view as EditorFor fields, // and since they refer to the same properties as PartitionKey and RowKey, // exclude PartitionKey and RowKey from model binding when calling UpdateModel. var excludeProperties = new string[] { "PartitionKey", "RowKey" }; if (ModelState.IsValid) { try { UpdateModel(editedSubscriber, string.Empty, null, excludeProperties); if (editedSubscriber.PartitionKey == partitionKey && editedSubscriber.RowKey == rowKey) { //Keys didn't change -- Update the row var replaceOperation = TableOperation.Replace(editedSubscriber); await mailingListTable.ExecuteAsync(replaceOperation); } else { // Keys changed, delete the old record and insert the new one. if (editedSubscriber.PartitionKey != partitionKey) { // PartitionKey changed, can't do delete/insert in a batch. var deleteOperation = TableOperation.Delete(new Subscriber { PartitionKey = partitionKey, RowKey = rowKey, ETag = editedSubscriber.ETag }); await mailingListTable.ExecuteAsync(deleteOperation); var insertOperation = TableOperation.Insert(editedSubscriber); await mailingListTable.ExecuteAsync(insertOperation); } else { // RowKey changed, do delete/insert in a batch. var batchOperation = new TableBatchOperation(); batchOperation.Delete(new Subscriber { PartitionKey = partitionKey, RowKey = rowKey, ETag = editedSubscriber.ETag }); batchOperation.Insert(editedSubscriber); await mailingListTable.ExecuteBatchAsync(batchOperation); } } return(RedirectToAction("Index")); } catch (StorageException ex) { if (ex.RequestInformation.HttpStatusCode == 412) { // Concurrency error. // Only catching concurrency errors for non-key fields. If someone // changes a key field we'll get a 404 and we have no way to know // what they changed it to. var currentSubscriber = FindRow(partitionKey, rowKey); if (currentSubscriber.Verified != editedSubscriber.Verified) { ModelState.AddModelError("Verified", "Current value: " + currentSubscriber.Verified); } ModelState.AddModelError(string.Empty, "The record you attempted to edit " + "was modified by another user after you got the original value. The " + "edit operation was canceled and the current values in the database " + "have been displayed. If you still want to edit this record, click " + "the Save button again. Otherwise click the Back to List hyperlink."); ModelState.SetModelValue("ETag", new ValueProviderResult(currentSubscriber.ETag, currentSubscriber.ETag, null)); } else { throw; } } } var lists = await GetListNamesAsync(); ViewBag.ListName = new SelectList(lists, "ListName", "Description", listName); return(View(editedSubscriber)); }
public async Task CommitChanges() { // Executing an empty TableBatchOperation throws, so skip when there is nothing to commit. if (operations.Count > 0) { await context.ExecuteBatchAsync(operations); } operations = new TableBatchOperation(); }
public static async Task Run( [QueueTrigger("aladin-newbooks")] CloudQueueMessage message, [Table("BookEntity")] CloudTable bookTable, [Table("LineAccount")] CloudTable lineAccountTable, [Table("Credentials", "Twitter")] CloudTable credentialsTable, ILogger log, CancellationToken cancellationToken) { try { var queueItem = JsonConvert.DeserializeObject<QueueItem>(message.AsString); string categoryId = queueItem.CategoryId; var itemList = new List<ItemLookUpResult.Item>(); var batchOperation = new TableBatchOperation(); foreach (int itemId in queueItem.ItemList) { var lookUpResult = await LookUpItemAsync(itemId); foreach (var item in lookUpResult.item) { cancellationToken.ThrowIfCancellationRequested(); var bookEntity = new BookEntity(); bookEntity.PartitionKey = categoryId; bookEntity.RowKey = itemId.ToString(); bookEntity.Name = item.title; batchOperation.InsertOrReplace(bookEntity); itemList.Add(item); } } if (itemList.Count > 0 && !cancellationToken.IsCancellationRequested) { var tokens = await Twitter.Utils.CreateTokenAsync(credentialsTable, queueItem.CategoryId); var tweetTask = tokens != null ? TweetItemsAsync(tokens, itemList, cancellationToken) : Task.CompletedTask; // Batch operations require a single partition key and allow at most 100 operations... // Here every item arrives with the same partition key, and we should never go over 100... right? var tableTask = bookTable.ExecuteBatchAsync(batchOperation); // Run the Twitter and table writes first. await Task.WhenAll(tweetTask, tableTask); // Sending LINE messages can hit a rate limit and throw, so handle LINE separately. if (queueItem.CategoryId == Aladin.Const.CategoryID.Comics) { await SendLineMessageAsync(lineAccountTable, itemList, log); } } } catch (Exception e) { log.LogError(e.Message); if (e is LineResponseException lineEx) { if (lineEx.StatusCode != HttpStatusCode.TooManyRequests) { // On error, push the exception details to my own account. string adminLineId = Environment.GetEnvironmentVariable("LINE_ADMIN_USER_ID"); var error = new { Type = e.GetType().ToString(), Message = e.Message, StackTrace = e.StackTrace }; string json = JsonConvert.SerializeObject(error, Formatting.Indented); await lineMessagingClient.PushMessageAsync(adminLineId, json); } } } }
public static long DeleteFileProcessStats(DataConfig providerConfig, POCO.System system) { long numEntriesDeleted = 0; string systemKey = system.PartitionKey; if (!systemKey.EndsWith("|")) { systemKey += "|"; } // Create a filter for the system List<Filter> filters = new List<Filter>(); Filter pkfilt = new Filter("PartitionKey", systemKey, "eq"); filters.Add(pkfilt); // Check the data provider to use switch (providerConfig.ProviderType) { case "azure.tableservice": string combinedFilter = Utils.GenerateAzureFilter(filters); CloudTable table = Utils.GetCloudTable(providerConfig, AzureTableNames.FileProcessStats); TableQuery<TableEntity> query = new TableQuery<TableEntity>().Where(combinedFilter); List<TableEntity> recordsToDelete = new List<TableEntity>(); TableContinuationToken token = null; var runningQuery = new TableQuery<TableEntity>() { FilterString = query.FilterString, SelectColumns = query.SelectColumns }; do { runningQuery.TakeCount = query.TakeCount - recordsToDelete.Count; Task<TableQuerySegment<TableEntity>> tSeg = table.ExecuteQuerySegmentedAsync<TableEntity>(runningQuery, token); tSeg.Wait(); token = tSeg.Result.ContinuationToken; // recordsToDelete keeps the running total for the take-count check recordsToDelete.AddRange(tSeg.Result); // Batch only this segment's entities, so entities deleted in earlier segments aren't re-added TableBatchOperation batch = new TableBatchOperation(); foreach (TableEntity entity in tSeg.Result) { batch.Add(TableOperation.Delete(entity)); if (batch.Count == 100) { numEntriesDeleted += batch.Count; Task tBatchDelete = table.ExecuteBatchAsync(batch); tBatchDelete.Wait(); batch = new TableBatchOperation(); } } if (batch.Count > 0) { numEntriesDeleted += batch.Count; Task tBatchDelete = table.ExecuteBatchAsync(batch); tBatchDelete.Wait(); } } while (token != null && (query.TakeCount == null || recordsToDelete.Count < query.TakeCount.Value)); break; case "internal.mongodb": IMongoCollection<BsonDocument> collection = Utils.GetMongoCollection<BsonDocument>(providerConfig, MongoTableNames.FileProcessStats); // Create the delete filter FilterDefinition<BsonDocument> filter = Utils.GenerateMongoFilter<BsonDocument>(filters); // Replace current document DeleteResult result = collection.DeleteMany(filter); numEntriesDeleted = result.DeletedCount; break; default: throw new ApplicationException("Data provider not recognised: " + providerConfig.ProviderType); } return(numEntriesDeleted); }
/// <summary> /// Executes a batch of operations on a table asynchronously. /// </summary> /// <param name="tableBatchOperation"> /// The <see cref="T:Microsoft.WindowsAzure.Storage.Table.TableBatchOperation" /> object representing the operations to execute on the table. /// </param> /// <param name="cancellationToken">Cancellation token.</param> /// <returns> /// An enumerable collection of <see cref="T:Microsoft.WindowsAzure.Storage.Table.TableResult" /> objects that contains the results, in order, of each operation in the /// <see cref="T:Microsoft.WindowsAzure.Storage.Table.TableBatchOperation" /> /// on the table. /// </returns> public Task<IList<TableResult>> ExecuteBatchAsync(TableBatchOperation tableBatchOperation, CancellationToken cancellationToken) { return(_cloudTable.ExecuteBatchAsync(tableBatchOperation, cancellationToken)); }
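A brief usage sketch for this wrapper; the `wrapper` instance and `entity` here are purely illustrative, and the token comes from a 30-second CancellationTokenSource:

var batch = new TableBatchOperation();
batch.InsertOrMerge(entity);
using (var cts = new CancellationTokenSource(TimeSpan.FromSeconds(30)))
{
    // Cancellation propagates into the underlying CloudTable.ExecuteBatchAsync call.
    IList<TableResult> results = await wrapper.ExecuteBatchAsync(batch, cts.Token);
    Console.WriteLine($"Executed {results.Count} operations.");
}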
private static async Task<String> InsertAllTheThings(CloudTable _table) { var taskCount = 0; var taskThreshold = 200; // Seems to be a good value to start with var stopwatch = new Stopwatch(); stopwatch.Start(); var items = new List<MyTableEntity>(); // Create a new customer entity. List<String> firstnames = new List<String>(); List<String> lastnames = new List<String>(); Console.WriteLine("Reading CSV files..."); using (var reader = new StreamReader(firstNamesPath)) { var line = reader.ReadLine(); var values = line.Split(','); firstnames = values.ToList<String>(); } using (var reader = new StreamReader(lastNamesPath)) { var line = reader.ReadLine(); var values = line.Split(','); lastnames = values.ToList<String>(); } Console.WriteLine("Reading firstnames and lastnames finished by: " + stopwatch.Elapsed.ToString()); Console.WriteLine("Creating random lists from firstnames and lastnames..."); var rnd = new Random(); // one shared instance; new Random() in a tight loop reseeds with the same time value while (items.Count < soManyItems) { var partitionKey = "placeholder"; var firstname = firstnames[rnd.Next(0, firstnames.Count)]; // upper bound is exclusive var lastname = lastnames[rnd.Next(0, lastnames.Count)]; MyTableEntity entity1 = new MyTableEntity(partitionKey); entity1.Email = String.Format("{0}@{1}.com", firstname, lastname); entity1.PhoneNumber = String.Format("{0}-{1}-{2}", rnd.Next(1, 999), rnd.Next(1, 999), rnd.Next(1, 9999)); entity1.Firstname = firstname; entity1.Lastname = lastname; items.Add(entity1); } Console.WriteLine("Creating random lists finished by: " + stopwatch.Elapsed.ToString()); Console.WriteLine("Starting batch operations for " + soManyItems + " items..."); Console.WriteLine("200 tasks with 100 inserts running parallel..."); Console.WriteLine("for a total of 20,000 inserts per operation..."); var batchTasks = new List<Task<TableBatchResult>>(); var count = 1; var maxParallel = 100; // 100 is the maximum batch size for (var i = 0; i < items.Count; i += maxParallel) { taskCount++; var batchItems = items.Skip(i).Take(maxParallel).ToList(); var batch = new TableBatchOperation(); foreach (var item in batchItems) { item.PartitionKey = "partition" + count; batch.InsertOrMerge(item); } var task = _table.ExecuteBatchAsync(batch); batchTasks.Add(task); if (taskCount >= taskThreshold) { await Task.WhenAll(batchTasks); batchTasks.Clear(); // don't keep re-awaiting completed tasks Console.WriteLine("Finished batch " + count + " by: " + stopwatch.Elapsed.ToString()); count++; taskCount = 0; } } // Await any tasks left over from the last partial wave if (batchTasks.Count > 0) { await Task.WhenAll(batchTasks); } stopwatch.Stop(); return("Finished " + soManyItems + " inserts by " + stopwatch.Elapsed.ToString()); }
public AzureDatabase(string storageAccountString, string tableName) { CloudStorageAccount account = CloudStorageAccount.Parse(storageAccountString); qclient = account.CreateCloudQueueClient(); client = account.CreateCloudTableClient(); client.DefaultRequestOptions.PayloadFormat = TablePayloadFormat.JsonNoMetadata; table = client.GetTableReference(tableName); mthread = new Thread(async delegate() { await table.CreateIfNotExistsAsync(); while (running) { evt.WaitOne(); evt.Reset(); Dictionary <ulong, List <AzureOperationHandle> > ops; lock (evt) { ops = pendingOperations; pendingOperations = new Dictionary <ulong, List <AzureOperationHandle> >(); } List <Task> runningTasks = new List <Task>(); foreach (var shard in ops) { TableBatchOperation upserts = new TableBatchOperation(); TableBatchOperation deletions = new TableBatchOperation(); Dictionary <ScalableEntity, List <AzureOperationHandle> > retrieves = new Dictionary <ScalableEntity, List <AzureOperationHandle> >(EntityComparer.instance); Dictionary <ByteRange, List <AzureOperationHandle> > rangeRetrieves = new Dictionary <ByteRange, List <AzureOperationHandle> >(); foreach (var op in shard.Value.Where(m => m.Type == OpType.Upsert || m.Type == OpType.Delete)) { switch (op.Type) { case OpType.Upsert: upserts.Add(TableOperation.InsertOrReplace(new AzureEntity() { PartitionKey = op.Entity.Partition.ToString(), RowKey = Uri.EscapeDataString(Convert.ToBase64String(op.Entity.Key)), Value = op.Entity.Value })); if (upserts.Count == 100) { runningTasks.Add(table.ExecuteBatchAsync(upserts)); upserts = new TableBatchOperation(); } break; case OpType.Delete: deletions.Add(TableOperation.Delete(new AzureEntity() { PartitionKey = op.Entity.Partition.ToString(), RowKey = Uri.EscapeDataString(Convert.ToBase64String(op.Entity.Key)), Value = op.Entity.Value })); if (deletions.Count == 100) { runningTasks.Add(table.ExecuteBatchAsync(deletions)); deletions = new TableBatchOperation(); } break; } } Func <IEnumerable <string>, string> and = (q) => { string query = null; foreach (string er in q.Where(m => m != null)) { if (query == null) { query = er; } else { query = TableQuery.CombineFilters(query, TableOperators.And, er); } } return(query); }; Func <IEnumerable <string>, string> or = (q) => { string query = null; foreach (string er in q.Where(m => m != null)) { if (query == null) { query = er; } else { query = TableQuery.CombineFilters(query, TableOperators.Or, er); } } return(query); }; Func <Dictionary <ScalableEntity, List <AzureOperationHandle> >, Dictionary <ByteRange, List <AzureOperationHandle> >, TableContinuationToken, TableQuery <AzureEntity>, Task> runSegmentedQuery = null; runSegmentedQuery = async(tableops, rangeops, token, compiledQuery) => { if (compiledQuery == null) { string query = null; foreach (var iable in tableops.Values.SelectMany(m => m).Where(m => m.Type == OpType.Retrieve)) { if (query == null) { query = TableQuery.CombineFilters(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, iable.Entity.Partition.ToString()), TableOperators.And, TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.Equal, Uri.EscapeDataString(Convert.ToBase64String(iable.Entity.Key)))); } else { query = TableQuery.CombineFilters(query, TableOperators.Or, TableQuery.CombineFilters(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, iable.Entity.Partition.ToString()), TableOperators.And, TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.Equal, 
Uri.EscapeDataString(Convert.ToBase64String(iable.Entity.Key))))); } } foreach (var iable in rangeops.Values.SelectMany(m => m).Where(m => m.Type == OpType.RangeRetrieve)) { string startQuery = null; string endQuery = null; if (iable.StartRange != null) { startQuery = TableQuery.CombineFilters(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.GreaterThan, iable.StartRange.LinearHash().ToString()), TableOperators.And, TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.GreaterThan, Uri.EscapeDataString(Convert.ToBase64String(iable.StartRange)))); } if (iable.EndRange != null) { endQuery = TableQuery.CombineFilters(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.LessThan, iable.EndRange.LinearHash().ToString()), TableOperators.And, TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.LessThan, Uri.EscapeDataString(Convert.ToBase64String(iable.EndRange)))); } query = or(new string[] { query, and(new string[] { startQuery, endQuery }) }); } compiledQuery = new TableQuery <AzureEntity>().Where(query); } var segment = await table.ExecuteQuerySegmentedAsync(compiledQuery, token); token = segment.ContinuationToken; List <AzureOperationHandle> finished = new List <AzureOperationHandle>(); foreach (var iable in segment) { var ent = new ScalableEntity(Convert.FromBase64String(Uri.UnescapeDataString(iable.RowKey)), iable.Value); if (tableops.ContainsKey(ent)) { finished.AddRange(tableops[ent].Select(m => m.SetValue(ent.Value))); } if (rangeops.Any()) { var range = new ByteRange(iable.Value, iable.Value); if (rangeops.ContainsKey(range)) { finished.AddRange(rangeops[range].Select(m => m.AddValue(new ScalableEntity(Convert.FromBase64String(Uri.UnescapeDataString(iable.RowKey)), iable.Value)))); } } } //Combine callbacks for finished queries var callbacks = finished.ToLookup(m => m.callback); foreach (var iable in callbacks) { if (!iable.Key(iable.Where(m => m.Entity != null).Select(m => m.Entity).Union(iable.SelectMany(m => m.values), EntityComparer.instance))) { iable.AsParallel().ForAll(m => m.Type = OpType.Nop); compiledQuery = null; //Re-compile query } } if (token != null) { await runSegmentedQuery(tableops, rangeops, token, compiledQuery); } }; foreach (var op in shard.Value.Where(m => m.Type == OpType.Retrieve || m.Type == OpType.RangeRetrieve)) { switch (op.Type) { case OpType.Retrieve: if (!retrieves.ContainsKey(op.Entity)) { retrieves.Add(op.Entity, new List <AzureOperationHandle>()); } retrieves[op.Entity].Add(op); if (retrieves.Count + rangeRetrieves.Count == 100) { runningTasks.Add(runSegmentedQuery(retrieves, rangeRetrieves, null, null)); retrieves = new Dictionary <ScalableEntity, List <AzureOperationHandle> >(EntityComparer.instance); rangeRetrieves = new Dictionary <ByteRange, List <AzureOperationHandle> >(); } break; case OpType.RangeRetrieve: var ranger = new ByteRange(op.StartRange, op.EndRange); if (!rangeRetrieves.ContainsKey(ranger)) { rangeRetrieves.Add(ranger, new List <AzureOperationHandle>()); } rangeRetrieves[ranger].Add(op); if (retrieves.Count + rangeRetrieves.Count == 100) { runningTasks.Add(runSegmentedQuery(retrieves, rangeRetrieves, null, null)); retrieves = new Dictionary <ScalableEntity, List <AzureOperationHandle> >(EntityComparer.instance); rangeRetrieves = new Dictionary <ByteRange, List <AzureOperationHandle> >(); } break; } } foreach (var op in shard.Value.Where(m => m.Type == OpType.Nop)) { op.Task.SetResult(op.Entity); } if (upserts.Any()) { runningTasks.Add(table.ExecuteBatchAsync(upserts)); } if 
(deletions.Any()) { /* flush the leftover deletions batch, which was previously dropped */ runningTasks.Add(table.ExecuteBatchAsync(deletions)); } if (retrieves.Any() || rangeRetrieves.Any()) { runningTasks.Add(runSegmentedQuery(retrieves, rangeRetrieves, null, null)); } } await Task.WhenAll(runningTasks); ops.SelectMany(m => m.Value).AsParallel().ForAll(m => m.Task.SetResult(m.Entity)); } }); mthread.Name = "AzureDB-webrunner"; mthread.Start(); }
public async Task DoBatchAsync(TableBatchOperation batch) { CloudTable table = await GetTable(); await table.ExecuteBatchAsync(batch); }
public async Task TableFilterTest() { // Reinitialize the name resolver to avoid conflicts _resolver = new RandomNameResolver(); IHost host = new HostBuilder() .ConfigureDefaultTestHost <AzureStorageEndToEndTests>(b => { b.AddAzureStorage(); }) .ConfigureServices(services => { services.AddSingleton <INameResolver>(_resolver); }) .Build(); // write test entities string testTableName = _resolver.ResolveInString(TableName); CloudTableClient tableClient = _storageAccount.CreateCloudTableClient(); CloudTable table = tableClient.GetTableReference(testTableName); await table.CreateIfNotExistsAsync(); var operation = new TableBatchOperation(); operation.Insert(new Person { PartitionKey = "1", RowKey = "1", Name = "Lary", Age = 20, Location = "Seattle" }); operation.Insert(new Person { PartitionKey = "1", RowKey = "2", Name = "Moe", Age = 35, Location = "Seattle" }); operation.Insert(new Person { PartitionKey = "1", RowKey = "3", Name = "Curly", Age = 45, Location = "Texas" }); operation.Insert(new Person { PartitionKey = "1", RowKey = "4", Name = "Bill", Age = 28, Location = "Tam O'Shanter" }); await table.ExecuteBatchAsync(operation); JobHost jobHost = host.GetJobHost(); var methodInfo = GetType().GetMethod(nameof(TableWithFilter)); var input = new Person { Age = 25, Location = "Seattle" }; string json = JsonConvert.SerializeObject(input); var arguments = new { person = json }; await jobHost.CallAsync(methodInfo, arguments); // wait for test results to appear await TestHelpers.Await(() => testResult != null); JArray results = (JArray)testResult; Assert.Single(results); input = new Person { Age = 25, Location = "Tam O'Shanter" }; json = JsonConvert.SerializeObject(input); arguments = new { person = json }; await jobHost.CallAsync(methodInfo, arguments); await TestHelpers.Await(() => testResult != null); results = (JArray)testResult; Assert.Single(results); Assert.Equal("Bill", (string)results[0]["Name"]); }
public async Task AddBatch(TableBatchOperation tableOperation) { await table.ExecuteBatchAsync(tableOperation); }
public override async Task ExecuteAsync(CloudTable cloudTable = null) { // Collection of ids List<string> moviesType1 = new List<string>(); List<string> moviesType2 = new List<string>(); // Prepare batch operation TableBatchOperation batchOperation1 = new TableBatchOperation(); TableBatchOperation batchOperation2 = new TableBatchOperation(); TableBatchOperation batchOperation3 = new TableBatchOperation(); // Generate some data Console.WriteLine("...Generate test data..."); for (int i = 1; i <= 10; i++) { var movie = new Movie(type: "Comedy", title: $"Movie {i}") { Id = i, Title = $"Movie {i}", Description = "Sample description", Type = "Comedy", Premiere = 2011, Timestamp = new DateTimeOffset(DateTime.UtcNow) }; moviesType1.Add(movie.Title); batchOperation1.Insert(movie); } // Generate some data for (int i = 1; i <= 10; i++) { var movie = new Movie(type: "Horror", title: $"Other Movie {i}") { Id = i, Title = $"Other Movie {i}", Description = "Sample description", Type = "Horror", Premiere = 2001, Timestamp = new DateTimeOffset(DateTime.UtcNow) }; moviesType2.Add(movie.Title); batchOperation2.Insert(movie); } for (int i = 1; i <= 10; i++) { var movie = new Movie(type: "Horror", title: $"Other Horror Movie {i}") { Id = i, Title = $"Other Horror Movie {i}", Description = "Sample description", Type = "Horror", Premiere = 2002, Timestamp = new DateTimeOffset(DateTime.UtcNow) }; batchOperation3.Insert(movie); } // Execute batch Console.WriteLine("...Execute batch operations..."); await cloudTable.ExecuteBatchAsync(batchOperation1); await cloudTable.ExecuteBatchAsync(batchOperation2); await cloudTable.ExecuteBatchAsync(batchOperation3); // Insert Index Entities var index1 = new Index(partitionKey: "Comedy", rowKey: "2011") { IndexEntities = string.Join(',', moviesType1.ToArray()) }; var index2 = new Index(partitionKey: "Horror", rowKey: "2001") { IndexEntities = string.Join(',', moviesType2.ToArray()) }; TableOperation insert1 = TableOperation.Insert(index1); TableOperation insert2 = TableOperation.Insert(index2); // Execute insert operations Console.WriteLine("...Execute insert operations..."); await cloudTable.ExecuteAsync(insert1); await cloudTable.ExecuteAsync(insert2); // Retrieve index Console.WriteLine("...Retrieve index..."); TableQuery<Index> rangeQuery1 = new TableQuery<Index>() .Where(TableQuery.CombineFilters( TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, "Comedy") , TableOperators.And , TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.Equal, "2011"))); var result1 = await cloudTable.ExecuteQuerySegmentedAsync(rangeQuery1, null); string[] movieTitles = { }; foreach (var index in result1) { Console.WriteLine($"{nameof(index.PartitionKey)}: {index.PartitionKey}, {nameof(index.RowKey)}: {index.RowKey}, {nameof(index.IndexEntities)}: {index.IndexEntities}"); movieTitles = index.IndexEntities.Split(','); } // Retrieve entities Console.WriteLine("...Retrieve entities - using index..."); TableQuery<Movie> query = new TableQuery<Movie>(); query.Where( TableQuery.CombineFilters(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, "Comedy"), TableOperators.And, TableQuery.CombineFilters(TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.GreaterThanOrEqual, "Movie 1"), TableOperators.Or, TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.LessThanOrEqual, "Movie 10"))) ); var movies = await cloudTable.ExecuteQuerySegmentedAsync(query, null); foreach (var movie in movies) { Console.WriteLine($"Movie Type: 
{movie.Type}, Premiere: {movie.Premiere}, Title: {movie.Title}"); } }
public static async Task <IActionResult> Run( [HttpTrigger(AuthorizationLevel.Anonymous, "post", Route = "ip")] HttpRequest req, [Table(HostIpDataEntity.TableName, Connection = "AzureWebJobsStorage")] CloudTable cloudTable, ClaimsPrincipal principal, ILogger log) { var userIdClaim = principal?.FindFirst(Claims.ObjectId); var userId = userIdClaim?.Value; if (string.IsNullOrWhiteSpace(userId)) { log.LogError("request has invalid userId {UserId}", userId); return(new UnauthorizedResult()); } log.LogTrace("IP update request received"); string requestBody = await new StreamReader(req.Body).ReadToEndAsync(); UpdateIPRequestModel model; try { model = JsonConvert.DeserializeObject <UpdateIPRequestModel>(requestBody); } catch (JsonException) { log.LogError("IP update request has invalid payload"); return(new BadRequestResult()); } var validationResult = new List <ValidationResult>(); bool isValid = Validator.TryValidateObject(model, new ValidationContext(model), validationResult); if (!isValid) { log.LogError("IP update request has invalid model"); return(new BadRequestObjectResult(validationResult)); } try { TableBatchOperation batchOperations = new TableBatchOperation(); foreach (var nicInfo in model.NicIp) { var entity = new HostIpDataEntity(userId, model.ComputerName, nicInfo.Id, nicInfo.Name, nicInfo.Addresses); TableOperation insertOp = TableOperation.InsertOrMerge(entity); batchOperations.Add(insertOp); } TableBatchResult result = await cloudTable.ExecuteBatchAsync(batchOperations); } catch (StorageException e) { log.LogError(e.Message); throw; } log.LogTrace("successfully updated IP for computer {ComputerName}, {Entries} entries updated", model.ComputerName, model.NicIp.Count); return(new NoContentResult()); }
public async Task DoBatchAsync(TableBatchOperation batch) { CloudTable table = await GetTable(); await table.ExecuteBatchAsync(batch, GetRequestOptions(), null); }
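GetRequestOptions() here is project code rather than an SDK call. A hedged sketch of what such a helper might return for batch writes, reusing the timeout and retry values that appear in the RemoveItemsAsync snippet earlier in this collection:

private static TableRequestOptions GetRequestOptions()
{
    return new TableRequestOptions
    {
        MaximumExecutionTime = TimeSpan.FromSeconds(60), // overall cap across retries
        ServerTimeout = TimeSpan.FromSeconds(5),         // per-request server timeout
        RetryPolicy = new ExponentialRetry(TimeSpan.FromSeconds(1), 3)
    };
}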
public static async Task RemoveAsync <T>(this CloudTable table, IEnumerable <T> entitiesToRemove) where T : ITableEntity, new() { await table.ExecuteBatchAsync(entitiesToRemove, (batch, entity) => batch.Delete(entity)); }
public static async Task Run([TimerTrigger("0 */1 * * * *")] TimerInfo myTimer, ILogger log, [Table("dselbtable")] CloudTable loadbalancerconfigTable) { log.LogInformation($"C# Timer trigger function executed at: {DateTime.Now}"); // query all rows var tableQuery = new TableQuery<UpdateLoadBalancerEntity>(); var result = await loadbalancerconfigTable.ExecuteQuerySegmentedAsync(tableQuery, null); // Create the batch operation. TableBatchOperation batchDeleteOperation = new TableBatchOperation(); foreach (var row in result) { batchDeleteOperation.Delete(row); } // Execute the batch operation (an empty batch throws, so guard first). if (batchDeleteOperation.Count > 0) { await loadbalancerconfigTable.ExecuteBatchAsync(batchDeleteOperation); } //Get Token string token = await AuthHelper.GetTokenAsync(); log.LogInformation($"Token Received: {token}"); string subscriptionsUri = "https://management.azure.com/subscriptions?api-version=2016-06-01"; Subscriptions subscriptions = await ResilientRestClient.GetAsync<Subscriptions>(subscriptionsUri, token); log.LogInformation($"Subs Received"); //Query SubscriptionIDs which has Gateway Connections string query = "where type =~ 'microsoft.network/connections'| distinct subscriptionId "; Dictionary<string, int> options = new Dictionary<string, int>(); options["$skip"] = 0; Dictionary<string, object> requestBodyObj = new Dictionary<string, object>(); List<string> subscriptionIds = subscriptions.value.Select(subs => subs.subscriptionId).ToList(); requestBodyObj.Add("subscriptions", subscriptionIds); requestBodyObj.Add("query", query); requestBodyObj.Add("options", options); string resourceGraphUri = "https://management.azure.com/providers/Microsoft.ResourceGraph/resources?api-version=2018-09-01-preview"; List<string> expressRouteConnectedSubscriptions = new List<string>(); //TEST CASE string subId = Environment.GetEnvironmentVariable("TestSubscriptionID", EnvironmentVariableTarget.Process); expressRouteConnectedSubscriptions.Add(subId); //Commenting for test // ResourceGraphResponse resourcesubs = ResilientRestClient.PostAsync<ResourceGraphResponse>(resourceGraphUri, token, requestBodyObj).Result; // foreach (List<string> row in resourcesubs.data.rows) // { // expressRouteConnectedSubscriptions.Add(row[0]); // } // log.LogInformation($"The number of subs are {expressRouteConnectedSubscriptions.Count}"); //TODO ADD Logic to clear the table or after automation job completion trigger an update to remove the upgraded LB from table //Query for Load Balancers string lbquery = "where type =~ 'Microsoft.Network/loadbalancers'| where tostring(sku.name) =='Basic' | project id, subscriptionId, resourceGroup, name, location"; Dictionary<string, int> lboptions = new Dictionary<string, int>(); lboptions["$skip"] = 0; Dictionary<string, object> lbrequestBodyObj = new Dictionary<string, object>(); lbrequestBodyObj.Add("subscriptions", expressRouteConnectedSubscriptions); lbrequestBodyObj.Add("query", lbquery); lbrequestBodyObj.Add("options", lboptions); List<LoadBalancerObj> loadBalancers = new List<LoadBalancerObj>(); ResourceGraphResponse lbs = await ResilientRestClient.PostAsync<ResourceGraphResponse>(resourceGraphUri, token, lbrequestBodyObj); int i = 1; foreach (List<string> row in lbs.data.rows) { LoadBalancerObj lb = new LoadBalancerObj { ResourceId = row[0], SubscriptionId = row[1], ResourceGroup = row[2], Name = row[3], Location = row[4] }; loadBalancers.Add(lb); } while (lbs.skipToken != null) { log.LogInformation(i.ToString()); Dictionary<string, int> nextSkip = new Dictionary<string, int>(); nextSkip["$skip"] = i * 1000; 
Dictionary<string, object> updatedrequestBodyObj = new Dictionary<string, object>(); updatedrequestBodyObj["subscriptions"] = expressRouteConnectedSubscriptions; updatedrequestBodyObj["query"] = lbquery; updatedrequestBodyObj["$skipToken"] = lbs.skipToken; updatedrequestBodyObj["options"] = nextSkip; lbs = await ResilientRestClient.PostAsync<ResourceGraphResponse>(resourceGraphUri, token, updatedrequestBodyObj); foreach (List<string> row in lbs.data.rows) { LoadBalancerObj lb = new LoadBalancerObj { ResourceId = row[0], SubscriptionId = row[1], ResourceGroup = row[2], Name = row[3], Location = row[4] }; loadBalancers.Add(lb); } i++; } // Parallel.ForEach with an async lambda is fire-and-forget; await all upserts instead. await Task.WhenAll(loadBalancers.Select(lb => { UpdateLoadBalancerEntity loadBalancerEntity = new UpdateLoadBalancerEntity { PartitionKey = lb.SubscriptionId, RowKey = lb.Name, Location = lb.Location, ResourceGroup = lb.ResourceGroup, ResourceId = lb.ResourceId }; TableOperation insertOperation = TableOperation.InsertOrMerge(loadBalancerEntity); return loadbalancerconfigTable.ExecuteAsync(insertOperation); })); }
// Processes a page of members (including guests) or owners for one group and stores owners and guests to Azure table storage private static async Task<GroupMemberPageResult> ProcessGroupMemberPageAsync(string getMembersUrl, JToken group, HttpClient httpClient, CloudTable groupGuestsTable, CloudTable groupOwnersTable, bool isOwners) { var result = new GroupMemberPageResult(); HttpResponseMessage groupMembersJson; groupMembersJson = await httpClient.GetAsync(getMembersUrl).ConfigureAwait(false); var groupMemberJsonObject = JObject.Parse(await groupMembersJson.Content.ReadAsStringAsync().ConfigureAwait(false)); var groupGuestsBatchOperation = new TableBatchOperation(); var groupOwnersBatchOperation = new TableBatchOperation(); if (groupMemberJsonObject["value"] is JArray groupMembers) { foreach (var groupMember in groupMembers) { var groupMemberTableEntity = new GroupMemberTableEntity() { PartitionKey = group["id"]?.ToString(), RowKey = Guid.NewGuid().ToString(), GroupId = group["id"]?.ToString(), GroupDisplayName = group["displayName"]?.ToString(), GroupMailNickname = group["mailNickname"]?.ToString(), Id = groupMember["id"]?.ToString(), UPN = groupMember["userPrincipalName"]?.ToString(), DisplayName = groupMember["displayName"]?.ToString(), AccountEnabled = groupMember["accountEnabled"]?.ToString() }; if (isOwners) { result.OwnerCount++; groupOwnersBatchOperation.Add(TableOperation.InsertOrReplace(groupMemberTableEntity)); } else { result.MemberCount++; //if member is a guest if (groupMember["userType"] != null && groupMember["userType"].ToString().ToLower().Equals("guest")) { result.GuestCount++; groupGuestsBatchOperation.Add(TableOperation.InsertOrReplace(groupMemberTableEntity)); } } } } if (isOwners && groupOwnersBatchOperation.Count > 0) { await groupOwnersTable.ExecuteBatchAsync(groupOwnersBatchOperation) .ConfigureAwait(false); } else if (groupGuestsBatchOperation.Count > 0) { await groupGuestsTable.ExecuteBatchAsync(groupGuestsBatchOperation) .ConfigureAwait(false); } result.NextLink = groupMemberJsonObject["@odata.nextLink"]?.ToString(); return(result); }
/// <summary> /// Executes a batch operation on a table as an atomic operation. /// </summary> /// <param name="table">Target table.</param> /// <param name="batch"> /// The <see cref="T:Microsoft.WindowsAzure.Storage.Table.TableBatchOperation" /> object representing the operations to execute on the table. /// </param> /// <returns> /// An enumerable collection of <see cref="T:Microsoft.WindowsAzure.Storage.Table.TableResult" /> objects that contains the results, in order, of each operation in the /// <see /// cref="T:Microsoft.WindowsAzure.Storage.Table.TableBatchOperation" /> /// on the table. /// </returns> public static IList <TableResult> ExecuteBatch(this CloudTable table, TableBatchOperation batch) { return(table.ExecuteBatchAsync(batch).ExecuteSynchronously()); }
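ExecuteSynchronously() is likewise a project extension, not an SDK member. A common shape for it, sketched here as an assumption: block on the task and unwrap the result via GetAwaiter().GetResult() so callers see the original StorageException rather than an AggregateException:

public static T ExecuteSynchronously<T>(this Task<T> task)
{
    // GetAwaiter().GetResult() rethrows the original exception directly,
    // unlike .Result, which wraps it in an AggregateException.
    return task.ConfigureAwait(false).GetAwaiter().GetResult();
}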
private async Task UploadBufferAsync() { if (m_Buffer.Count > 1 && m_Buffer[0].UseBatches) { // Batch upload var table = MapTable(m_Buffer[0]); var uploads = new TableBatchOperation(); var links = new TableBatchOperation(); var id = m_Buffer[0].Controller; foreach (var data in m_Buffer) { var entity = data.ToEntity(data.ID ?? m_RowKeyBase + "-" + m_Seq++); uploads.Insert(entity); if (data.ID != null) { links.Insert(new Link(table.Name, data.ID, entity.PartitionKey, entity.RowKey)); } } OnDebug?.Invoke($"Batch uploading {uploads.Count} records to Azure table storage {table.Name} for controller [{id}]..."); try { if (links.Count > 0) { await m_LinksTable.ExecuteBatchAsync(links); } var r = await table.ExecuteBatchAsync(uploads); // Check for errors var errors = m_Buffer .Where((entry, x) => r.Count <= x || (r[x].HttpStatusCode != 201 && r[x].HttpStatusCode != 204)) .ToList(); var successes = m_Buffer .Where((entry, x) => r.Count > x && (r[x].HttpStatusCode == 201 || r[x].HttpStatusCode == 204)) .ToList(); OnUploadSuccess?.Invoke(201, successes, $"{m_Buffer.Count - errors.Count} record(s) out of {m_Buffer.Count} for controller [{id}] successfully uploaded to Azure table storage {table.Name}."); m_Buffer.Clear(); if (errors.Count > 0) { m_Buffer.AddRange(errors); OnUploadError?.Invoke(0, errors, $"{errors.Count} record(s) for controller [{id}] failed to upload to Azure table storage {table.Name}."); } } catch (StorageException ex) { var status = ex.RequestInformation.HttpStatusCode; var errmsg = ex.RequestInformation.ExtendedErrorInformation?.ErrorMessage ?? ex.RequestInformation.HttpStatusMessage ?? ex.Message; switch (status) { case 0: { OnError?.Invoke(ex, $"Azure table storage batch upload to {table.Name} for controller [{id}] failed."); break; } case 401: case 403: { OnUploadError?.Invoke(status, m_Buffer, $"Azure table storage batch upload to {table.Name} for controller [{id}] forbidden: {errmsg}"); break; } default: { OnUploadError?.Invoke(status, m_Buffer, $"Azure table storage batch upload to {table.Name} for controller [{id}] failed: {errmsg}"); break; } } } catch (Exception ex) { OnError?.Invoke(ex, $"Azure table storage batch upload to {table.Name} for controller [{id}] failed."); } } else if (m_Buffer.Count > 0) { // Single upload var data = m_Buffer[0]; var id = data.Controller; var table = MapTable(data); var entity = data.ToEntity(data.ID ?? m_RowKeyBase + "-" + m_Seq++); var insert = TableOperation.Insert(entity); var link = (data.ID != null) ? TableOperation.Insert(new Link(table.Name, data.ID, entity.PartitionKey, entity.RowKey)) : null; OnDebug?.Invoke($"Uploading record to Azure table storage {table.Name} for controller [{id}]..."); try { TableResult r; if (link != null) { r = await m_LinksTable.ExecuteAsync(link); } r = await table.ExecuteAsync(insert); OnUploadSuccess?.Invoke(r.HttpStatusCode, new[] { data }, $"Azure table storage upload to {table.Name} for controller [{id}] succeeded, result = {r.HttpStatusCode}."); if (m_Buffer.Count <= 1) { m_Buffer.Clear(); } else { m_Buffer.RemoveAt(0); } } catch (StorageException ex) { var status = ex.RequestInformation.HttpStatusCode; var errmsg = ex.RequestInformation.ExtendedErrorInformation?.ErrorMessage ?? ex.RequestInformation.HttpStatusMessage ?? 
ex.Message; switch (status) { case 0: { OnError?.Invoke(ex, $"Azure table storage upload to {table.Name} for controller [{id}] failed."); break; } case 401: case 403: { OnUploadError?.Invoke(status, new[] { data }, $"Azure table storage upload to {table.Name} for controller [{id}] forbidden: {errmsg}"); break; } default: { OnUploadError?.Invoke(status, new[] { data }, $"Azure table storage upload to {table.Name} for controller [{id}] failed: {errmsg}"); break; } } } catch (Exception ex) { OnError?.Invoke(ex, $"Azure table storage upload to {table.Name} for controller [{id}] failed."); } } }
public async Task ProcessEventsAsync(PartitionContext context, IEnumerable<EventData> messages) { EventProcInfo[id] = TimeStampThis("Processing"); var parsedData = messages.Select(eventData => Encoding.UTF8.GetString(eventData.GetBytes())).Select(JsonConvert.DeserializeObject<SystemEvent>).ToList(); if (Engine.EngineIsRunning) { await Engine.AddToMainQueue(parsedData); EventProcInfo[id] = TimeStampThis(parsedData.Count + " events added to engine"); } else { EventProcInfo[id] = TimeStampThis("Engine is not running. Cannot add events. Aborting."); return; } try { var storageMan = new AzureStorageManager(CloudConfigurationManager.GetSetting("AzureStorageConnectionString")); CloudTable Table = storageMan.GetTableReference(ConfigurationManager.AppSettings["OperationStorageTable"]); var batches = new Dictionary<string, TableBatchOperation>(); var batchNames = new Dictionary<string, string>(); const int maxOps = Microsoft.WindowsAzure.Storage.Table.Protocol.TableConstants.TableServiceBatchMaximumOperations; foreach (var operationResult in parsedData) { string batchName; if (!batchNames.TryGetValue(operationResult.PartitionKey, out batchName)) { batchName = operationResult.PartitionKey; } TableBatchOperation batchOperation; if (!batches.ContainsKey(batchName)) { batchOperation = new TableBatchOperation(); batches.Add(batchName, batchOperation); } else { batches.TryGetValue(batchName, out batchOperation); } Debug.Assert(batchOperation != null, "Could not find batchOperation in Dictionary."); if (batchOperation.Count == maxOps) { // The batch is full: file it away under a new name and start a fresh batch under the partition key, // so later events keep filling the fresh batch instead of overflowing the full one. batches[GetNewBatchName(operationResult.PartitionKey, batchNames)] = batchOperation; batchOperation = new TableBatchOperation(); batches[batchName] = batchOperation; } batchOperation.Insert(operationResult); } EventProcInfo[id] = TimeStampThis("Running batches"); foreach (var batch in batches) { await Table.ExecuteBatchAsync(batch.Value); } } catch (Exception ex) when(ex.Message.Contains("The specified entity already exists")) { Logger.AddRow("Duplicate entry tried to be added to table storage."); } catch (Exception ex) { EventProcInfo[id] = TimeStampThis("!ERROR! " + ex.Message); Logger.AddRow("!ERROR! In event processor"); Logger.AddRow(ex.ToString()); } finally { EventProcInfo[id] = TimeStampThis("Setting Checkpoint"); await context.CheckpointAsync(); EventProcInfo[id] = TimeStampThis("Checkpoint set"); } }
public Task <IList <TableResult> > ExecuteBatchAsync(TableBatchOperation batchOperation) { return(table.ExecuteBatchAsync(batchOperation)); }
public static async Task<HttpResponseMessage> PutOrchestratorStatus( [HttpTrigger(AuthorizationLevel.Anonymous, "get", "post")] HttpRequestMessage req, [Table("OrchestratorStatus", Connection = "StatusTableStorage")] CloudTable status, TraceWriter log) { log.Info("C# HttpTrigger for EventGrid."); // parse query parameter var requestBody = await req.Content.ReadAsStringAsync(); log.Info($"Body => {requestBody}"); if (string.IsNullOrEmpty(requestBody)) { return(req.CreateResponse(HttpStatusCode.OK)); } var messages = JsonConvert.DeserializeObject<JArray>(requestBody); //If the request is for subscription validation, send back the validation code. if (messages.Count > 0 && string.Equals((string)messages[0]["eventType"], "Microsoft.EventGrid.SubscriptionValidationEvent", System.StringComparison.OrdinalIgnoreCase)) { log.Info("Validate request received"); return(req.CreateResponse(HttpStatusCode.OK, new { validationResponse = messages[0]["data"]["validationCode"] })); } var batchOperation = new TableBatchOperation(); foreach (JObject message in messages) { log.Info($"EventGridEvent: {message}"); var data = message["data"].ToObject<EventGridEventData>(); // lowercase "data", matching the validation handling above switch (data.EventType) { case (int)OrchestrationRuntimeStatus.Running: batchOperation.InsertOrMerge(new OrchestratorStatus() { PartitionKey = data.InstanceId, RowKey = Guid.NewGuid().ToString(), HubName = data.HubName }); break; case (int)OrchestrationRuntimeStatus.Completed: var query = new TableQuery<OrchestratorStatus>() .Where(TableQuery.GenerateFilterCondition(nameof(OrchestratorStatus.PartitionKey), QueryComparisons.Equal, data.InstanceId)); var segment = await status.ExecuteQuerySegmentedAsync(query, null); foreach (var tableEntity in segment) { batchOperation.Delete(tableEntity); } break; default: break; } } // An empty batch throws, so only execute when there is something to write. if (batchOperation.Count > 0) { await status.ExecuteBatchAsync(batchOperation, new TableRequestOptions { RetryPolicy = new LinearRetry(TimeSpan.FromMilliseconds(10), 5) }, null); } return(req.CreateResponse(HttpStatusCode.OK)); }
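One caveat for the handler above: the accumulated batch can mix several InstanceId partition keys, while an entity group transaction must stay within a single partition. A minimal hedged sketch of bucketing operations per partition key as they are built, then executing one batch per bucket (the refactoring names are illustrative):

var batchesByPartition = new Dictionary<string, TableBatchOperation>();

// Called wherever the original code did batchOperation.InsertOrMerge(...) or .Delete(...).
void AddToPartitionBatch(string partitionKey, Action<TableBatchOperation> add)
{
    if (!batchesByPartition.TryGetValue(partitionKey, out var batch))
    {
        batch = new TableBatchOperation();
        batchesByPartition[partitionKey] = batch;
    }
    add(batch);
}

// e.g. AddToPartitionBatch(data.InstanceId, b => b.InsertOrMerge(orchestratorStatus));
//      AddToPartitionBatch(tableEntity.PartitionKey, b => b.Delete(tableEntity));

// One ExecuteBatchAsync call per partition key keeps every batch valid.
foreach (var pair in batchesByPartition)
{
    await status.ExecuteBatchAsync(pair.Value, new TableRequestOptions { RetryPolicy = new LinearRetry(TimeSpan.FromMilliseconds(10), 5) }, null);
}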
public async Task RemoveFromAllRolesAsync(T user) { bool error = false; var Roles = new List<UserRoleEntity>(); string partitionKeyQuery = TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, user.Id); TableQuery<UserRoleEntity> query = new TableQuery<UserRoleEntity>().Where(partitionKeyQuery); TableQuerySegment<UserRoleEntity> querySegment = null; while (querySegment == null || querySegment.ContinuationToken != null) { querySegment = await _userRoleTable.ExecuteQuerySegmentedAsync(query, querySegment != null ? querySegment.ContinuationToken : null); Roles.AddRange(querySegment.Results); } var batch = new TableBatchOperation(); foreach (UserRoleEntity role in Roles) { role.ETag = "*"; //Delete even if it has changed batch.Add(TableOperation.Delete(role)); if (batch.Count >= 100) { try { //Try executing as a batch await _userRoleTable.ExecuteBatchAsync(batch); batch.Clear(); } catch { } //If the batch succeeded it was cleared above, so this loop only runs for a failed batch, retrying each delete individually foreach (TableOperation op in batch) { try { await _userRoleTable.ExecuteAsync(op); } catch { error = true; } } batch.Clear(); } } if (batch.Count > 0) { try { //Try executing as a batch await _userRoleTable.ExecuteBatchAsync(batch); batch.Clear(); } catch { } //If the batch succeeded it was cleared above, so this loop only runs for a failed batch foreach (TableOperation op in batch) { try { await _userRoleTable.ExecuteAsync(op); } catch { error = true; } } } if (error) { throw new Exception("Failed to remove the user from one or more roles."); } }
public async Task TableFilterTest() { // Reinitialize the name resolver to avoid conflicts _resolver = new RandomNameResolver(); JobHostConfiguration hostConfig = new JobHostConfiguration() { NameResolver = _resolver, TypeLocator = new FakeTypeLocator( this.GetType(), typeof(BlobToCustomObjectBinder)) }; hostConfig.AddService <IWebJobsExceptionHandler>(new TestExceptionHandler()); // write test entities string testTableName = _resolver.ResolveInString(TableName); CloudTableClient tableClient = _storageAccount.CreateCloudTableClient(); CloudTable table = tableClient.GetTableReference(testTableName); await table.CreateIfNotExistsAsync(); var operation = new TableBatchOperation(); operation.Insert(new Person { PartitionKey = "1", RowKey = "1", Name = "Lary", Age = 20, Location = "Seattle" }); operation.Insert(new Person { PartitionKey = "1", RowKey = "2", Name = "Moe", Age = 35, Location = "Seattle" }); operation.Insert(new Person { PartitionKey = "1", RowKey = "3", Name = "Curly", Age = 45, Location = "Texas" }); operation.Insert(new Person { PartitionKey = "1", RowKey = "4", Name = "Bill", Age = 28, Location = "Tam O'Shanter" }); await table.ExecuteBatchAsync(operation); JobHost host = new JobHost(hostConfig); var methodInfo = this.GetType().GetMethod("TableWithFilter", BindingFlags.Public | BindingFlags.Static); var input = new Person { Age = 25, Location = "Seattle" }; string json = JsonConvert.SerializeObject(input); var arguments = new { person = json }; await host.CallAsync(methodInfo, arguments); // wait for test results to appear await TestHelpers.Await(() => testResult != null); JArray results = (JArray)testResult; Assert.Single(results); input = new Person { Age = 25, Location = "Tam O'Shanter" }; json = JsonConvert.SerializeObject(input); arguments = new { person = json }; await host.CallAsync(methodInfo, arguments); await TestHelpers.Await(() => testResult != null); results = (JArray)testResult; Assert.Single(results); Assert.Equal("Bill", (string)results[0]["Name"]); }