public long ClearTable()
{
    long deletionCount = 0;

    // Project only the key columns; the entities are needed solely as delete targets.
    // Take(100) limits this call to a single batch-sized page, so one invocation removes
    // at most 100 rows; call repeatedly (or page with a continuation token) to empty the table.
    var list = new List<string> { "PartitionKey", "RowKey" };
    TableQuery<SensorValueEntity> query = new TableQuery<SensorValueEntity>().Select(list).Take(100);

    // Materialize the results once; ExecuteQuery returns a lazy enumerable, so counting it
    // and then grouping it would otherwise run the query twice.
    var results = table.ExecuteQuery(query).ToList();
    if (results.Count == 0) return deletionCount;

    // A batch may only target a single partition, so group by PartitionKey first.
    foreach (var resultGroup in results.GroupBy(a => a.PartitionKey))
    {
        TableBatchOperation batchOperation = new TableBatchOperation();
        foreach (var result in resultGroup)
        {
            batchOperation.Delete(result);
            deletionCount++;
        }
        table.ExecuteBatch(batchOperation);
    }
    return deletionCount;
}
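The method above clears at most one 100-row page per call. A fully paged variant might look like the sketch below; it is an illustration only, reusing the same `table` field and `SensorValueEntity` type assumed above, looping on the continuation token and flushing one batch per partition group (100 operations per batch and one partition per batch are service limits).

// Sketch: drain the whole table in pages; `table` and SensorValueEntity are assumed from the snippet above.
public long ClearTableCompletely()
{
    long deletionCount = 0;
    var query = new TableQuery<SensorValueEntity>()
        .Select(new List<string> { "PartitionKey", "RowKey" });

    TableContinuationToken token = null;
    do
    {
        TableQuerySegment<SensorValueEntity> segment = table.ExecuteQuerySegmented(query, token);
        token = segment.ContinuationToken;

        foreach (var group in segment.Results.GroupBy(e => e.PartitionKey))
        {
            var batch = new TableBatchOperation();
            foreach (var entity in group)
            {
                batch.Delete(entity);
                deletionCount++;
                if (batch.Count == 100)          // 100-operation batch limit
                {
                    table.ExecuteBatch(batch);
                    batch = new TableBatchOperation();
                }
            }
            if (batch.Count > 0)
            {
                table.ExecuteBatch(batch);
            }
        }
    } while (token != null);

    return deletionCount;
}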
public void DeleteEmail(string thandle, double beforeDays)
{
    CloudStorageAccount storageAccount = CloudStorageAccount.Parse(_connectionString);
    CloudTableClient tableClient = storageAccount.CreateCloudTableClient();
    CloudTable table = tableClient.GetTableReference(_tableName);

    // Project only the keys and Timestamp; the PartitionKey filter keeps every matching
    // entity in a single partition, which is required for batching the deletes.
    var projectionQuery = new TableQuery<DynamicTableEntity>()
        .Where(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, thandle))
        .Select(new[] { "RowKey", "Timestamp" });

    var batchOperation = new TableBatchOperation();
    foreach (var e in table.ExecuteQuery(projectionQuery))
    {
        if ((DateTimeOffset.UtcNow - e.Timestamp).TotalDays > beforeDays)
        {
            batchOperation.Delete(e);

            // A batch may hold at most 100 operations; flush and start a new one.
            if (batchOperation.Count == 100)
            {
                table.ExecuteBatch(batchOperation);
                batchOperation = new TableBatchOperation();
            }
        }
    }

    // Executing an empty batch throws, so only send it when something was queued.
    if (batchOperation.Count > 0)
    {
        table.ExecuteBatch(batchOperation);
    }
}
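The age check above runs client-side after every row in the partition has been downloaded. If only stale rows should be fetched, the Timestamp condition can be pushed to the service instead; a minimal sketch, reusing the `thandle` and `beforeDays` parameters from the method above and combining the two conditions with TableQuery.CombineFilters.

// Sketch: server-side filter on PartitionKey and Timestamp.
string filter = TableQuery.CombineFilters(
    TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, thandle),
    TableOperators.And,
    TableQuery.GenerateFilterConditionForDate(
        "Timestamp", QueryComparisons.LessThan, DateTimeOffset.UtcNow.AddDays(-beforeDays)));

var staleQuery = new TableQuery<DynamicTableEntity>()
    .Where(filter)
    .Select(new[] { "RowKey" });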
public static void Clean(string tableName, IList<string> connectionStrings)
{
    AzureStorageSettings settings = new AzureStorageSettings(connectionStrings);
    foreach (CloudStorageAccount conn in settings.GetStorageAccounts())
    {
        CloudTableClient tableClient = conn.CreateCloudTableClient();
        CloudTable table = tableClient.GetTableReference(tableName);

        TableQuery<DynamicTableEntity> query = new TableQuery<DynamicTableEntity>();
        List<DynamicTableEntity> results = table.ExecuteQuery(query).ToList();

        // Batches are limited to 100 operations and to a single partition,
        // so group by PartitionKey and flush every 100 deletes.
        foreach (var partition in results.GroupBy(e => e.PartitionKey))
        {
            TableBatchOperation batchOperation = new TableBatchOperation();
            foreach (DynamicTableEntity s in partition)
            {
                batchOperation.Delete(s);
                if (batchOperation.Count == 100)
                {
                    table.ExecuteBatch(batchOperation);
                    batchOperation = new TableBatchOperation();
                }
            }
            if (batchOperation.Count > 0)
            {
                table.ExecuteBatch(batchOperation);
            }
        }
    }
}
public async Task<ActionResult> DeleteConfirmed(string partitionKey) { // Delete all rows for this mailing list, that is, // Subscriber rows as well as MailingList rows. // Therefore, no need to specify row key. // Get all rows for this mailing list. For a production app where this // could return too many split the work up into segments. var query = new TableQuery<MailingList>().Where(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, partitionKey)); TableContinuationToken token = null; OperationContext ctx = new OperationContext(); TableQuerySegment<MailingList> currentSegment = null; List<MailingList> listRows = new List<MailingList>(); while (currentSegment == null || currentSegment.ContinuationToken != null) { currentSegment = await mailingListTable.ExecuteQuerySegmentedAsync(query, token, webUIRetryPolicy, ctx); listRows.AddRange(currentSegment.Results); token = currentSegment.ContinuationToken; } // Delete the rows in batches of 100. var batchOperation = new TableBatchOperation(); int itemsInBatch = 0; foreach (MailingList listRow in listRows) { batchOperation.Delete(listRow); itemsInBatch++; if (itemsInBatch == 100) { await mailingListTable.ExecuteBatchAsync(batchOperation); itemsInBatch = 0; batchOperation = new TableBatchOperation(); } } if (itemsInBatch > 0) { await mailingListTable.ExecuteBatchAsync(batchOperation); } return RedirectToAction("Index"); }
public void TableBatchAllSupportedOperationsAPM() { CloudTableClient tableClient = GenerateCloudTableClient(); TableBatchOperation batch = new TableBatchOperation(); string pk = Guid.NewGuid().ToString(); // insert batch.Insert(GenerateRandomEnitity(pk)); // delete { DynamicTableEntity entity = GenerateRandomEnitity(pk); currentTable.Execute(TableOperation.Insert(entity)); batch.Delete(entity); } // replace { DynamicTableEntity entity = GenerateRandomEnitity(pk); currentTable.Execute(TableOperation.Insert(entity)); batch.Replace(entity); } // insert or replace { DynamicTableEntity entity = GenerateRandomEnitity(pk); currentTable.Execute(TableOperation.Insert(entity)); batch.InsertOrReplace(entity); } // merge { DynamicTableEntity entity = GenerateRandomEnitity(pk); currentTable.Execute(TableOperation.Insert(entity)); batch.Merge(entity); } // insert or merge { DynamicTableEntity entity = GenerateRandomEnitity(pk); currentTable.Execute(TableOperation.Insert(entity)); batch.InsertOrMerge(entity); } IList<TableResult> results = null; using (ManualResetEvent evt = new ManualResetEvent(false)) { IAsyncResult asyncRes = null; currentTable.BeginExecuteBatch(batch, (res) => { asyncRes = res; evt.Set(); }, null); evt.WaitOne(); results = currentTable.EndExecuteBatch(asyncRes); } Assert.AreEqual(results.Count, 6); IEnumerator<TableResult> enumerator = results.GetEnumerator(); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.Created); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); }
internal override void InitializeHooks() { OnTableClientState(TableClientState.USE_NEW_HIDE_METADATA, async () => { TableBatchOperation batch = new TableBatchOperation(); batch.Delete(new DynamicTableEntity { PartitionKey = MigrationModel.SINGLE_PARTITION_KEY, RowKey = "0", ETag = ChainTable2Constants.ETAG_ANY }); await RunBatchAsync(batch); }); }
public ActionResult DeleteConfirmed(string partitionKey) { // Delete all rows for this mailing list, that is, // Subscriber rows as well as MailingList rows. // Therefore, no need to specify row key. var query = new TableQuery<MailingList>().Where(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, partitionKey)); var listRows = mailingListTable.ExecuteQuery(query).ToList(); var batchOperation = new TableBatchOperation(); int itemsInBatch = 0; foreach (MailingList listRow in listRows) { batchOperation.Delete(listRow); itemsInBatch++; if (itemsInBatch == 100) { mailingListTable.ExecuteBatch(batchOperation); itemsInBatch = 0; batchOperation = new TableBatchOperation(); } } if (itemsInBatch > 0) { mailingListTable.ExecuteBatch(batchOperation); } return RedirectToAction("Index"); }
private async Task DoTableBatchDeleteAsync(TablePayloadFormat format) { tableClient.DefaultRequestOptions.PayloadFormat = format; string pk = Guid.NewGuid().ToString(); // Add insert DynamicTableEntity ent = GenerateRandomEntity(pk); await currentTable.ExecuteAsync(TableOperation.Insert(ent)); TableBatchOperation batch = new TableBatchOperation(); // Add delete batch.Delete(ent); // success IList<TableResult> results = await currentTable.ExecuteBatchAsync(batch); Assert.AreEqual(results.Count, 1); Assert.AreEqual(results.First().HttpStatusCode, (int)HttpStatusCode.NoContent); // fail - not found OperationContext opContext = new OperationContext(); try { await currentTable.ExecuteBatchAsync(batch, null, opContext); Assert.Fail(); } catch (Exception) { TestHelper.ValidateResponse(opContext, 1, (int)HttpStatusCode.NotFound, new string[] { "ResourceNotFound" }, "The specified resource does not exist."); } }
/// <inheritdoc /> public async Task AddSiteNewsEntriesAsync(DateTime newsDate, IList <string> messages) { var table = this.tableClient.GetTableReference("SiteNews"); await table.CreateIfNotExistsAsync(); // Delete all rows in the partition first var query = new TableQuery <SiteNewsTableEntity>().Where(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, newsDate.ToString("yyyy-MM-dd"))); var batchOperation = new TableBatchOperation(); TableContinuationToken token = null; do { var segment = await table.ExecuteQuerySegmentedAsync(query, token); token = segment.ContinuationToken; foreach (var entity in segment) { batchOperation.Delete(entity); } }while (token != null); IList <TableResult> results; if (batchOperation.Count > 0) { results = await table.ExecuteBatchAsync(batchOperation); batchOperation = new TableBatchOperation(); } // Next insert all messages for (int i = 0; i < messages.Count; i++) { batchOperation.Insert(new SiteNewsTableEntity(newsDate, i) { Message = messages[i] }); } results = await table.ExecuteBatchAsync(batchOperation); var returnStatusCode = HttpStatusCode.OK; foreach (var result in results) { if (result.HttpStatusCode >= 400 && result.HttpStatusCode <= 499) { this.telemetryClient.TrackFailedTableResult(result); returnStatusCode = HttpStatusCode.BadRequest; } if (result.HttpStatusCode >= 500 && result.HttpStatusCode <= 599) { this.telemetryClient.TrackFailedTableResult(result); returnStatusCode = HttpStatusCode.InternalServerError; } } if (returnStatusCode != HttpStatusCode.OK) { throw new InvalidOperationException("Failed to add news entries due to following http code: " + returnStatusCode); } }
public static async Task <List <TableBatchOperation> > ProccessMessages(IEnumerable <IGrouping <string, ClockworkMessageStorage> > messageGroups, string messageType, TraceWriter log) { var opperations = new List <TableBatchOperation>(); foreach (var messageGroup in messageGroups) { log.Info($"Processing {messageType} group {messageGroup.Key}"); if (!IsMessageGroupWhole(messageGroup)) { log.Info($"Message group {messageType} is incomplete."); } else { var batchOperation = new TableBatchOperation(); var content = new StringBuilder(); foreach (var message in messageGroup) { log.Info($">> Inserting {message.Sequence}"); content.Append(message.Content); batchOperation.Delete(message); } opperations.Add(batchOperation); switch (messageType) { case MessageTypes.Message: log.Info($"Sending message body '{content}'"); var messageJson = JsonConvert.SerializeObject(new MessageModel { Message = content.ToString() }); using (var client = new HttpClient()) { await client.PostAsync("http://163.172.129.168:3000/message", new StringContent(messageJson, Encoding.UTF8, "application/json")); } break; case MessageTypes.Registration: log.Info($"Sending registration body '{content}' for {messageGroup.Key}"); var registrationJson = JsonConvert.SerializeObject(new RegistrationModel { PublicKey = content.ToString(), Mobile = messageGroup.First().From }); using (var client = new HttpClient()) { await client.PostAsync("http://163.172.129.168:3000/register", new StringContent(registrationJson, Encoding.UTF8, "application/json")); } break; default: log.Warning($"Unknown message type '{messageType}'"); break; } } } return(opperations); }
public static async Task ExecuteParallelBatchAsync(this CloudTable table, TableOperationType oType, IList <ITableEntity> entities) { var taskCount = 0; const int taskThreshold = 200; const int maxBatchSize = 100; var batchTasks = new List <Task <IList <TableResult> > >(); for (var i = 0; i < entities.Count; i += maxBatchSize) { taskCount++; var batchItems = entities.Skip(i) .Take(maxBatchSize) .ToList(); var batch = new TableBatchOperation(); switch (oType) { case TableOperationType.Insert: batchItems.ForEach(e => batch.Insert(e)); break; case TableOperationType.Delete: batchItems.ForEach(e => batch.Delete(e)); break; case TableOperationType.Replace: batchItems.ForEach(e => batch.Replace(e)); break; case TableOperationType.Merge: batchItems.ForEach(e => batch.Merge(e)); break; case TableOperationType.InsertOrReplace: batchItems.ForEach(e => batch.InsertOrReplace(e)); break; case TableOperationType.InsertOrMerge: batchItems.ForEach(e => batch.InsertOrMerge(e)); break; default: throw new ArgumentOutOfRangeException(nameof(oType), oType, null); } var task = table.ExecuteBatchAsync(batch); batchTasks.Add(task); if (taskCount < taskThreshold) { continue; } await Task.WhenAll(batchTasks); taskCount = 0; } await Task.WhenAll(batchTasks); }
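A short usage sketch for the extension method above. The `table` reference and the `staleEntities` list are placeholders, not names from the original code; the entities are grouped by PartitionKey before the call because each 100-row slice the method builds must stay within a single partition or the service rejects the batch.

// Hypothetical usage (inside an async method); `table` and `staleEntities` are assumptions.
foreach (var partition in staleEntities.GroupBy(e => e.PartitionKey))
{
    IList<ITableEntity> slice = partition.Cast<ITableEntity>().ToList();
    await table.ExecuteParallelBatchAsync(TableOperationType.Delete, slice);
}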
private async Task <List <T> > BatchCloudTableOperation <T>(string TableName, CloudStorageAccount StorageAccount, List <T> items, bool Delete) { if (items == null || !items.Any()) { return(null); } Debug.WriteLine($"\n=== Batch {((Delete) ? "Delete" : "Upsert")} Operation on {TableName} ========="); var OperationSW = Stopwatch.StartNew(); var table = await Utils.GetCloudTableAsync(TableName, StorageAccount, true); var PartitionSortedList = new SortedList <string, List <T> >(); foreach (var item in items) { if (Delete && Utils.GetVal(item, "ETag") == null) { (item as AzureTableEntity).ETag = "*"; } var PK = Utils.GetVal(item, "PartitionKey").ToString(); if (PK == null || String.IsNullOrEmpty(PK)) { Debug.WriteLine("PK == null"); } if (!PartitionSortedList.ContainsKey(PK)) { PartitionSortedList.Add(PK, new List <T>()); } PartitionSortedList[PK].Add(item); } var results = new List <T>(); int operations = 0; foreach (List <T> ListOfObjectsWithTheSamePartitionKey in PartitionSortedList.Values) { var EditableList = ListOfObjectsWithTheSamePartitionKey; while (EditableList.Any()) { var BatchSW = Stopwatch.StartNew(); var batchOperation = new TableBatchOperation(); Action <T> takeAction = o => batchOperation.InsertOrReplace(o as TableEntity); if (Delete) { takeAction = o => batchOperation.Delete(o as TableEntity); } EditableList.Take(100).ToList().ForEach(takeAction); var batchResultsList = await table.ExecuteBatchAsync(batchOperation); try { var typedBatchResults = Utils.TableResultsToTypedList <T>(batchResultsList); results.AddRange(typedBatchResults); Debug.WriteLine("+" + BatchSW.Elapsed + " #" + operations + " \t" + typedBatchResults.Count + " item" + (typedBatchResults.Count == 1 ? "" : "s") + " with PK " + (EditableList.First() as TableEntity).PartitionKey); } catch (Exception EX) { Debug.WriteLine(EX.Message); } operations++; EditableList = EditableList.Skip(100).ToList(); } EditableList.Clear(); } PartitionSortedList.Clear(); //=00:00:00.4292620 2 Total on Blog Debug.WriteLine($"={OperationSW.Elapsed} \t\t{items.Count} Total on {TableName} ==="); return(results); }
private async Task ExecuteAtsOperatioons(List <AtsOperation> operations) { if (operations.Count == 1) { var ats = operations[0]; TableOperation operation = null; switch (ats.OperationType) { case AtsOperationType.Insert: operation = TableOperation.InsertOrReplace(ats.Row); break; case AtsOperationType.Replace: operation = TableOperation.InsertOrReplace(ats.Row); break; case AtsOperationType.Delete: operation = TableOperation.Delete(ats.Row); break; default: break; } if (operation != null) { await ExecuteOperation(operation).ConfigureAwait(false); } } else { List <Task> tasks = new List <Task>(); foreach (var partitionOperations in operations.GroupBy(x => x.Row.PartitionKey)) { var batch = new TableBatchOperation(); foreach (var operation in partitionOperations) { switch (operation.OperationType) { case AtsOperationType.Insert: batch.InsertOrReplace(operation.Row); break; case AtsOperationType.Replace: batch.InsertOrReplace(operation.Row); break; case AtsOperationType.Delete: batch.Delete(operation.Row); break; default: break; } // only 40, because the request itself can actually become too big... :) if (batch.Count == 40) { tasks.Add(ExecuteBatchOperation(batch)); batch = new TableBatchOperation(); } } if (batch.Count != 0) { tasks.Add(ExecuteBatchOperation(batch)); } } await Task.WhenAll(tasks).ConfigureAwait(false); } }
public void BatchDelete <T>(T entity) where T : BaseTableEntity { CheckBatch(entity); _batch.Delete(entity); }
public void BatchOperationUsingLargerViewId() { long currentViewId = 100; long futureViewId = currentViewId + 1; this.UpdateConfiguration(replicas, 0, false, currentViewId); string jobType = "jobType-BatchOperationUsingLargerViewId"; string jobId = "jobId-BatchOperationUsingLargerViewId"; int count = 2; // number of operations in the batch _rtable_Operation List <TableOperationType> opTypes = new List <TableOperationType>() { TableOperationType.Replace, TableOperationType.Delete, }; // // Insert // string jobIdTemplate = jobId + "-{0}"; string messageTemplate = "message-{0}"; string updatedMessageTemplate = "updated-" + messageTemplate; string partitionKey = string.Empty; // // Insert entities // for (int i = 0; i < count; i++) { SampleRTableEntity originalEntity = new SampleRTableEntity( jobType, string.Format(jobIdTemplate, i), string.Format(messageTemplate, i)); this.repTable.Execute(TableOperation.Insert(originalEntity)); partitionKey = originalEntity.PartitionKey; } // // Retrieve entities and use them to create batchOperation to Replace or Delete // IEnumerable <SampleRTableEntity> allEntities = this.rtableWrapper.GetAllRows(partitionKey); TableBatchOperation batchOperation = new TableBatchOperation(); int m = 0; foreach (SampleRTableEntity entity in allEntities) { Console.WriteLine("{0}", entity.ToString()); Console.WriteLine("---------------------------------------"); if (opTypes[m] == TableOperationType.Replace) { SampleRTableEntity replaceEntity = new SampleRTableEntity( entity.JobType, entity.JobId, string.Format(updatedMessageTemplate, m)) { ETag = entity.ETag }; batchOperation.Replace(replaceEntity); } else if (opTypes[m] == TableOperationType.InsertOrReplace) { SampleRTableEntity replaceEntity = new SampleRTableEntity( entity.JobType, entity.JobId, string.Format(updatedMessageTemplate, m)) { ETag = entity.ETag }; batchOperation.InsertOrReplace(replaceEntity); } else if (opTypes[m] == TableOperationType.Delete) { batchOperation.Delete(entity); } else { throw new ArgumentException( string.Format("opType={0} is NOT supported", opTypes[m]), "opType"); } m++; } // // Call ModifyConfigurationBlob to change the viewId of the wrapper to an older value // Console.WriteLine("Changing the viewId to larger viewId {0}", futureViewId); this.UpdateConfiguration(replicas, 0, false, futureViewId); Console.WriteLine("\nCalling BatchOperation with a larger viewId..."); this.repTable.ExecuteBatch(batchOperation); this.ExecuteBatchOperationAndValidate( count, partitionKey, jobType, jobId, opTypes); }
public void BatchOperationExceptionWhenUsingSmallerViewId() { long currentViewId = 100; long badViewId = currentViewId - 1; this.UpdateConfiguration(replicas, 0, false, currentViewId); string jobType = "jobType-BatchOperationExceptionWhenUsingSmallerViewId"; string jobId = "jobId-BatchOperationExceptionWhenUsingSmallerViewId"; int count = 3; // number of operations in the batch _rtable_Operation List <TableOperationType> opTypes = new List <TableOperationType>() { TableOperationType.Replace, TableOperationType.InsertOrReplace, TableOperationType.Delete, }; // // Insert // string jobIdTemplate = jobId + "-{0}"; string messageTemplate = "message-{0}"; string updatedMessageTemplate = "updated-" + messageTemplate; string partitionKey = string.Empty; // // Insert entities // for (int i = 0; i < count; i++) { SampleRTableEntity originalEntity = new SampleRTableEntity( jobType, string.Format(jobIdTemplate, i), string.Format(messageTemplate, i)); this.repTable.Execute(TableOperation.Insert(originalEntity)); partitionKey = originalEntity.PartitionKey; } // // Retrieve entities and use them to create batchOperation to Replace or Delete // IEnumerable <SampleRTableEntity> allEntities = this.rtableWrapper.GetAllRows(partitionKey); TableBatchOperation batchOperation = new TableBatchOperation(); int m = 0; foreach (SampleRTableEntity entity in allEntities) { Console.WriteLine("{0}", entity.ToString()); Console.WriteLine("---------------------------------------"); if (opTypes[m] == TableOperationType.Replace) { SampleRTableEntity replaceEntity = new SampleRTableEntity( entity.JobType, entity.JobId, string.Format(updatedMessageTemplate, m)) { ETag = entity.ETag }; batchOperation.Replace(replaceEntity); } else if (opTypes[m] == TableOperationType.InsertOrReplace) { SampleRTableEntity replaceEntity = new SampleRTableEntity( entity.JobType, entity.JobId, string.Format(updatedMessageTemplate, m)) { ETag = entity.ETag }; batchOperation.InsertOrReplace(replaceEntity); } else if (opTypes[m] == TableOperationType.Delete) { batchOperation.Delete(entity); } else { throw new ArgumentException( string.Format("opType={0} is NOT supported", opTypes[m]), "opType"); } m++; } // // Call ModifyConfigurationBlob to change the viewId of the wrapper to an older value // Console.WriteLine("Changing the viewId to badViewId {0}", badViewId); this.UpdateConfiguration(replicas, 0, false, badViewId); // // Execute Batch _rtable_Operation with bad viewId // Console.WriteLine("\nCalling BatchOperation with badViewId..."); try { this.repTable.ExecuteBatch(batchOperation); } catch (ReplicatedTableStaleViewException ex) { Console.WriteLine("Get this RTableStaleViewException: {0}", ex.Message); Assert.IsTrue(ex.ErrorCode == ReplicatedTableViewErrorCodes.ViewIdSmallerThanEntryViewId); Assert.IsTrue(ex.Message.Contains(string.Format("current _rtable_ViewId {0} is smaller than", badViewId)), "Got unexpected exception message"); } }
public void TableBatchInsertAPM() { TableBatchOperation batch = new TableBatchOperation(); string pk = Guid.NewGuid().ToString(); for (int m = 0; m < 3; m++) { AddInsertToBatch(pk, batch); } // Add insert DynamicTableEntity ent = GenerateRandomEnitity(pk); currentTable.Execute(TableOperation.Insert(ent)); // Add delete batch.Delete(ent); IList<TableResult> results = null; using (ManualResetEvent evt = new ManualResetEvent(false)) { IAsyncResult asyncRes = null; currentTable.BeginExecuteBatch(batch, (res) => { asyncRes = res; evt.Set(); }, null); evt.WaitOne(); results = currentTable.EndExecuteBatch(asyncRes); } Assert.AreEqual(results.Count, 4); IEnumerator<TableResult> enumerator = results.GetEnumerator(); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.Created); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.Created); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.Created); enumerator.MoveNext(); // delete Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); }
/// <summary> /// Transforms Azure table partition key /// The partition key to be transformed is specified using the following extended properties /// Extended Properties /// ifPartitionKeyContains - The transformation is applied only if the contents of partition key contains the specified value. /// replacePartitionKeySubStrWith - Replace the contents of the matched partition key with the specified value to generate a new partition key. /// rowKeyPrefixes - Rowkey prefixes of the rows in which the partition key transformation will be applied. This is optional and will identify the subset of rows to do this operation. /// ifPartitionKeyContains,replacePartitionKeySubStrWith are mandatory /// Extended Properties Example /// "ifPartitionKeyContains": "Beihai", /// "replacePartitionKeySubStrWith": "AADS2S" /// Activity Operation /// The activity iterates through all the rows from the input table with the matching rowKeyPrefixes, /// checks for the partition key update, apply the partition key transformation if the partition key match is found /// runs an insert operation for entities with new partition key and delete operation on existing entities with matching partition keys /// </summary> /// <param name="linkedServices">Linked services referenced by activity definition.</param> /// <param name="datasets">Datasets referenced by activity definition.</param> /// <param name="activity">Activity definition.</param> /// <param name="logger">Used to log messages during activity execution.</param> /// <returns>Activity state at the end of execution</returns> public IDictionary <string, string> Execute( IEnumerable <LinkedService> linkedServices, IEnumerable <Dataset> datasets, Microsoft.Azure.Management.DataFactories.Models.Activity activity, IActivityLogger logger) { DotNetActivity dotNetActivity = (DotNetActivity)activity.TypeProperties; IDictionary <string, string> extendedProperties = dotNetActivity.ExtendedProperties; logger.Write("Logging extended properties if any..."); foreach (KeyValuePair <string, string> entry in extendedProperties) { logger.Write("<key:{0}> <value:{1}>", entry.Key, entry.Value); } string[] rowKeyPrefixes = null; if (extendedProperties.ContainsKey("rowKeyPrefixes")) { rowKeyPrefixes = extendedProperties["rowKeyPrefixes"].Split(','); } if (!extendedProperties.ContainsKey("ifPartitionKeyContains")) { throw new ArgumentException("Partition key match criteria is required", "ifPartitionKeyContains"); } if (!extendedProperties.ContainsKey("replacePartitionKeySubStrWith")) { throw new ArgumentException("Partition key substring replacement value is required", "replacePartitionKeySubStrWith"); } string ifPartitionKeyContains = extendedProperties["ifPartitionKeyContains"]; string replacePartitionKeySubStrWith = extendedProperties["replacePartitionKeySubStrWith"]; AzureStorageLinkedService inputLinkedService; AzureTableDataset sourceTable; // For activities working on a single dataset, the first entry is the input dataset. // The activity.Inputs can have multiple datasets for building pipeline workflow dependencies. 
// We can ignore the rest of the datasets.
Dataset inputDataset = datasets.Single(dataset => dataset.Name == activity.Inputs.First().Name); sourceTable = inputDataset.Properties.TypeProperties as AzureTableDataset; logger.Write("input table:{0}", sourceTable.TableName); inputLinkedService = linkedServices.First( ls => ls.Name == inputDataset.Properties.LinkedServiceName).Properties.TypeProperties as AzureStorageLinkedService; string inputConnectionString = inputLinkedService.ConnectionString; // create storage client for input. Pass the connection string. CloudStorageAccount inputStorageAccount = CloudStorageAccount.Parse(inputConnectionString); CloudTableClient inputTableClient = inputStorageAccount.CreateCloudTableClient(); CloudTable inputTable = inputTableClient.GetTableReference(sourceTable.TableName); long totalProcessedRecords = 0; long actualAffectedRecords = 0; TableContinuationToken tableContinuationToken = null; List <Task> tasks = new List <Task>(); do { var resultSegment = inputTable.ExecuteQuerySegmented(new TableQuery(), tableContinuationToken); tableContinuationToken = resultSegment.ContinuationToken; var partitionGroups = (from s in resultSegment.Results where (rowKeyPrefixes == null || rowKeyPrefixes.Length <= 0) ? true : this.IsMatch(s.RowKey, rowKeyPrefixes) select s).GroupBy(a => a.PartitionKey); foreach (IGrouping <string, DynamicTableEntity> g in partitionGroups) { TableBatchOperation deleteBatch = new TableBatchOperation(); TableBatchOperation insertBatch = new TableBatchOperation(); foreach (DynamicTableEntity e in g.AsEnumerable()) { if (!e.PartitionKey.Contains(ifPartitionKeyContains)) { continue; } DynamicTableEntity newEntity = new DynamicTableEntity( e.PartitionKey.Replace(ifPartitionKeyContains, replacePartitionKeySubStrWith), e.RowKey); foreach (KeyValuePair <string, EntityProperty> property in e.Properties) { newEntity.Properties.Add(property); } insertBatch.InsertOrReplace(newEntity); deleteBatch.Delete(e); actualAffectedRecords++; } if (insertBatch.Count > 0) { tasks.Add(this.RetryOnStorageTimeout(inputTable.ExecuteBatchInChunkAsync(insertBatch), numRetriesOnTimeout, numMsDelayOnTimeout, logger)); } if (deleteBatch.Count > 0) { tasks.Add(this.RetryOnStorageTimeout(inputTable.ExecuteBatchInChunkAsync(deleteBatch), numRetriesOnTimeout, numMsDelayOnTimeout, logger)); } logger.Write("Updated partition: {0}", g.Key); } totalProcessedRecords += resultSegment.Results.Count; logger.Write("Processed records count: {0}", totalProcessedRecords); logger.Write("Affected records count: {0}", actualAffectedRecords); } while (tableContinuationToken != null); Task.WaitAll(tasks.ToArray()); logger.Write("Updated {0} records", actualAffectedRecords); return(new Dictionary <string, string>()); }
public void TableBatchDeleteSync() { CloudTableClient tableClient = GenerateCloudTableClient(); string pk = Guid.NewGuid().ToString(); // Add insert DynamicTableEntity ent = GenerateRandomEnitity(pk); currentTable.Execute(TableOperation.Insert(ent)); TableBatchOperation batch = new TableBatchOperation(); // Add delete batch.Delete(ent); // success IList<TableResult> results = currentTable.ExecuteBatch(batch); Assert.AreEqual(results.Count, 1); Assert.AreEqual(results.First().HttpStatusCode, (int)HttpStatusCode.NoContent); // fail - not found OperationContext opContext = new OperationContext(); try { currentTable.ExecuteBatch(batch, null, opContext); Assert.Fail(); } catch (StorageException) { TestHelper.ValidateResponse(opContext, 1, (int)HttpStatusCode.NotFound, new string[] { "ResourceNotFound" }, "The specified resource does not exist."); } }
/// <summary> /// Deletes a set of already existing data entries in the table, by using eTag. /// Fails if the data does not already exist or if eTag does not match. /// </summary> /// <param name="list">List of data entries and their corresponding etags to be deleted from the table.</param> /// <returns>Completion promise for this storage operation.</returns> public async Task DeleteTableEntriesAsync(IReadOnlyCollection <Tuple <T, string> > collection) { const string operation = "DeleteTableEntries"; var startTime = DateTime.UtcNow; if (Logger.IsVerbose2) { Logger.Verbose2("Deleting {0} table entries: {1}", TableName, Utils.EnumerableToString(collection)); } if (collection == null) { throw new ArgumentNullException("collection"); } if (collection.Count > AzureTableDefaultPolicies.MAX_BULK_UPDATE_ROWS) { throw new ArgumentOutOfRangeException("collection", collection.Count, "Too many rows for bulk delete - max " + AzureTableDefaultPolicies.MAX_BULK_UPDATE_ROWS); } if (collection.Count == 0) { return; } try { var entityBatch = new TableBatchOperation(); foreach (var tuple in collection) { // WAS: // svc.AttachTo(TableName, tuple.Item1, tuple.Item2); // svc.DeleteObject(tuple.Item1); // SaveChangesOptions.ReplaceOnUpdate | SaveChangesOptions.Batch, T item = tuple.Item1; item.ETag = tuple.Item2; entityBatch.Delete(item); } try { await Task <IList <TableResult> > .Factory.FromAsync( tableReference.BeginExecuteBatch, tableReference.EndExecuteBatch, entityBatch, null); } catch (Exception exc) { Logger.Warn(ErrorCode.AzureTable_08, String.Format("Intermediate error deleting entries {0} from the table {1}.", Utils.EnumerableToString(collection), TableName), exc); throw; } } finally { CheckAlertSlowAccess(startTime, operation); } }
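A caller of the bulk delete above has to pair each entity with the ETag it read earlier. A minimal sketch under assumed names: `fetchedRows` stands for previously read entities that still carry their ETags, and `dataManager` stands for whatever instance exposes DeleteTableEntriesAsync; neither name comes from the original code.

// Sketch: build the (entity, eTag) tuples the bulk delete expects.
var toDelete = fetchedRows
    .Select(row => Tuple.Create(row, row.ETag))
    .ToList();

await dataManager.DeleteTableEntriesAsync(toDelete);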
private async Task InsertOrMergeBatchWithNEntities(int n, TablePayloadFormat format) { tableClient.DefaultRequestOptions.PayloadFormat = format; string pk = Guid.NewGuid().ToString(); TableBatchOperation insertBatch = new TableBatchOperation(); TableBatchOperation mergeBatch = new TableBatchOperation(); TableBatchOperation delBatch = new TableBatchOperation(); for (int m = 0; m < n; m++) { insertBatch.InsertOrMerge(GenerateRandomEntity(pk)); } IList<TableResult> results = await currentTable.ExecuteBatchAsync(insertBatch); foreach (TableResult res in results) { Assert.AreEqual(res.HttpStatusCode, (int)HttpStatusCode.NoContent); // update entity and add to merge batch DynamicTableEntity ent = res.Result as DynamicTableEntity; ent.Properties.Add("foo2", new EntityProperty("bar2")); mergeBatch.InsertOrMerge(ent); } // execute insertOrMerge batch, this time entities exist IList<TableResult> mergeResults = await currentTable.ExecuteBatchAsync(mergeBatch); foreach (TableResult res in mergeResults) { Assert.AreEqual(res.HttpStatusCode, (int)HttpStatusCode.NoContent); // Add to delete batch delBatch.Delete((ITableEntity)res.Result); } IList<TableResult> delResults = await currentTable.ExecuteBatchAsync(delBatch); foreach (TableResult res in delResults) { Assert.AreEqual(res.HttpStatusCode, (int)HttpStatusCode.NoContent); } }
public async Task <ActionResult> Edit(string partitionKey, string rowKey, string listName, string emailAddress, Subscriber editedSubscriber) { // Since MailingList and UpdateModel are in the view as EditorFor fields, // and since they refer to the same properties as PartitionKey and RowKey, // exclude PartitionKey and RowKey from model binding when calling UpdateModel. var excludeProperties = new string[] { "PartitionKey", "RowKey" }; if (ModelState.IsValid) { try { UpdateModel(editedSubscriber, string.Empty, null, excludeProperties); if (editedSubscriber.PartitionKey == partitionKey && editedSubscriber.RowKey == rowKey) { //Keys didn't change -- Update the row var replaceOperation = TableOperation.Replace(editedSubscriber); await mailingListTable.ExecuteAsync(replaceOperation); } else { // Keys changed, delete the old record and insert the new one. if (editedSubscriber.PartitionKey != partitionKey) { // PartitionKey changed, can't do delete/insert in a batch. var deleteOperation = TableOperation.Delete(new Subscriber { PartitionKey = partitionKey, RowKey = rowKey, ETag = editedSubscriber.ETag }); await mailingListTable.ExecuteAsync(deleteOperation); var insertOperation = TableOperation.Insert(editedSubscriber); await mailingListTable.ExecuteAsync(insertOperation); } else { // RowKey changed, do delete/insert in a batch. var batchOperation = new TableBatchOperation(); batchOperation.Delete(new Subscriber { PartitionKey = partitionKey, RowKey = rowKey, ETag = editedSubscriber.ETag }); batchOperation.Insert(editedSubscriber); await mailingListTable.ExecuteBatchAsync(batchOperation); } } return(RedirectToAction("Index")); } catch (StorageException ex) { if (ex.RequestInformation.HttpStatusCode == 412) { // Concurrency error. // Only catching concurrency errors for non-key fields. If someone // changes a key field we'll get a 404 and we have no way to know // what they changed it to. var currentSubscriber = FindRow(partitionKey, rowKey); if (currentSubscriber.Verified != editedSubscriber.Verified) { ModelState.AddModelError("Verified", "Current value: " + currentSubscriber.Verified); } ModelState.AddModelError(string.Empty, "The record you attempted to edit " + "was modified by another user after you got the original value. The " + "edit operation was canceled and the current values in the database " + "have been displayed. If you still want to edit this record, click " + "the Save button again. Otherwise click the Back to List hyperlink."); ModelState.SetModelValue("ETag", new ValueProviderResult(currentSubscriber.ETag, currentSubscriber.ETag, null)); } else { throw; } } } var lists = await GetListNamesAsync(); ViewBag.ListName = new SelectList(lists, "ListName", "Description", listName); return(View(editedSubscriber)); }
private async Task DoTableBatchAllSupportedOperationsAsync(TablePayloadFormat format) { tableClient.DefaultRequestOptions.PayloadFormat = format; TableBatchOperation batch = new TableBatchOperation(); string pk = Guid.NewGuid().ToString(); // insert batch.Insert(GenerateRandomEntity(pk)); // delete { DynamicTableEntity entity = GenerateRandomEntity(pk); await currentTable.ExecuteAsync(TableOperation.Insert(entity)); batch.Delete(entity); } // replace { DynamicTableEntity entity = GenerateRandomEntity(pk); await currentTable.ExecuteAsync(TableOperation.Insert(entity)); batch.Replace(entity); } // insert or replace { DynamicTableEntity entity = GenerateRandomEntity(pk); await currentTable.ExecuteAsync(TableOperation.Insert(entity)); batch.InsertOrReplace(entity); } // merge { DynamicTableEntity entity = GenerateRandomEntity(pk); await currentTable.ExecuteAsync(TableOperation.Insert(entity)); batch.Merge(entity); } // insert or merge { DynamicTableEntity entity = GenerateRandomEntity(pk); await currentTable.ExecuteAsync(TableOperation.Insert(entity)); batch.InsertOrMerge(entity); } IList<TableResult> results = await currentTable.ExecuteBatchAsync(batch); Assert.AreEqual(results.Count, 6); IEnumerator<TableResult> enumerator = results.GetEnumerator(); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.Created); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); }
protected override void Operate(TableBatchOperation batchOperation, ITableEntity entity) { batchOperation.Delete(entity); }
public void TableBatchDeleteSyncUsingRetrieve() { // Insert Entity Console.WriteLine("Calling Insert()..."); DynamicReplicatedTableEntity baseEntity = new DynamicReplicatedTableEntity("replace test", "foo"); baseEntity.Properties.Add("prop1", new EntityProperty("value1")); this.repTable.Execute(TableOperation.Insert(baseEntity)); // Retrieve existing entities using Retrieve Console.WriteLine("Calling Retrieve()..."); TableResult result = this.repTable.Execute(TableOperation.Retrieve<DynamicReplicatedTableEntity>(baseEntity.PartitionKey, baseEntity.RowKey)); DynamicReplicatedTableEntity retrievedEntity = result.Result as DynamicReplicatedTableEntity; Assert.IsNotNull(retrievedEntity); Assert.AreEqual(baseEntity.Properties.Count, retrievedEntity.Properties.Count); Assert.AreEqual(baseEntity.Properties["prop1"], retrievedEntity.Properties["prop1"]); TableBatchOperation batch = new TableBatchOperation(); // Add delete batch.Delete(retrievedEntity); Console.WriteLine("Calling ExecuteBatch() to delete..."); IList<TableResult> results = this.repTable.ExecuteBatch(batch); Assert.AreEqual(results.Count, 1); Assert.AreEqual(results.First().HttpStatusCode, (int)HttpStatusCode.NoContent); }
public void Execute(TableBatchOperation batchOperation, TableEntity entity) { entity.ETag = "*"; // Always overwrite (ignore concurrency). batchOperation.Delete(entity); }
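Setting `ETag = "*"` as above tells the service to skip the optimistic-concurrency check, so the delete succeeds no matter which version of the row is currently stored. The same trick works for a single, non-batched delete; a minimal sketch with placeholder key values and an assumed `table` reference:

// Unconditional single-row delete; the partition/row key values are placeholders.
var victim = new TableEntity("some-partition", "some-row") { ETag = "*" };
table.Execute(TableOperation.Delete(victim));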
async Task DoRandomAtomicCalls() { for (int callNum = 0; callNum < MigrationModel.NUM_CALLS_PER_MACHINE; callNum++) { SortedDictionary<PrimaryKey, DynamicTableEntity> dump = await peekProxy.DumpReferenceTableAsync(); if (PSharpRuntime.Nondeterministic()) { // Query var query = new TableQuery<DynamicTableEntity>(); query.FilterString = ChainTableUtils.CombineFilters( TableQuery.GenerateFilterCondition( TableConstants.PartitionKey, QueryComparisons.Equal, MigrationModel.SINGLE_PARTITION_KEY), TableOperators.And, NondeterministicUserPropertyFilterString()); await RunQueryAtomicAsync(query); } else { // Batch write int batchSize = PSharpRuntime.Nondeterministic() ? 2 : 1; var batch = new TableBatchOperation(); var rowKeyChoices = new List<string> { "0", "1", "2", "3", "4", "5" }; for (int opNum = 0; opNum < batchSize; opNum++) { int opTypeNum = PSharpNondeterminism.Choice(7); int rowKeyI = PSharpNondeterminism.Choice(rowKeyChoices.Count); string rowKey = rowKeyChoices[rowKeyI]; rowKeyChoices.RemoveAt(rowKeyI); // Avoid duplicate in same batch var primaryKey = new PrimaryKey(MigrationModel.SINGLE_PARTITION_KEY, rowKey); string eTag = null; if (opTypeNum >= 1 && opTypeNum <= 3) { DynamicTableEntity existingEntity; int etagTypeNum = PSharpNondeterminism.Choice( dump.TryGetValue(primaryKey, out existingEntity) ? 3 : 2); switch (etagTypeNum) { case 0: eTag = ChainTable2Constants.ETAG_ANY; break; case 1: eTag = "wrong"; break; case 2: eTag = existingEntity.ETag; break; } } DynamicTableEntity entity = new DynamicTableEntity { PartitionKey = MigrationModel.SINGLE_PARTITION_KEY, RowKey = rowKey, ETag = eTag, Properties = new Dictionary<string, EntityProperty> { // Give us something to see on merge. Might help with tracing too! { string.Format("{0}_c{1}_o{2}", machineId.ToString(), callNum, opNum), new EntityProperty(true) }, // Property with 50%/50% distribution for use in filters. { "isHappy", new EntityProperty(PSharpRuntime.Nondeterministic()) } } }; switch (opTypeNum) { case 0: batch.Insert(entity); break; case 1: batch.Replace(entity); break; case 2: batch.Merge(entity); break; case 3: batch.Delete(entity); break; case 4: batch.InsertOrReplace(entity); break; case 5: batch.InsertOrMerge(entity); break; case 6: entity.ETag = ChainTable2Constants.ETAG_DELETE_IF_EXISTS; batch.Delete(entity); break; } } await RunBatchAsync(batch); } } }
public async Task StoreAsync <T>(nStoreOperation storaeOperationType, IEnumerable <T> models) where T : new() { try { // notify delegate if (_delegate != null) { _delegate.OnStoring(typeof(T), storaeOperationType); } // Retrieve a reference to the table. CloudTable table = GetTableReference(GetTableName <T>()); // Create the batch operation. List <TableBatchOperation> batchOperations = new List <TableBatchOperation>(); // Create the first batch var currentBatch = new TableBatchOperation(); batchOperations.Add(currentBatch); // lookup the entitymapper var entityMapper = _entityMapperRegistry[typeof(T)]; // define the modelcounter int modelCounter = 0; // Add all items foreach (var model in models) { switch (storaeOperationType) { case nStoreOperation.insertOperation: currentBatch.Insert(new DynamicTableEntity <T>(model, entityMapper)); break; case nStoreOperation.insertOrReplaceOperation: currentBatch.InsertOrReplace(new DynamicTableEntity <T>(model, entityMapper)); break; case nStoreOperation.mergeOperation: currentBatch.Merge(new DynamicTableEntity <T>(model, entityMapper)); break; case nStoreOperation.mergeOrInserOperation: currentBatch.InsertOrMerge(new DynamicTableEntity <T>(model, entityMapper)); break; case nStoreOperation.delete: currentBatch.Delete(new DynamicTableEntity <T>(model, entityMapper)); break; } modelCounter++; if (modelCounter % 100 == 0) { currentBatch = new TableBatchOperation(); batchOperations.Add(currentBatch); } } // execute foreach (var createdBatch in batchOperations) { if (createdBatch.Count() > 0) { await table.ExecuteBatchAsync(createdBatch); // notify delegate if (_delegate != null) { _delegate.OnStored(typeof(T), storaeOperationType, createdBatch.Count(), null); } } } } catch (StorageException ex) { // check the exception if (!_autoCreateTable || !ex.Message.StartsWith("0:The table specified does not exist", StringComparison.CurrentCulture)) { // notify delegate if (_delegate != null) { _delegate.OnStored(typeof(T), storaeOperationType, 0, ex); } throw ex; } // try to create the table await CreateTableAsync <T>(); // retry await StoreAsync <T>(storaeOperationType, models); } }
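The batching store method above can also drive bulk deletes through its `nStoreOperation.delete` case. A hedged usage sketch: `storageContext` stands in for whatever class exposes StoreAsync and `expiredModels` for a list of previously loaded models; both names are assumptions, not part of the original code.

// Sketch: delete a set of models through the generic batching path above.
await storageContext.StoreAsync(nStoreOperation.delete, expiredModels);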
public static void DeleteFromStorage(IEnumerable<TemplateModel> templates)
{
    // All templates are expected to share one PartitionKey, since a batch cannot span partitions.
    TableBatchOperation batchOperation = new TableBatchOperation();
    foreach (TemplateModel template in templates)
    {
        batchOperation.Delete(template);
        if (batchOperation.Count == 100)   // 100-operation batch limit
        {
            StorageFactory.Instance.IpcAzureAppTenantStateTable.ExecuteBatch(batchOperation, tableReqOptions);
            batchOperation = new TableBatchOperation();
        }
    }
    if (batchOperation.Count > 0)
    {
        StorageFactory.Instance.IpcAzureAppTenantStateTable.ExecuteBatch(batchOperation, tableReqOptions);
    }
}
internal async Task EnsurePartitionSwitchedAsync(string partitionKey, TableRequestOptions requestOptions, OperationContext operationContext) { var metaQuery = new TableQuery <MTableEntity> { FilterString = ChainTableUtils.GeneratePointRetrievalFilterCondition( new PrimaryKey(partitionKey, ROW_KEY_PARTITION_META)) }; Recheck: MTablePartitionState? state; if (IsBugEnabled(MTableOptionalBug.EnsurePartitionSwitchedFromPopulated)) { state = null; } else { state = (from r in (await oldTable.ExecuteQueryAtomicAsync(metaQuery, requestOptions, operationContext)) select r.partitionState).SingleOrDefault(); await monitor.AnnotateLastBackendCallAsync(); } switch (state) { case null: try { await oldTable.ExecuteAsync(TableOperation.Insert(new MTableEntity { PartitionKey = partitionKey, RowKey = ROW_KEY_PARTITION_META, partitionState = MTablePartitionState.SWITCHED }), requestOptions, operationContext); } catch (StorageException ex) { if (ex.GetHttpStatusCode() != HttpStatusCode.Conflict) { throw ChainTableUtils.GenerateInternalException(ex); } if (!IsBugEnabled(MTableOptionalBug.EnsurePartitionSwitchedFromPopulated)) { await monitor.AnnotateLastBackendCallAsync(); // We could now be in POPULATED or SWITCHED. // XXX: In production, what's more likely? Is it faster // to recheck first or just try the case below? goto Recheck; } } await monitor.AnnotateLastBackendCallAsync(); return; case MTablePartitionState.POPULATED: try { var batch = new TableBatchOperation(); batch.Replace(new MTableEntity { PartitionKey = partitionKey, RowKey = ROW_KEY_PARTITION_META, ETag = ChainTable2Constants.ETAG_ANY, partitionState = MTablePartitionState.SWITCHED }); batch.Delete(new MTableEntity { PartitionKey = partitionKey, RowKey = ROW_KEY_PARTITION_POPULATED_ASSERTION, ETag = ChainTable2Constants.ETAG_ANY, }); await oldTable.ExecuteBatchAsync(batch, requestOptions, operationContext); } catch (ChainTableBatchException ex) { // The only way this can fail (within the semantics) is // if someone else moved the partition to SWITCHED. if (!(ex.FailedOpIndex == 1 && ex.GetHttpStatusCode() == HttpStatusCode.NotFound)) { throw ChainTableUtils.GenerateInternalException(ex); } } await monitor.AnnotateLastBackendCallAsync(); return; case MTablePartitionState.SWITCHED: // Nothing to do return; } }
public void TableBatchAllSupportedOperationsSync() { CloudTableClient tableClient = GenerateCloudTableClient(); TableBatchOperation batch = new TableBatchOperation(); string pk = Guid.NewGuid().ToString(); // insert batch.Insert(GenerateRandomEnitity(pk)); // delete { DynamicTableEntity entity = GenerateRandomEnitity(pk); currentTable.Execute(TableOperation.Insert(entity)); batch.Delete(entity); } // replace { DynamicTableEntity entity = GenerateRandomEnitity(pk); currentTable.Execute(TableOperation.Insert(entity)); batch.Replace(entity); } // insert or replace { DynamicTableEntity entity = GenerateRandomEnitity(pk); currentTable.Execute(TableOperation.Insert(entity)); batch.InsertOrReplace(entity); } // merge { DynamicTableEntity entity = GenerateRandomEnitity(pk); currentTable.Execute(TableOperation.Insert(entity)); batch.Merge(entity); } // insert or merge { DynamicTableEntity entity = GenerateRandomEnitity(pk); currentTable.Execute(TableOperation.Insert(entity)); batch.InsertOrMerge(entity); } IList<TableResult> results = currentTable.ExecuteBatch(batch); Assert.AreEqual(results.Count, 6); IEnumerator<TableResult> enumerator = results.GetEnumerator(); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.Created); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); }
public override async Task ExecuteDelete(Table table, List <DeleteQuery> queries, CancellationToken cancellationToken) { try { var connection = GetCloudTableClient(); var cTable = connection.GetTableReference(table.Name); var rowsDeleted = 0; var rowcount = 0; var batchTasks = new List <Task>(); //start a batch operation to update the rows. var batchOperation = new TableBatchOperation(); //loop through all the queries to retrieve the rows to be updated. foreach (var query in queries) { if (cancellationToken.IsCancellationRequested) { throw new ConnectionException("Delete rows cancelled."); } //Read the key fields from the table var tableQuery = new TableQuery { SelectColumns = new[] { "PartitionKey", "RowKey" }, FilterString = BuildFilterString(query.Filters) }; //TableResult = TableReference.ExecuteQuery(TableQuery); TableContinuationToken continuationToken = null; do { var result = await cTable.ExecuteQuerySegmentedAsync(tableQuery, continuationToken, null, null, cancellationToken); continuationToken = result.ContinuationToken; foreach (var entity in result.Results) { batchOperation.Delete(entity); rowcount++; rowsDeleted++; if (rowcount > 99) { await cTable.ExecuteBatchAsync(batchOperation); batchOperation = new TableBatchOperation(); rowcount = 0; } } } while (continuationToken != null); } if (batchOperation.Count > 0) { await cTable.ExecuteBatchAsync(batchOperation); } } catch (Exception ex) { throw new ConnectionException($"The Azure delete query for {table.Name} failed. { ex.Message} ", ex); } }
public void TableBatchOperationsWithEmptyKeys() { CloudTableClient tableClient = GenerateCloudTableClient(); // Insert Entity DynamicTableEntity ent = new DynamicTableEntity() { PartitionKey = "", RowKey = "" }; ent.Properties.Add("foo2", new EntityProperty("bar2")); ent.Properties.Add("foo", new EntityProperty("bar")); TableBatchOperation batch = new TableBatchOperation(); batch.Insert(ent); currentTable.ExecuteBatch(batch); // Retrieve Entity TableBatchOperation retrieveBatch = new TableBatchOperation(); retrieveBatch.Retrieve(ent.PartitionKey, ent.RowKey); TableResult result = currentTable.ExecuteBatch(retrieveBatch).First(); DynamicTableEntity retrievedEntity = result.Result as DynamicTableEntity; Assert.IsNotNull(retrievedEntity); Assert.AreEqual(ent.PartitionKey, retrievedEntity.PartitionKey); Assert.AreEqual(ent.RowKey, retrievedEntity.RowKey); Assert.AreEqual(ent.Properties.Count, retrievedEntity.Properties.Count); Assert.AreEqual(ent.Properties["foo"].StringValue, retrievedEntity.Properties["foo"].StringValue); Assert.AreEqual(ent.Properties["foo"], retrievedEntity.Properties["foo"]); Assert.AreEqual(ent.Properties["foo2"].StringValue, retrievedEntity.Properties["foo2"].StringValue); Assert.AreEqual(ent.Properties["foo2"], retrievedEntity.Properties["foo2"]); // InsertOrMerge DynamicTableEntity insertOrMergeEntity = new DynamicTableEntity(ent.PartitionKey, ent.RowKey); insertOrMergeEntity.Properties.Add("foo3", new EntityProperty("value")); batch = new TableBatchOperation(); batch.InsertOrMerge(insertOrMergeEntity); currentTable.ExecuteBatch(batch); result = currentTable.ExecuteBatch(retrieveBatch).First(); retrievedEntity = result.Result as DynamicTableEntity; Assert.IsNotNull(retrievedEntity); Assert.AreEqual(insertOrMergeEntity.Properties["foo3"], retrievedEntity.Properties["foo3"]); // InsertOrReplace DynamicTableEntity insertOrReplaceEntity = new DynamicTableEntity(ent.PartitionKey, ent.RowKey); insertOrReplaceEntity.Properties.Add("prop2", new EntityProperty("otherValue")); batch = new TableBatchOperation(); batch.InsertOrReplace(insertOrReplaceEntity); currentTable.ExecuteBatch(batch); result = currentTable.ExecuteBatch(retrieveBatch).First(); retrievedEntity = result.Result as DynamicTableEntity; Assert.IsNotNull(retrievedEntity); Assert.AreEqual(1, retrievedEntity.Properties.Count); Assert.AreEqual(insertOrReplaceEntity.Properties["prop2"], retrievedEntity.Properties["prop2"]); // Merge DynamicTableEntity mergeEntity = new DynamicTableEntity(retrievedEntity.PartitionKey, retrievedEntity.RowKey) { ETag = retrievedEntity.ETag }; mergeEntity.Properties.Add("mergeProp", new EntityProperty("merged")); batch = new TableBatchOperation(); batch.Merge(mergeEntity); currentTable.ExecuteBatch(batch); // Retrieve Entity & Verify Contents result = currentTable.ExecuteBatch(retrieveBatch).First(); retrievedEntity = result.Result as DynamicTableEntity; Assert.IsNotNull(retrievedEntity); Assert.AreEqual(mergeEntity.Properties["mergeProp"], retrievedEntity.Properties["mergeProp"]); // Replace DynamicTableEntity replaceEntity = new DynamicTableEntity(ent.PartitionKey, ent.RowKey) { ETag = retrievedEntity.ETag }; replaceEntity.Properties.Add("replaceProp", new EntityProperty("replace")); batch = new TableBatchOperation(); batch.Replace(replaceEntity); currentTable.ExecuteBatch(batch); // Retrieve Entity & Verify Contents result = currentTable.ExecuteBatch(retrieveBatch).First(); retrievedEntity = result.Result as DynamicTableEntity; Assert.IsNotNull(retrievedEntity); 
Assert.AreEqual(replaceEntity.Properties.Count, retrievedEntity.Properties.Count); Assert.AreEqual(replaceEntity.Properties["replaceProp"], retrievedEntity.Properties["replaceProp"]); // Delete Entity batch = new TableBatchOperation(); batch.Delete(retrievedEntity); currentTable.ExecuteBatch(batch); // Retrieve Entity result = currentTable.ExecuteBatch(retrieveBatch).First(); Assert.IsNull(result.Result); }
public void TableBatchDeleteFailAPM() { CloudTableClient tableClient = GenerateCloudTableClient(); ITableEntity ent = GenerateRandomEnitity("foo"); // add entity currentTable.Execute(TableOperation.Insert(ent)); // update entity TableResult result = currentTable.Execute(TableOperation.Retrieve(ent.PartitionKey, ent.RowKey)); DynamicTableEntity retrievedEnt = result.Result as DynamicTableEntity; retrievedEnt.Properties.Add("prop", new EntityProperty("var")); currentTable.Execute(TableOperation.Replace(retrievedEnt)); // Attempt to delete with stale etag TableBatchOperation batch = new TableBatchOperation(); batch.Delete(ent); OperationContext opContext = new OperationContext(); try { using (ManualResetEvent evt = new ManualResetEvent(false)) { IAsyncResult asyncRes = null; currentTable.BeginExecuteBatch(batch, null, opContext, (res) => { asyncRes = res; evt.Set(); }, null); evt.WaitOne(); currentTable.EndExecuteBatch(asyncRes); } Assert.Fail(); } catch (StorageException) { TestHelper.ValidateResponse(opContext, 1, (int)HttpStatusCode.PreconditionFailed, new string[] { "UpdateConditionNotSatisfied", "ConditionNotMet" }, new string[] { "The update condition specified in the request was not satisfied.", "The condition specified using HTTP conditional header(s) is not met." }); } }
private void InsertAndDeleteBatchWithNEntities(int n) { TableBatchOperation batch = new TableBatchOperation(); string pk = Guid.NewGuid().ToString(); for (int m = 0; m < n; m++) { batch.Insert(GenerateRandomEnitity(pk)); } IList<TableResult> results = currentTable.ExecuteBatch(batch); TableBatchOperation delBatch = new TableBatchOperation(); foreach (TableResult res in results) { delBatch.Delete((ITableEntity)res.Result); Assert.AreEqual(res.HttpStatusCode, (int)HttpStatusCode.Created); } IList<TableResult> delResults = currentTable.ExecuteBatch(delBatch); foreach (TableResult res in delResults) { Assert.AreEqual(res.HttpStatusCode, (int)HttpStatusCode.NoContent); } }
private async Task InsertAndDeleteBatchWithNEntities(int n, TablePayloadFormat format) { tableClient.DefaultRequestOptions.PayloadFormat = format; TableBatchOperation batch = new TableBatchOperation(); string pk = Guid.NewGuid().ToString(); for (int m = 0; m < n; m++) { batch.Insert(GenerateRandomEntity(pk)); } IList<TableResult> results = await currentTable.ExecuteBatchAsync(batch); TableBatchOperation delBatch = new TableBatchOperation(); foreach (TableResult res in results) { delBatch.Delete((ITableEntity)res.Result); Assert.AreEqual(res.HttpStatusCode, (int)HttpStatusCode.Created); } IList<TableResult> delResults = await currentTable.ExecuteBatchAsync(delBatch); foreach (TableResult res in delResults) { Assert.AreEqual(res.HttpStatusCode, (int)HttpStatusCode.NoContent); } }
public void TableBatchDeleteAPM() { CloudTableClient tableClient = GenerateCloudTableClient(); string pk = Guid.NewGuid().ToString(); // Add insert DynamicTableEntity ent = GenerateRandomEnitity(pk); currentTable.Execute(TableOperation.Insert(ent)); TableBatchOperation batch = new TableBatchOperation(); // Add delete batch.Delete(ent); // success IList<TableResult> results = null; using (ManualResetEvent evt = new ManualResetEvent(false)) { IAsyncResult asyncRes = null; currentTable.BeginExecuteBatch(batch, (res) => { asyncRes = res; evt.Set(); }, null); evt.WaitOne(); results = currentTable.EndExecuteBatch(asyncRes); } Assert.AreEqual(results.Count, 1); Assert.AreEqual(results.First().HttpStatusCode, (int)HttpStatusCode.NoContent); // fail - not found OperationContext opContext = new OperationContext(); try { using (ManualResetEvent evt = new ManualResetEvent(false)) { IAsyncResult asyncRes = null; currentTable.BeginExecuteBatch(batch, null, opContext, (res) => { asyncRes = res; evt.Set(); }, null); evt.WaitOne(); currentTable.EndExecuteBatch(asyncRes); } Assert.Fail(); } catch (StorageException) { TestHelper.ValidateResponse(opContext, 1, (int)HttpStatusCode.NotFound, new string[] { "ResourceNotFound" }, "The specified resource does not exist."); } }
public static async Task <String> updateUser( [HttpTrigger(AuthorizationLevel.Anonymous, "get", Route = "update-User/{act}/{name}")] HttpRequestMessage request, [Table("Users")] CloudTable cloudTable, string name, string act, ILogger log) { Console.Out.WriteLine("in updateUser"); //addition CloudTable table = null; CloudTableClient client = null; try { StorageCredentials creds = new StorageCredentials(Environment.GetEnvironmentVariable("accountName"), Environment.GetEnvironmentVariable("accountKey")); CloudStorageAccount account = new CloudStorageAccount(creds, useHttps: true); client = account.CreateCloudTableClient(); table = client.GetTableReference("Table00" + name); Console.WriteLine(table.Uri.ToString()); } catch (Exception ex) { Console.WriteLine(ex); } if (act.Equals("remove")) { Console.Out.WriteLine("in remove"); //delete table await table.DeleteIfExistsAsync(); //delete user from Users TableOperation retrieve = TableOperation.Retrieve <TableEntity>(name, ""); CloudTable usersTable = client.GetTableReference("Users"); await usersTable.CreateIfNotExistsAsync(); TableResult result = await usersTable.ExecuteAsync(retrieve); var deleteEntity = (TableEntity)result.Result; TableOperation delete = TableOperation.Delete(deleteEntity); await usersTable.ExecuteAsync(delete); //delete requests CloudTable requestTable = client.GetTableReference("Requests"); TableQuery <Request> idQuery = new TableQuery <Request>() .Where(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, name)); TableQuerySegment <Request> queryResult = await requestTable.ExecuteQuerySegmentedAsync(idQuery, null); var batchOperation = new TableBatchOperation(); foreach (var e in queryResult.Results) { batchOperation.Delete((TableEntity)e); } if ((queryResult.Results).Count != 0) { await requestTable.ExecuteBatchAsync(batchOperation); } return(act + " " + name); } else if (act == "add") { Console.Out.WriteLine("in add"); await table.CreateIfNotExistsAsync(); CloudTable usersTable = client.GetTableReference("Users"); await usersTable.CreateIfNotExistsAsync(); User newUser = new User(); newUser.PartitionKey = name; newUser.RowKey = ""; newUser.Password = name; newUser.UserType = "user"; TableOperation add = TableOperation.InsertOrReplace(newUser); await usersTable.ExecuteAsync(add); return(act + " " + name); } return(act + " " + name + " error in action"); }
public void TableBatchInsertSync()
{
    TableBatchOperation batch = new TableBatchOperation();
    string pk = Guid.NewGuid().ToString();

    for (int m = 0; m < 3; m++)
    {
        AddInsertToBatch(pk, batch);
    }

    // Add insert
    DynamicTableEntity ent = GenerateRandomEnitity(pk);
    currentTable.Execute(TableOperation.Insert(ent));

    // Add delete
    batch.Delete(ent);

    IList<TableResult> results = currentTable.ExecuteBatch(batch);

    Assert.AreEqual(results.Count, 4);

    IEnumerator<TableResult> enumerator = results.GetEnumerator();
    enumerator.MoveNext();
    Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.Created);
    enumerator.MoveNext();
    Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.Created);
    enumerator.MoveNext();
    Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.Created);
    enumerator.MoveNext();
    // delete
    Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent);
}
public ActionResult DeleteConfirmed(string tenantId, ServicePrincipalModel editedServicePrincipalList)
{
    // Delete all rows for this servicePrincipal list, that is,
    // Subscriber rows as well as ServicePrincipal rows.
    // Therefore, no need to specify row key.
    var listRows = ServicePrincipalModel.GetAllFromStorage(tenantId);

    var batchOperation = new TableBatchOperation();
    int itemsInBatch = 0;
    foreach (DynamicTableEntity listRow in listRows)
    {
        batchOperation.Delete(listRow);
        itemsInBatch++;
        if (itemsInBatch == 100)
        {
            StorageFactory.Instance.IpcAzureAppTenantStateTable.ExecuteBatch(batchOperation);
            itemsInBatch = 0;
            batchOperation = new TableBatchOperation();
        }
    }
    if (itemsInBatch > 0)
    {
        StorageFactory.Instance.IpcAzureAppTenantStateTable.ExecuteBatch(batchOperation);
    }
    return RedirectToAction("Index");
}
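The action above collects every row for the tenant and deletes it in groups of at most 100 operations, the cap on a single entity-group transaction. A self-contained sketch of that pattern, combined with a segmented, key-only query, is below; the helper name DeletePartitionAsync and the projection are illustrative assumptions, not code from the sample.

// Sketch: delete every entity in one partition, assuming the classic
// WindowsAzure.Storage / Microsoft.Azure.Cosmos.Table SDK used throughout these samples.
public static async Task<int> DeletePartitionAsync(CloudTable table, string partitionKey)
{
    // Project only the keys; the ETag is still returned, which is all Delete needs.
    var query = new TableQuery<DynamicTableEntity>()
        .Where(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, partitionKey))
        .Select(new[] { "RowKey" });

    int deleted = 0;
    TableContinuationToken token = null;
    do
    {
        TableQuerySegment<DynamicTableEntity> segment = await table.ExecuteQuerySegmentedAsync(query, token);
        token = segment.ContinuationToken;

        var batch = new TableBatchOperation();
        foreach (DynamicTableEntity entity in segment.Results)
        {
            batch.Delete(entity);
            if (batch.Count == 100) // per-batch limit for an entity-group transaction
            {
                deleted += (await table.ExecuteBatchAsync(batch)).Count;
                batch = new TableBatchOperation();
            }
        }
        if (batch.Count > 0)
        {
            deleted += (await table.ExecuteBatchAsync(batch)).Count;
        }
    } while (token != null);

    return deleted;
}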
private async Task DoTableBatchInsertAsync(TablePayloadFormat format)
{
    tableClient.DefaultRequestOptions.PayloadFormat = format;

    TableBatchOperation batch = new TableBatchOperation();
    string pk = Guid.NewGuid().ToString();

    for (int m = 0; m < 3; m++)
    {
        AddInsertToBatch(pk, batch);
    }

    // Add insert
    DynamicTableEntity ent = GenerateRandomEntity(pk);
    await currentTable.ExecuteAsync(TableOperation.Insert(ent));

    // Add delete
    batch.Delete(ent);

    IList<TableResult> results = await currentTable.ExecuteBatchAsync(batch);

    Assert.AreEqual(results.Count, 4);

    IEnumerator<TableResult> enumerator = results.GetEnumerator();
    enumerator.MoveNext();
    Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.Created);
    enumerator.MoveNext();
    Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.Created);
    enumerator.MoveNext();
    Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.Created);
    enumerator.MoveNext();
    // delete
    Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent);
}
private void InsertOrMergeBatchWithNEntities(int n)
{
    string pk = Guid.NewGuid().ToString();

    TableBatchOperation insertBatch = new TableBatchOperation();
    TableBatchOperation mergeBatch = new TableBatchOperation();
    TableBatchOperation delBatch = new TableBatchOperation();

    for (int m = 0; m < n; m++)
    {
        insertBatch.InsertOrMerge(GenerateRandomEnitity(pk));
    }

    IList<TableResult> results = this.repTable.ExecuteBatch(insertBatch);
    foreach (TableResult res in results)
    {
        Assert.AreEqual(res.HttpStatusCode, (int)HttpStatusCode.NoContent);

        // update entity and add to merge batch
        DynamicReplicatedTableEntity ent = res.Result as DynamicReplicatedTableEntity;
        ent.Properties.Add("foo2", new EntityProperty("bar2"));
        mergeBatch.InsertOrMerge(ent);
    }

    // execute insertOrMerge batch, this time entities exist
    IList<TableResult> mergeResults = this.repTable.ExecuteBatch(mergeBatch);
    foreach (TableResult res in mergeResults)
    {
        Assert.AreEqual(res.HttpStatusCode, (int)HttpStatusCode.NoContent);

        // Add to delete batch
        delBatch.Delete((ITableEntity)res.Result);
    }

    IList<TableResult> delResults = this.repTable.ExecuteBatch(delBatch);
    foreach (TableResult res in delResults)
    {
        Assert.AreEqual(res.HttpStatusCode, (int)HttpStatusCode.NoContent);
    }
}
public async Task TableBatchOnSecondaryAsync()
{
    AssertSecondaryEndpoint();

    CloudTable table = GenerateCloudTableClient().GetTableReference(GenerateRandomTableName());

    TableRequestOptions options = new TableRequestOptions()
    {
        LocationMode = LocationMode.SecondaryOnly,
        RetryPolicy = new NoRetry(),
    };

    TableBatchOperation batch = new TableBatchOperation();
    batch.Retrieve("PartitionKey", "RowKey");

    OperationContext context = new OperationContext();
    await table.ExecuteBatchAsync(batch, options, context);
    Assert.AreEqual(StorageLocation.Secondary, context.LastResult.TargetLocation);

    batch = new TableBatchOperation();
    batch.Insert(new DynamicTableEntity("PartitionKey", "RowKey"));
    StorageException e = await TestHelper.ExpectedExceptionAsync<StorageException>(
        async () => await table.ExecuteBatchAsync(batch, options, null),
        "Batch operations other than retrieve should not be sent to secondary");
    Assert.AreEqual(SR.PrimaryOnlyCommand, e.Message);

    batch = new TableBatchOperation();
    batch.InsertOrMerge(new DynamicTableEntity("PartitionKey", "RowKey"));
    e = await TestHelper.ExpectedExceptionAsync<StorageException>(
        async () => await table.ExecuteBatchAsync(batch, options, null),
        "Batch operations other than retrieve should not be sent to secondary");
    Assert.AreEqual(SR.PrimaryOnlyCommand, e.Message);

    batch = new TableBatchOperation();
    batch.InsertOrReplace(new DynamicTableEntity("PartitionKey", "RowKey"));
    e = await TestHelper.ExpectedExceptionAsync<StorageException>(
        async () => await table.ExecuteBatchAsync(batch, options, null),
        "Batch operations other than retrieve should not be sent to secondary");
    Assert.AreEqual(SR.PrimaryOnlyCommand, e.Message);

    batch = new TableBatchOperation();
    batch.Merge(new DynamicTableEntity("PartitionKey", "RowKey") { ETag = "*" });
    e = await TestHelper.ExpectedExceptionAsync<StorageException>(
        async () => await table.ExecuteBatchAsync(batch, options, null),
        "Batch operations other than retrieve should not be sent to secondary");
    Assert.AreEqual(SR.PrimaryOnlyCommand, e.Message);

    batch = new TableBatchOperation();
    batch.Replace(new DynamicTableEntity("PartitionKey", "RowKey") { ETag = "*" });
    e = await TestHelper.ExpectedExceptionAsync<StorageException>(
        async () => await table.ExecuteBatchAsync(batch, options, null),
        "Batch operations other than retrieve should not be sent to secondary");
    Assert.AreEqual(SR.PrimaryOnlyCommand, e.Message);

    batch = new TableBatchOperation();
    batch.Delete(new DynamicTableEntity("PartitionKey", "RowKey") { ETag = "*" });
    e = await TestHelper.ExpectedExceptionAsync<StorageException>(
        async () => await table.ExecuteBatchAsync(batch, options, null),
        "Batch operations other than retrieve should not be sent to secondary");
    Assert.AreEqual(SR.PrimaryOnlyCommand, e.Message);
}
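The test above encodes the rule that only Retrieve batches may target the read-access secondary endpoint; every write-style batch is rejected client-side with PrimaryOnlyCommand. For plain point reads that should prefer the primary but survive a primary outage, a hedged sketch follows; LocationMode.PrimaryThenSecondary is a real option in this SDK, while the helper name and shape are illustrative.

public static async Task<DynamicTableEntity> ReadWithSecondaryFallbackAsync(
    CloudTable table, string partitionKey, string rowKey)
{
    var options = new TableRequestOptions
    {
        // Reads may fail over to the RA-GRS secondary; writes would still require the primary.
        LocationMode = LocationMode.PrimaryThenSecondary
    };

    TableResult result = await table.ExecuteAsync(
        TableOperation.Retrieve<DynamicTableEntity>(partitionKey, rowKey), options, null);

    return result.Result as DynamicTableEntity;
}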
public void TableBatchAllSupportedOperationsSync()
{
    TableBatchOperation batch = new TableBatchOperation();
    string pk = Guid.NewGuid().ToString();

    // insert
    batch.Insert(GenerateRandomEnitity(pk));

    // delete
    {
        DynamicReplicatedTableEntity entity = GenerateRandomEnitity(pk);
        this.repTable.Execute(TableOperation.Insert(entity));
        batch.Delete(entity);
    }

    // replace
    {
        DynamicReplicatedTableEntity entity = GenerateRandomEnitity(pk);
        this.repTable.Execute(TableOperation.Insert(entity));
        batch.Replace(entity);
    }

    // insert or replace
    {
        DynamicReplicatedTableEntity entity = GenerateRandomEnitity(pk);
        this.repTable.Execute(TableOperation.Insert(entity));
        batch.InsertOrReplace(entity);
    }

    // merge
    {
        DynamicReplicatedTableEntity entity = GenerateRandomEnitity(pk);
        this.repTable.Execute(TableOperation.Insert(entity));
        batch.Merge(entity);
    }

    // insert or merge
    {
        DynamicReplicatedTableEntity entity = GenerateRandomEnitity(pk);
        this.repTable.Execute(TableOperation.Insert(entity));
        batch.InsertOrMerge(entity);
    }

    IList<TableResult> results = this.repTable.ExecuteBatch(batch);

    Assert.AreEqual(results.Count, 6);

    IEnumerator<TableResult> enumerator = results.GetEnumerator();
    for (int i = 0; i < results.Count; i++)
    {
        enumerator.MoveNext();
        Assert.AreEqual((int)HttpStatusCode.NoContent, enumerator.Current.HttpStatusCode, "HttpStatusCode mismatch i={0}", i);
    }
}
private async Task DoTableBatchDeleteFailAsync(TablePayloadFormat format)
{
    tableClient.DefaultRequestOptions.PayloadFormat = format;

    ITableEntity ent = GenerateRandomEntity("foo");

    // add entity
    await currentTable.ExecuteAsync(TableOperation.Insert(ent));

    // update entity so the ETag held by 'ent' becomes stale
    TableResult res = await currentTable.ExecuteAsync(TableOperation.Retrieve(ent.PartitionKey, ent.RowKey));
    DynamicTableEntity retrievedEnt = res.Result as DynamicTableEntity;
    retrievedEnt.Properties.Add("prop", new EntityProperty("var"));
    await currentTable.ExecuteAsync(TableOperation.Replace(retrievedEnt));

    // deleting with the stale ETag fails the precondition check
    TableBatchOperation batch = new TableBatchOperation();
    batch.Delete(ent);

    OperationContext opContext = new OperationContext();
    try
    {
        await currentTable.ExecuteBatchAsync(batch, null, opContext);
        Assert.Fail();
    }
    catch (Exception)
    {
        TestHelper.ValidateResponse(opContext, 1, (int)HttpStatusCode.PreconditionFailed,
            new string[] { "UpdateConditionNotSatisfied", "ConditionNotMet" },
            new string[] { "The update condition specified in the request was not satisfied.", "The condition specified using HTTP conditional header(s) is not met." });
    }
}
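The failure above is the 412 PreconditionFailed that Table storage returns when a delete carries an ETag that no longer matches the stored entity. When last-writer-wins semantics are acceptable, the usual way to sidestep it is the "*" wildcard ETag; a minimal sketch follows (the helper name ForceDeleteAsync is illustrative).

public static Task<TableResult> ForceDeleteAsync(CloudTable table, string partitionKey, string rowKey)
{
    // "*" tells the service to delete regardless of the entity's current ETag,
    // deliberately giving up optimistic-concurrency protection.
    var entity = new DynamicTableEntity(partitionKey, rowKey) { ETag = "*" };
    return table.ExecuteAsync(TableOperation.Delete(entity));
}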
public void TableBatchDeleteSyncUsingExecuteQuery()
{
    // Insert Entity
    Console.WriteLine("Calling Insert()...");
    DynamicReplicatedTableEntity baseEntity = new DynamicReplicatedTableEntity("replace test", "foo");
    baseEntity.Properties.Add("prop1", new EntityProperty("value1"));
    this.repTable.Execute(TableOperation.Insert(baseEntity));

    TableBatchOperation batch = new TableBatchOperation();

    // Retrieve existing entities using TableQuery
    TableQuery<DynamicReplicatedTableEntity> query = new TableQuery<DynamicReplicatedTableEntity>().Where(
        TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, baseEntity.PartitionKey));
    IEnumerable<DynamicReplicatedTableEntity> allEntities = this.repTable.ExecuteQuery<DynamicReplicatedTableEntity>(query);
    foreach (DynamicReplicatedTableEntity entity in allEntities)
    {
        entity.ETag = entity._rtable_Version.ToString();

        // Add delete
        batch.Delete(entity);
    }

    Console.WriteLine("Calling ExecuteBatch() to delete...");
    IList<TableResult> results = this.repTable.ExecuteBatch(batch);

    Assert.AreEqual(results.Count, 1);
    Assert.AreEqual(results.First().HttpStatusCode, (int)HttpStatusCode.NoContent);
}
private async Task DoTableBatchOperationsWithEmptyKeysAsync(TablePayloadFormat format)
{
    tableClient.DefaultRequestOptions.PayloadFormat = format;

    // Insert Entity
    DynamicTableEntity ent = new DynamicTableEntity() { PartitionKey = "", RowKey = "" };
    ent.Properties.Add("foo2", new EntityProperty("bar2"));
    ent.Properties.Add("foo", new EntityProperty("bar"));

    TableBatchOperation batch = new TableBatchOperation();
    batch.Insert(ent);
    await currentTable.ExecuteBatchAsync(batch);

    // Retrieve Entity
    TableBatchOperation retrieveBatch = new TableBatchOperation();
    retrieveBatch.Retrieve(ent.PartitionKey, ent.RowKey);
    TableResult result = (await currentTable.ExecuteBatchAsync(retrieveBatch)).First();

    DynamicTableEntity retrievedEntity = result.Result as DynamicTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(ent.PartitionKey, retrievedEntity.PartitionKey);
    Assert.AreEqual(ent.RowKey, retrievedEntity.RowKey);
    Assert.AreEqual(ent.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(ent.Properties["foo"].StringValue, retrievedEntity.Properties["foo"].StringValue);
    Assert.AreEqual(ent.Properties["foo"], retrievedEntity.Properties["foo"]);
    Assert.AreEqual(ent.Properties["foo2"].StringValue, retrievedEntity.Properties["foo2"].StringValue);
    Assert.AreEqual(ent.Properties["foo2"], retrievedEntity.Properties["foo2"]);

    // InsertOrMerge
    DynamicTableEntity insertOrMergeEntity = new DynamicTableEntity(ent.PartitionKey, ent.RowKey);
    insertOrMergeEntity.Properties.Add("foo3", new EntityProperty("value"));
    batch = new TableBatchOperation();
    batch.InsertOrMerge(insertOrMergeEntity);
    await currentTable.ExecuteBatchAsync(batch);

    result = (await currentTable.ExecuteBatchAsync(retrieveBatch)).First();
    retrievedEntity = result.Result as DynamicTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(insertOrMergeEntity.Properties["foo3"], retrievedEntity.Properties["foo3"]);

    // InsertOrReplace
    DynamicTableEntity insertOrReplaceEntity = new DynamicTableEntity(ent.PartitionKey, ent.RowKey);
    insertOrReplaceEntity.Properties.Add("prop2", new EntityProperty("otherValue"));
    batch = new TableBatchOperation();
    batch.InsertOrReplace(insertOrReplaceEntity);
    await currentTable.ExecuteBatchAsync(batch);

    result = (await currentTable.ExecuteBatchAsync(retrieveBatch)).First();
    retrievedEntity = result.Result as DynamicTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(1, retrievedEntity.Properties.Count);
    Assert.AreEqual(insertOrReplaceEntity.Properties["prop2"], retrievedEntity.Properties["prop2"]);

    // Merge
    DynamicTableEntity mergeEntity = new DynamicTableEntity(retrievedEntity.PartitionKey, retrievedEntity.RowKey) { ETag = retrievedEntity.ETag };
    mergeEntity.Properties.Add("mergeProp", new EntityProperty("merged"));
    batch = new TableBatchOperation();
    batch.Merge(mergeEntity);
    await currentTable.ExecuteBatchAsync(batch);

    // Retrieve Entity & Verify Contents
    result = (await currentTable.ExecuteBatchAsync(retrieveBatch)).First();
    retrievedEntity = result.Result as DynamicTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(mergeEntity.Properties["mergeProp"], retrievedEntity.Properties["mergeProp"]);

    // Replace
    DynamicTableEntity replaceEntity = new DynamicTableEntity(ent.PartitionKey, ent.RowKey) { ETag = retrievedEntity.ETag };
    replaceEntity.Properties.Add("replaceProp", new EntityProperty("replace"));
    batch = new TableBatchOperation();
    batch.Replace(replaceEntity);
    await currentTable.ExecuteBatchAsync(batch);

    // Retrieve Entity & Verify Contents
    result = (await currentTable.ExecuteBatchAsync(retrieveBatch)).First();
    retrievedEntity = result.Result as DynamicTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(replaceEntity.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(replaceEntity.Properties["replaceProp"], retrievedEntity.Properties["replaceProp"]);

    // Delete Entity
    batch = new TableBatchOperation();
    batch.Delete(retrievedEntity);
    await currentTable.ExecuteBatchAsync(batch);

    // Retrieve Entity
    result = (await currentTable.ExecuteBatchAsync(retrieveBatch)).First();
    Assert.IsNull(result.Result);
}
public void TableBatchOperationsWithEmptyKeys()
{
    // Insert Entity
    DynamicReplicatedTableEntity ent = new DynamicReplicatedTableEntity() { PartitionKey = "", RowKey = "" };
    ent.Properties.Add("foo2", new EntityProperty("bar2"));
    ent.Properties.Add("foo", new EntityProperty("bar"));

    TableBatchOperation batch = new TableBatchOperation();
    batch.Insert(ent);
    this.repTable.ExecuteBatch(batch);

    // Retrieve Entity
    TableBatchOperation retrieveBatch = new TableBatchOperation();
    retrieveBatch.Retrieve<DynamicReplicatedTableEntity>(ent.PartitionKey, ent.RowKey);
    TableResult result = this.repTable.ExecuteBatch(retrieveBatch).First();

    DynamicReplicatedTableEntity retrievedEntity = result.Result as DynamicReplicatedTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(ent.PartitionKey, retrievedEntity.PartitionKey);
    Assert.AreEqual(ent.RowKey, retrievedEntity.RowKey);
    Assert.AreEqual(ent.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(ent.Properties["foo"].StringValue, retrievedEntity.Properties["foo"].StringValue);
    Assert.AreEqual(ent.Properties["foo"], retrievedEntity.Properties["foo"]);
    Assert.AreEqual(ent.Properties["foo2"].StringValue, retrievedEntity.Properties["foo2"].StringValue);
    Assert.AreEqual(ent.Properties["foo2"], retrievedEntity.Properties["foo2"]);

    // InsertOrMerge
    DynamicReplicatedTableEntity insertOrMergeEntity = new DynamicReplicatedTableEntity(ent.PartitionKey, ent.RowKey);
    insertOrMergeEntity.Properties.Add("foo3", new EntityProperty("value"));
    batch = new TableBatchOperation();
    batch.InsertOrMerge(insertOrMergeEntity);
    this.repTable.ExecuteBatch(batch);

    result = this.repTable.ExecuteBatch(retrieveBatch).First();
    retrievedEntity = result.Result as DynamicReplicatedTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(insertOrMergeEntity.Properties["foo3"], retrievedEntity.Properties["foo3"]);

    // InsertOrReplace
    DynamicReplicatedTableEntity insertOrReplaceEntity = new DynamicReplicatedTableEntity(ent.PartitionKey, ent.RowKey);
    insertOrReplaceEntity.Properties.Add("prop2", new EntityProperty("otherValue"));
    batch = new TableBatchOperation();
    batch.InsertOrReplace(insertOrReplaceEntity);
    this.repTable.ExecuteBatch(batch);

    result = this.repTable.ExecuteBatch(retrieveBatch).First();
    retrievedEntity = result.Result as DynamicReplicatedTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(1, retrievedEntity.Properties.Count);
    Assert.AreEqual(insertOrReplaceEntity.Properties["prop2"], retrievedEntity.Properties["prop2"]);

    // Merge
    DynamicReplicatedTableEntity mergeEntity = new DynamicReplicatedTableEntity(retrievedEntity.PartitionKey, retrievedEntity.RowKey) { ETag = retrievedEntity.ETag };
    mergeEntity.Properties.Add("mergeProp", new EntityProperty("merged"));
    batch = new TableBatchOperation();
    batch.Merge(mergeEntity);
    this.repTable.ExecuteBatch(batch);

    // Retrieve Entity & Verify Contents
    result = this.repTable.ExecuteBatch(retrieveBatch).First();
    retrievedEntity = result.Result as DynamicReplicatedTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(mergeEntity.Properties["mergeProp"], retrievedEntity.Properties["mergeProp"]);

    // Replace
    DynamicReplicatedTableEntity replaceEntity = new DynamicReplicatedTableEntity(ent.PartitionKey, ent.RowKey) { ETag = retrievedEntity.ETag };
    replaceEntity.Properties.Add("replaceProp", new EntityProperty("replace"));
    batch = new TableBatchOperation();
    batch.Replace(replaceEntity);
    this.repTable.ExecuteBatch(batch);

    // Retrieve Entity & Verify Contents
    result = this.repTable.ExecuteBatch(retrieveBatch).First();
    retrievedEntity = result.Result as DynamicReplicatedTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(replaceEntity.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(replaceEntity.Properties["replaceProp"], retrievedEntity.Properties["replaceProp"]);

    // Delete Entity
    batch = new TableBatchOperation();
    batch.Delete(retrievedEntity);
    this.repTable.ExecuteBatch(batch);

    // Retrieve Entity
    result = this.repTable.ExecuteBatch(retrieveBatch).First();
    Assert.IsNull(result.Result);
}