/// <summary>
/// Adds <paramref name="entity"/> to <paramref name="tableBatchOperation"/> using the
/// table operation named by <paramref name="batchMethodName"/> (one of the
/// <see cref="CtConstants"/> operation-name constants).
/// </summary>
/// <param name="tableBatchOperation">Batch to append the operation to.</param>
/// <param name="entity">Entity the operation applies to.</param>
/// <param name="batchMethodName">Operation name; must match a CtConstants.TableOp* constant.</param>
/// <exception cref="ArgumentOutOfRangeException">Thrown for an unrecognized operation name.</exception>
private static void AddOperationToBatch(ref TableBatchOperation tableBatchOperation, TAzureTableEntity entity, string batchMethodName)
{
    switch (batchMethodName)
    {
        case CtConstants.TableOpInsert:
            tableBatchOperation.Insert(entity);
            break;
        case CtConstants.TableOpInsertOrMerge:
            tableBatchOperation.InsertOrMerge(entity);
            break;
        case CtConstants.TableOpInsertOrReplace:
            tableBatchOperation.InsertOrReplace(entity);
            break;
        case CtConstants.TableOpMerge:
            tableBatchOperation.Merge(entity);
            break;
        case CtConstants.TableOpDelete:
            // Wildcard ETag: delete unconditionally, ignoring optimistic concurrency.
            entity.ETag = "*";
            tableBatchOperation.Delete(entity);
            break;
        case CtConstants.TableOpReplace:
            tableBatchOperation.Replace(entity);
            break;
        default:
            // BUGFIX: an unrecognized name was previously ignored silently,
            // leaving the batch short one operation; fail fast instead.
            throw new ArgumentOutOfRangeException(nameof(batchMethodName), batchMethodName, "Unsupported batch operation name.");
    }
}
/// <summary>
/// Adds <paramref name="entity"/> to <paramref name="tableBatchOperation"/> using the
/// operation selected by <paramref name="batchMethodName"/>.
/// </summary>
/// <param name="tableBatchOperation">Batch to append the operation to.</param>
/// <param name="entity">Entity the operation applies to.</param>
/// <param name="batchMethodName">Which table operation to enqueue.</param>
/// <exception cref="ArgumentOutOfRangeException">Thrown for an unmapped <see cref="SaveType"/> value.</exception>
private void AddOperationToBatch(ref TableBatchOperation tableBatchOperation, TAzureTableEntity entity, SaveType batchMethodName)
{
    switch (batchMethodName)
    {
        case SaveType.Insert:
            tableBatchOperation.Insert(entity);
            break;
        case SaveType.InsertOrMerge:
            tableBatchOperation.InsertOrMerge(entity);
            break;
        case SaveType.InsertOrReplace:
            tableBatchOperation.InsertOrReplace(entity);
            break;
        case SaveType.Merge:
            tableBatchOperation.Merge(entity);
            break;
        case SaveType.Delete:
            tableBatchOperation.Delete(entity);
            break;
        case SaveType.Replace:
            tableBatchOperation.Replace(entity);
            break;
        default:
            // BUGFIX: an unmapped SaveType was previously a silent no-op,
            // producing a batch missing an operation; fail fast instead.
            throw new ArgumentOutOfRangeException(nameof(batchMethodName), batchMethodName, "Unsupported SaveType.");
    }
}
// Verifies a single-operation batch Merge against the replicated table:
// the merged row must end up with both the original and the merged property.
public void TableBatchMergeSync()
{
    // Seed a row with one property.
    var original = new DynamicReplicatedTableEntity("merge test", "foo");
    original.Properties.Add("prop1", new EntityProperty("value1"));
    this.repTable.Execute(TableOperation.Insert(original));

    // Merge a second property onto the same row via a one-op batch,
    // conditioned on the ETag returned by the insert.
    var patch = new DynamicReplicatedTableEntity(original.PartitionKey, original.RowKey) { ETag = original.ETag };
    patch.Properties.Add("prop2", new EntityProperty("value2"));
    var mergeBatch = new TableBatchOperation();
    mergeBatch.Merge(patch);
    this.repTable.ExecuteBatch(mergeBatch);

    // Retrieve and confirm both properties survived the merge.
    TableResult result = this.repTable.Execute(TableOperation.Retrieve <DynamicReplicatedTableEntity>(original.PartitionKey, original.RowKey));
    var fetched = result.Result as DynamicReplicatedTableEntity;
    Assert.IsNotNull(fetched);
    Assert.AreEqual(2, fetched.Properties.Count);
    Assert.AreEqual(original.Properties["prop1"], fetched.Properties["prop1"]);
    Assert.AreEqual(patch.Properties["prop2"], fetched.Properties["prop2"]);
}
/// <summary>
/// Renumbers the guild's role assignations to a dense 0..n-1 sequence (ordered by
/// their current Order) and persists the changes via batched merges.
/// </summary>
/// <param name="guild">Guild whose role assignations are consolidated.</param>
private async Task ConsolidateOrder(Guild guild)
{
    if (guild.RoleAssignations == null || !guild.RoleAssignations.Any())
    {
        await guild.LoadChildrens(g => g.RoleAssignations);
    }

    // BUGFIX: re-check for null too — if LoadChildrens leaves RoleAssignations
    // null, the original `.Any()` call threw a NullReferenceException.
    if (guild.RoleAssignations == null || !guild.RoleAssignations.Any())
    {
        this._telemetry.TrackEvent("No roles to reorder, finishing");
        return;
    }

    var ordered = guild.RoleAssignations.OrderBy(ra => ra.Order).ToList();
    var lastOrder = 0;
    foreach (var roleAssignation in ordered)
    {
        roleAssignation.Order = lastOrder;
        lastOrder += 1;
    }

    var bindingsTable = GetTable <RoleAssignation>();

    // Azure Table batches are capped at 100 operations, so flush in chunks
    // instead of one potentially oversized batch.
    const int maxBatchSize = 100;
    for (var offset = 0; offset < ordered.Count; offset += maxBatchSize)
    {
        var batch = new TableBatchOperation();
        foreach (var roleAssignation in ordered.Skip(offset).Take(maxBatchSize))
        {
            batch.Merge(roleAssignation);
        }
        await bindingsTable.ExecuteBatchAsync(batch);
    }
}
/// <summary>
/// Performs the actual UPDATE operation
/// </summary>
/// <typeparam name="T">Type of business object</typeparam>
/// <param name="objects">Collection of objects</param>
/// <param name="setSoftDeleteFlag">If TRUE, then we update the IsDeleted flag</param>
/// <returns>Number of entities merged; 0 when <paramref name="objects"/> is null or empty.</returns>
private ulong UpdateInternal <T>(IEnumerable <T> objects, bool setSoftDeleteFlag = false) where T : class
{
    if (objects == null)
    {
        return(0L);
    }

    // Azure Table batches are capped at 100 operations; the original queued
    // everything into one batch, which fails for larger collections.
    const int maxBatchSize = 100;
    ulong count = 0;
    TableBatchOperation update = new TableBatchOperation();
    foreach (T instance in objects)
    {
        AzureTableEntity entity = AzureTableEntity.From(instance);
        if (setSoftDeleteFlag)
        {
            // Soft delete: mark the row instead of removing it.
            entity.AddOrUpdateProperty(AzureTableEntity.PROPERTY_NAME_ISDELETED, true);
        }
        update.Merge(entity);
        count++;
        if (update.Count == maxBatchSize)
        {
            ExecuteNonQuery <T>(update);
            update = new TableBatchOperation();
        }
    }
    if (update.Count > 0)
    {
        ExecuteNonQuery <T>(update);
    }
    return(count);
}
/// <summary>
/// Queues a merge of a single property onto the row identified by
/// partition key + row key, conditioned on the supplied ETag.
/// </summary>
public void Merge(string partitionKey, string rowKey, string etag, string propertyName, object propertyValue)
{
    // Guard order preserved: each argument is validated before use.
    Require.NotEmpty(partitionKey, "partitionKey");
    Require.NotEmpty(rowKey, "rowKey");
    Require.NotEmpty(etag, "etag");
    Require.NotEmpty(propertyName, "propertyName");

    AssertBatchSizeIsNotExceeded();

    // Build the one-property entity first, then stamp its identity and
    // concurrency token before queueing it.
    var mergeEntity = m_tableEntityConverter.CreateDynamicTableEntityFromProperties(propertyName, propertyValue);
    mergeEntity.PartitionKey = partitionKey;
    mergeEntity.RowKey = rowKey;
    mergeEntity.ETag = etag;

    m_batch.Merge(mergeEntity);
}
/// <summary>
/// Update or Insert Configuration values
/// </summary>
/// <param name="configs">JSON object of config-group name -> { key: value } pairs.</param>
/// <returns>True when the batch executed; exceptions propagate to the caller.</returns>
public bool UpdateEntityProperty(string configs)
{
    // Retrieve a reference to the table.
    CloudTable table = GetTable();
    TableBatchOperation batchOperation = new TableBatchOperation();

    // All configGroups and their key/value pairs, parsed from the input JSON.
    // (The original also declared several dead locals — keyValues, newProperties,
    // a second batch, queryFinal — all removed.)
    Dictionary <string, Dictionary <string, string> > allValues =
        JsonConvert.DeserializeObject <Dictionary <string, Dictionary <string, string> > >(configs);

    foreach (KeyValuePair <string, Dictionary <string, string> > entry in allValues)
    {
        foreach (KeyValuePair <string, string> keyValue in entry.Value)
        {
            // BUGFIX: dropped the `.ToLower()` on QueryComparisons.Equal — it was
            // inconsistent with the second condition and a no-op on "eq".
            TableQuery <DynamicTableEntity> entityQuery = new TableQuery <DynamicTableEntity>().Where(
                TableQuery.CombineFilters(
                    TableQuery.GenerateFilterCondition(configSettings.ConfigGroup, QueryComparisons.Equal, entry.Key),
                    TableOperators.And,
                    TableQuery.GenerateFilterCondition(configSettings.Key, QueryComparisons.Equal, keyValue.Key)));

            // BUGFIX: materialize once. The original called GetEnumerator().MoveNext()
            // and then foreach'd the same lazy query, executing the REST query twice.
            List <DynamicTableEntity> matches = table.ExecuteQuery(entityQuery).ToList();
            if (matches.Count > 0)
            {
                foreach (DynamicTableEntity entity in matches)
                {
                    entity.Properties[configSettings.ConfigGroup].StringValue = entry.Key;
                    entity.Properties[configSettings.Key].StringValue = keyValue.Key;
                    entity.Properties[configSettings.Value].StringValue = keyValue.Value;
                    batchOperation.Merge(entity);
                }
            }
            else
            {
                // No existing row for this group/key: insert a fresh config entity.
                DynamicTableEntity config = CreateEntity(entry, keyValue);
                batchOperation.InsertOrReplace(config);
            }
        }
    }

    // The original's `catch (Exception ex) { throw; }` was a no-op rethrow
    // (and left ex unused), so it is removed; exceptions propagate unchanged.
    table.ExecuteBatch(batchOperation);
    return(true);
}
/// <summary>
/// Stores the given models in <typeparamref name="T"/>'s table as a single batch of the
/// requested operation type. When the table does not exist and auto-creation is
/// enabled, creates it and retries the whole store.
/// </summary>
/// <param name="storageOperationType">Which table operation to apply to every model.</param>
/// <param name="models">Models to persist.</param>
public async Task StoreAsync <T>(nStoreOperation storageOperationType, IEnumerable <T> models) where T : new()
{
    try
    {
        // Retrieve a reference to the table.
        CloudTable table = GetTableReference(GetTableName <T>());

        // Create the batch operation.
        TableBatchOperation batchOperation = new TableBatchOperation();

        // lookup the entitymapper
        var entityMapper = _entityMapperRegistry[typeof(T)];

        // Add all items
        foreach (var model in models)
        {
            switch (storageOperationType)
            {
                case nStoreOperation.insertOperation:
                    batchOperation.Insert(new DynamicTableEntity <T>(model, entityMapper));
                    break;
                case nStoreOperation.insertOrReplaceOperation:
                    batchOperation.InsertOrReplace(new DynamicTableEntity <T>(model, entityMapper));
                    break;
                case nStoreOperation.mergeOperation:
                    batchOperation.Merge(new DynamicTableEntity <T>(model, entityMapper));
                    break;
                case nStoreOperation.mergeOrInserOperation:
                    batchOperation.InsertOrMerge(new DynamicTableEntity <T>(model, entityMapper));
                    break;
            }
        }

        // execute
        await table.ExecuteBatchAsync(batchOperation);
    }
    catch (StorageException ex)
    {
        // Only handle "table does not exist" when auto-creation is enabled.
        if (!_autoCreateTable || !ex.Message.StartsWith("0:The table specified does not exist", StringComparison.CurrentCulture))
        {
            // BUGFIX: `throw ex;` reset the stack trace; `throw;` preserves it.
            throw;
        }

        // try to create the table
        await CreateTableAsync <T>();

        // retry
        await StoreAsync <T>(storageOperationType, models);
    }
}
// Builds one batch containing every supported operation type (insert, delete,
// replace, insert-or-replace, merge, insert-or-merge), executes it against the
// replicated table, and checks every per-operation status code.
public void TableBatchAllSupportedOperationsSync()
{
    var batch = new TableBatchOperation();
    var pk = Guid.NewGuid().ToString();

    // insert — needs no pre-existing row
    batch.Insert(GenerateRandomEnitity(pk));

    // Each remaining operation first seeds a row, then queues the op against it.
    DynamicReplicatedTableEntity seeded;

    // delete
    seeded = GenerateRandomEnitity(pk);
    this.repTable.Execute(TableOperation.Insert(seeded));
    batch.Delete(seeded);

    // replace
    seeded = GenerateRandomEnitity(pk);
    this.repTable.Execute(TableOperation.Insert(seeded));
    batch.Replace(seeded);

    // insert or replace
    seeded = GenerateRandomEnitity(pk);
    this.repTable.Execute(TableOperation.Insert(seeded));
    batch.InsertOrReplace(seeded);

    // merge
    seeded = GenerateRandomEnitity(pk);
    this.repTable.Execute(TableOperation.Insert(seeded));
    batch.Merge(seeded);

    // insert or merge
    seeded = GenerateRandomEnitity(pk);
    this.repTable.Execute(TableOperation.Insert(seeded));
    batch.InsertOrMerge(seeded);

    IList <TableResult> results = this.repTable.ExecuteBatch(batch);
    Assert.AreEqual(results.Count, 6);
    for (int i = 0; i < results.Count; i++)
    {
        Assert.AreEqual((int)HttpStatusCode.NoContent, results[i].HttpStatusCode, "HttpStatusCode mismatch i={0}", i);
    }
}
/// <summary>
/// Merges every object, processing them chunk by chunk (chunking delegated to
/// GetChunks) with one ExecuteBatch call per chunk.
/// </summary>
public void MergeBatch(IEnumerable <T> objs)
{
    foreach (List <T> chunk in GetChunks(objs))
    {
        var mergeOps = new TableBatchOperation();
        chunk.ForEach(item => mergeOps.Merge(item));
        Table.ExecuteBatch(mergeOps);
    }
}
/// <summary>
/// Merges all of the given entities into <paramref name="tableName"/> in a single
/// batch, creating the table first if needed, and returns the per-entity results.
/// </summary>
public IList <TableResult> MergeEntities(string tableName, IList <StorageEntity> entities)
{
    SetProperties(entities);

    CloudTable table = _storage.GetTableReference(tableName);
    table.CreateIfNotExists();

    var mergeBatch = new TableBatchOperation();
    foreach (StorageEntity storageEntity in entities)
    {
        mergeBatch.Merge(storageEntity);
    }
    return(table.ExecuteBatch(mergeBatch));
}
/// <summary>
/// Wraps each inner sequence of entities in a merge-only TableBatchOperation,
/// lazily yielding one TableBatchInformation per input batch.
/// </summary>
public static IEnumerable <TableBatchInformation <T> > MergeAll <T>(this IEnumerable <IEnumerable <T> > batches) where T : ITableEntity
{
    foreach (IEnumerable <T> group in batches)
    {
        // Snapshot once; the same array backs both the operation and the result.
        T[] snapshot = group.ToArray();
        var mergeOperation = new TableBatchOperation();
        foreach (T item in snapshot)
        {
            mergeOperation.Merge(item);
        }
        yield return(new TableBatchInformation <T>(mergeOperation, snapshot, TableOperationType.Merge));
    }
}
/// <summary>
/// Update Stickers Async
/// </summary>
/// <param name="stickers">List of Sticker</param>
/// <returns>Task</returns>
public async Task UpdateStickersAsync(IList <Sticker> stickers)
{
    await this.EnsureInitializedAsync();

    const int maxBatchSize = 100; // Azure Table batch limit
    int index = 0;
    var batchOperation = new TableBatchOperation();
    foreach (var sticker in stickers)
    {
        sticker.Index = index++; // Set the indexes
        var dto = new AzureTableSticker(sticker);
        batchOperation.Merge(dto);

        // BUGFIX: flush at the 100-operation limit instead of sending one
        // potentially oversized batch.
        if (batchOperation.Count == maxBatchSize)
        {
            await this.cloudTable.ExecuteBatchAsync(batchOperation);
            batchOperation = new TableBatchOperation();
        }
    }

    // BUGFIX: only execute when there is something left — the original also
    // executed an empty batch when the sticker list was empty.
    if (batchOperation.Count > 0)
    {
        await this.cloudTable.ExecuteBatchAsync(batchOperation);
    }
}
// These two should do the same thing. Which do we prefer?
/*
 * Task<object> ITablesMachineAnnotation.AnnotateLastOutgoingCallAsync(MirrorTableCall referenceCall)
 * {
 *  if (referenceCall == null)
 *      return Task.FromResult((object)null);
 *  return referenceCall(referenceTable);
 * }
 */
// Annotates the machine's last backend call on the reference (mirror) table:
// when referenceCall is non-null, the call is replayed against referenceTable;
// when it is null, any supplied spurious ETag changes are instead pushed as
// unconditional merges so the mirror records the backend's new ETags.
async Task <object> ITablesMachineAnnotation.AnnotateLastBackendCallAsync(
    MirrorTableCall referenceCall, IList <SpuriousETagChange> spuriousETagChanges)
{
    // Normalize: treat a null change list as empty.
    if (spuriousETagChanges == null)
    {
        spuriousETagChanges = new List <SpuriousETagChange>();
    }
    if (referenceCall == null)
    {
        if (spuriousETagChanges.Count > 0)
        {
            // One merge per changed row; ETAG_ANY makes each merge
            // unconditional, and originalResponse supplies the ETag each
            // mirror operation should record (change.newETag).
            var batch = new TableBatchOperation();
            var originalResponse = new List <TableResult>();
            foreach (SpuriousETagChange change in spuriousETagChanges)
            {
                batch.Merge(new DynamicTableEntity
                {
                    PartitionKey = change.partitionKey,
                    RowKey = change.rowKey,
                    ETag = ChainTable2Constants.ETAG_ANY
                });
                originalResponse.Add(new TableResult { Etag = change.newETag });
            }
            try
            {
                await referenceTable.ExecuteMirrorBatchAsync(batch, originalResponse, null, null);
            }
            catch (StorageException ex)
            {
                // Make sure this doesn't get swallowed by a generic StorageException catch block.
                throw new InvalidOperationException("Invalid spurious ETag change annotation.", ex);
            }
        }
        return(null);
    }
    else
    {
        // A real reference call and spurious changes are mutually exclusive here.
        if (spuriousETagChanges.Count > 0)
        {
            throw new ArgumentException("spuriousETagChanges currently not allowed with a reference call");
        }
        return(await referenceCall(referenceTable));
    }
}
// Attempts to run the batch against the old (pre-migration) table. Returns the
// per-operation results on success, or null when the partition has already been
// switched to the new table (caller handles that case). On a genuine operation
// failure, rethrows with the op index adjusted for the prepended assertion.
async Task <IList <TableResult> > AttemptBatchOnOldTableAsync(TableBatchOperation batch, TableRequestOptions requestOptions, OperationContext operationContext)
{
    string partitionKey = ChainTableUtils.GetBatchPartitionKey(batch);
    await TryMarkPartitionPopulatedAsync(partitionKey, requestOptions, operationContext);

    // Prepend an unconditional (ETAG_ANY) merge on the partition-populated
    // assertion row; its success/failure at index 0 tells us whether the
    // partition still lives in the old table.
    var oldBatch = new TableBatchOperation();
    oldBatch.Merge(new DynamicTableEntity
    {
        PartitionKey = partitionKey,
        RowKey = ROW_KEY_PARTITION_POPULATED_ASSERTION,
        ETag = ChainTable2Constants.ETAG_ANY,
    });
    // No AddRange? :(
    foreach (TableOperation op in batch)
    {
        oldBatch.Add(op);
    }

    IList <TableResult> oldResults;
    try
    {
        oldResults = await oldTable.ExecuteBatchAsync(oldBatch, requestOptions, operationContext);
    }
    catch (ChainTableBatchException ex)
    {
        if (ex.FailedOpIndex == 0)
        {
            // This must mean the partition is switched.
            await monitor.AnnotateLastBackendCallAsync();
            return(null);
        }
        else
        {
            // A caller operation failed: subtract 1 from the failed index so the
            // caller sees its own op index, not the prepended assertion's.
            await monitor.AnnotateLastBackendCallAsync(wasLinearizationPoint : true);
            throw ChainTableUtils.GenerateBatchException(ex.GetHttpStatusCode(), ex.FailedOpIndex - 1);
        }
    }
    // Drop the result of the prepended assertion merge before returning.
    oldResults.RemoveAt(0);
    await monitor.AnnotateLastBackendCallAsync(wasLinearizationPoint : true, successfulBatchResult : oldResults);
    return(oldResults);
}
// Queues the person onto the shared batch: a Merge when the row already exists
// in the table, an Insert otherwise, logging either way.
private static void InsertOrUpdate(bool presentInTable, Person person)
{
    if (!presentInTable)
    {
        batch.Insert(person);
        _log.LogInformation(string.Format("{0}: Row ID: {1} {2} is inserted", DateTime.Now, person.RowKey, person.name));
        return;
    }

    //if (await AZTableHandler.hasPersonUpdated(person))
    //{
    batch.Merge(person);
    _log.LogInformation(string.Format("{0}: Row ID: {1} {2} is updated", DateTime.Now, person.RowKey, person.name));
    //}
}
/// <summary>
/// Runs the supplied delete, insert, and merge entities as one table batch.
/// Null arrays and null entries are skipped; nothing is executed when every
/// array is null or empty.
/// </summary>
public static async Task BatchAsync(string tableName, ITableEntity[] deleteEntities, ITableEntity[] insertEntities, ITableEntity[] mergeEntities)
{
    bool hasWork = ((mergeEntities?.Length ?? 0) > 0) ||
                   ((insertEntities?.Length ?? 0) > 0) ||
                   ((deleteEntities?.Length ?? 0) > 0);
    if (!hasWork)
    {
        return;
    }

    CloudTable table = await TableStorageHelper.GetTableReferenceAsync(tableName);
    TableBatchOperation batchOperation = new TableBatchOperation();

    // Ordering preserved from the original: deletes, then inserts, then merges.
    if (deleteEntities != null)
    {
        foreach (ITableEntity entity in deleteEntities)
        {
            if (entity != null)
            {
                batchOperation.Delete(entity);
            }
        }
    }

    if (insertEntities != null)
    {
        foreach (ITableEntity entity in insertEntities)
        {
            if (entity != null)
            {
                batchOperation.Insert(entity);
            }
        }
    }

    if (mergeEntities != null)
    {
        foreach (ITableEntity entity in mergeEntities)
        {
            if (entity != null)
            {
                batchOperation.Merge(entity);
            }
        }
    }

    await table.ExecuteBatchAsync(batchOperation);
}
/// <summary>
/// Merges all posts back into the table, spreading them over enough parallel
/// batches that each stays within MAX_OPERATIONS_PER_BATCH operations.
/// </summary>
/// <param name="posts">Posts to merge; null or empty is a no-op.</param>
/// <returns>A task that completes when every batch has executed.</returns>
static Task UpdatePostsInTable(List <PostQueueEntity> posts)
{
    // BUGFIX: with an empty list, numGroups was 0 and `Id % numGroups` threw
    // DivideByZeroException; short-circuit instead.
    if (posts == null || posts.Count == 0)
    {
        return(Task.CompletedTask);
    }

    CloudTable table = GetTable();
    int numGroups = (int)System.Math.Ceiling(posts.Count / (float)MAX_OPERATIONS_PER_BATCH);
    // Round-robin by index modulo numGroups: each group holds at most
    // ceil(count / numGroups) <= MAX_OPERATIONS_PER_BATCH posts.
    var updateBatchRequests = posts
                              .Select((post, index) => new { Post = post, Id = index })
                              .GroupBy(indexedPost => indexedPost.Id % numGroups)
                              .Select(updatableGroup =>
    {
        TableBatchOperation updateOperation = new TableBatchOperation();
        updatableGroup
        .Select(indexedPost => indexedPost.Post)
        .ToList()
        .ForEach(post => updateOperation.Merge(post));
        return(table.ExecuteBatchAsync(updateOperation));
    });

    return(Task.WhenAll(updateBatchRequests));
}
/// <inheritdoc/>
public async Task UpdateMeetingNotificationItemEntities(IList <MeetingNotificationItemEntity> meetingNotificationItemEntities)
{
    if (meetingNotificationItemEntities is null || meetingNotificationItemEntities.Count == 0)
    {
        throw new System.ArgumentNullException(nameof(meetingNotificationItemEntities));
    }

    // BUGFIX: log messages previously named UpdateEmailNotificationItemEntities
    // (copy-paste); they now name this method.
    this.logger.TraceInformation($"Started {nameof(this.UpdateMeetingNotificationItemEntities)} method of {nameof(TableStorageEmailRepository)}.");

    TableBatchOperation batchOperation = new TableBatchOperation();
    foreach (var item in meetingNotificationItemEntities)
    {
        batchOperation.Merge(item.ConvertToMeetingNotificationItemTableEntity());
    }

    // BUGFIX: Task.WaitAll blocked the calling thread inside a Task-returning
    // method (sync-over-async); await the batch instead.
    await this.meetingHistoryTable.ExecuteBatchAsync(batchOperation).ConfigureAwait(false);

    this.logger.TraceInformation($"Finished {nameof(this.UpdateMeetingNotificationItemEntities)} method of {nameof(TableStorageEmailRepository)}.");
}
/// <summary>
/// Updates the entity batch async.
/// </summary>
/// <typeparam name="T"></typeparam>
/// <param name="entities">The entities.</param>
/// <param name="entitiesToAdd">The entities to add.</param>
/// <param name="entitiesToDelete">The entities to delete.</param>
/// <param name="replace">if set to <c>true</c> [force].</param>
/// <returns>Task{OperationResult}.</returns>
public async Task <OperationResult> UpdateEntityBatchAsync <T>(
    IList <T> entities,
    IList <T> entitiesToAdd = null,
    IList <T> entitiesToDelete = null,
    bool replace = true)
    where T : TableEntity
{
    var batchOperation = new TableBatchOperation();

    // Choose the queueing strategy once rather than branching per entity.
    Action <T> enqueue = replace
        ? (Action <T>)(e => batchOperation.Replace(e))
        : e => batchOperation.Merge(e);

    foreach (var entity in entities)
    {
        enqueue(entity);
    }

    return(await CompleteUpdateDeleteBatchWithRestOperationsAndStart(batchOperation, entitiesToAdd, entitiesToDelete));
}
/// <summary>
/// Appends <paramref name="entity"/> to the batch using the operation selected by
/// <paramref name="type"/>. Retrieve is keyed off the entity's partition/row key.
/// </summary>
/// <exception cref="ArgumentOutOfRangeException">Thrown for an unmapped operation type
/// (still caught by existing <c>catch (Exception)</c> callers).</exception>
public static void Execute <T>(this TableBatchOperation operation, TableOperationType type, T entity) where T : ITableEntity
{
    switch (type)
    {
        case TableOperationType.Insert:
            operation.Insert(entity);
            break;
        case TableOperationType.Delete:
            operation.Delete(entity);
            break;
        case TableOperationType.Replace:
            operation.Replace(entity);
            break;
        case TableOperationType.Merge:
            operation.Merge(entity);
            break;
        case TableOperationType.InsertOrReplace:
            operation.InsertOrReplace(entity);
            break;
        case TableOperationType.InsertOrMerge:
            operation.InsertOrMerge(entity);
            break;
        case TableOperationType.Retrieve:
            operation.Retrieve(entity.PartitionKey, entity.RowKey);
            break;
        default:
            // FIX: ArgumentOutOfRangeException is the idiomatic exception for an
            // unmapped enum value, instead of bare System.Exception.
            throw new ArgumentOutOfRangeException(nameof(type), type, $"Invalid enum value {nameof(TableOperationType)} {type}");
    }
}
// End-to-end escaping test: inserts, retrieves, merges, and replaces an entity
// whose "foo" property (and, when includeKey is set, partition key) contains the
// caller-supplied data, exercising either the batch or the single-operation
// path per useBatch, and asserting the round-tripped entity after each step.
private async Task DoEscapeTestAsync(string data, bool useBatch, bool includeKey)
{
    // Optionally embed the payload in the partition key to test key escaping too.
    DynamicTableEntity ent = new DynamicTableEntity(includeKey ? "temp" + data : "temp", Guid.NewGuid().ToString());
    ent.Properties.Add("foo", new EntityProperty(data));

    // Insert
    if (useBatch)
    {
        TableBatchOperation batch = new TableBatchOperation();
        batch.Insert(ent);
        await currentTable.ExecuteBatchAsync(batch);
    }
    else
    {
        await currentTable.ExecuteAsync(TableOperation.Insert(ent));
    }

    // Retrieve
    TableResult res = null;
    if (useBatch)
    {
        TableBatchOperation batch = new TableBatchOperation();
        batch.Retrieve(ent.PartitionKey, ent.RowKey);
        res = (await currentTable.ExecuteBatchAsync(batch))[0];
    }
    else
    {
        res = await currentTable.ExecuteAsync(TableOperation.Retrieve(ent.PartitionKey, ent.RowKey));
    }

    // Check equality
    DynamicTableEntity retrievedEntity = res.Result as DynamicTableEntity;
    Assert.AreEqual(ent.PartitionKey, retrievedEntity.PartitionKey);
    Assert.AreEqual(ent.RowKey, retrievedEntity.RowKey);
    Assert.AreEqual(ent.ETag, retrievedEntity.ETag);
    Assert.AreEqual(ent.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(ent.Properties["foo"], retrievedEntity.Properties["foo"]);

    // Merge a second property onto the stored row.
    ent.Properties.Add("foo2", new EntityProperty("bar2"));
    if (useBatch)
    {
        TableBatchOperation batch = new TableBatchOperation();
        batch.Merge(ent);
        await currentTable.ExecuteBatchAsync(batch);
    }
    else
    {
        await currentTable.ExecuteAsync(TableOperation.Merge(ent));
    }

    // Retrieve
    if (useBatch)
    {
        TableBatchOperation batch = new TableBatchOperation();
        batch.Retrieve(ent.PartitionKey, ent.RowKey);
        res = (await currentTable.ExecuteBatchAsync(batch))[0];
    }
    else
    {
        res = await currentTable.ExecuteAsync(TableOperation.Retrieve(ent.PartitionKey, ent.RowKey));
    }
    retrievedEntity = res.Result as DynamicTableEntity;
    Assert.AreEqual(ent.PartitionKey, retrievedEntity.PartitionKey);
    Assert.AreEqual(ent.RowKey, retrievedEntity.RowKey);
    Assert.AreEqual(ent.ETag, retrievedEntity.ETag);
    Assert.AreEqual(ent.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(ent.Properties["foo"], retrievedEntity.Properties["foo"]);

    // Replace: swap foo2 for foo3, then overwrite the whole row.
    ent.Properties.Remove("foo2");
    ent.Properties.Add("foo3", new EntityProperty("bar3"));
    if (useBatch)
    {
        TableBatchOperation batch = new TableBatchOperation();
        batch.Replace(ent);
        await currentTable.ExecuteBatchAsync(batch);
    }
    else
    {
        await currentTable.ExecuteAsync(TableOperation.Replace(ent));
    }

    // Retrieve
    if (useBatch)
    {
        TableBatchOperation batch = new TableBatchOperation();
        batch.Retrieve(ent.PartitionKey, ent.RowKey);
        res = (await currentTable.ExecuteBatchAsync(batch))[0];
    }
    else
    {
        res = await currentTable.ExecuteAsync(TableOperation.Retrieve(ent.PartitionKey, ent.RowKey));
    }
    retrievedEntity = res.Result as DynamicTableEntity;
    Assert.AreEqual(ent.PartitionKey, retrievedEntity.PartitionKey);
    Assert.AreEqual(ent.RowKey, retrievedEntity.RowKey);
    Assert.AreEqual(ent.ETag, retrievedEntity.ETag);
    Assert.AreEqual(ent.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(ent.Properties["foo"], retrievedEntity.Properties["foo"]);
}
// One machine's workload in the P# migration model: repeatedly performs a
// nondeterministically chosen atomic call — a filtered query or a 1–2 op batch
// write — against the single test partition, choosing op types, row keys, and
// ETags (wildcard / wrong / current, per the reference-table dump).
async Task DoRandomAtomicCalls()
{
    for (int callNum = 0; callNum < MigrationModel.NUM_CALLS_PER_MACHINE; callNum++)
    {
        // Snapshot of the reference table, used to pick valid/invalid ETags below.
        SortedDictionary <PrimaryKey, DynamicTableEntity> dump = await peekProxy.DumpReferenceTableAsync();
        if (PSharpRuntime.Nondeterministic())
        {
            // Query
            var query = new TableQuery <DynamicTableEntity>();
            query.FilterString = ChainTableUtils.CombineFilters(
                TableQuery.GenerateFilterCondition(
                    TableConstants.PartitionKey, QueryComparisons.Equal, MigrationModel.SINGLE_PARTITION_KEY),
                TableOperators.And,
                NondeterministicUserPropertyFilterString());
            await RunQueryAtomicAsync(query);
        }
        else
        {
            // Batch write
            int batchSize = PSharpRuntime.Nondeterministic() ? 2 : 1;
            var batch = new TableBatchOperation();
            var rowKeyChoices = new List <string> { "0", "1", "2", "3", "4", "5" };
            for (int opNum = 0; opNum < batchSize; opNum++)
            {
                int opTypeNum = PSharpNondeterminism.Choice(7);
                int rowKeyI = PSharpNondeterminism.Choice(rowKeyChoices.Count);
                string rowKey = rowKeyChoices[rowKeyI];
                rowKeyChoices.RemoveAt(rowKeyI); // Avoid duplicate in same batch
                var primaryKey = new PrimaryKey(MigrationModel.SINGLE_PARTITION_KEY, rowKey);
                string eTag = null;
                if (opTypeNum >= 1 && opTypeNum <= 3)
                {
                    // Ops 1-3 (Replace/Merge/Delete below) take an ETag: choose
                    // between the wildcard, a deliberately wrong value, and —
                    // only when the row exists in the dump — its current ETag.
                    DynamicTableEntity existingEntity;
                    int etagTypeNum = PSharpNondeterminism.Choice(
                        dump.TryGetValue(primaryKey, out existingEntity) ? 3 : 2);
                    switch (etagTypeNum)
                    {
                        case 0: eTag = ChainTable2Constants.ETAG_ANY; break;
                        case 1: eTag = "wrong"; break;
                        case 2: eTag = existingEntity.ETag; break;
                    }
                }
                DynamicTableEntity entity = new DynamicTableEntity
                {
                    PartitionKey = MigrationModel.SINGLE_PARTITION_KEY,
                    RowKey = rowKey,
                    ETag = eTag,
                    Properties = new Dictionary <string, EntityProperty>
                    {
                        // Give us something to see on merge. Might help with tracing too!
                        { string.Format("{0}_c{1}_o{2}", machineId.ToString(), callNum, opNum), new EntityProperty(true) },
                        // Property with 50%/50% distribution for use in filters.
                        { "isHappy", new EntityProperty(PSharpRuntime.Nondeterministic()) }
                    }
                };
                switch (opTypeNum)
                {
                    case 0: batch.Insert(entity); break;
                    case 1: batch.Replace(entity); break;
                    case 2: batch.Merge(entity); break;
                    case 3: batch.Delete(entity); break;
                    case 4: batch.InsertOrReplace(entity); break;
                    case 5: batch.InsertOrMerge(entity); break;
                    case 6:
                        // Delete using the special ETAG_DELETE_IF_EXISTS token
                        // instead of a concrete/wildcard ETag.
                        entity.ETag = ChainTable2Constants.ETAG_DELETE_IF_EXISTS;
                        batch.Delete(entity);
                        break;
                }
            }
            await RunBatchAsync(batch);
        }
    }
}
// Verifies a single-operation batch Merge under the given payload format:
// the merged row must carry both the original and the merged property.
private async Task DoTableBatchMergeAsync(TablePayloadFormat format)
{
    tableClient.DefaultRequestOptions.PayloadFormat = format;

    // Seed a row (row key varies per format so runs don't collide).
    var original = new DynamicTableEntity("merge test", "foo" + format.ToString());
    original.Properties.Add("prop1", new EntityProperty("value1"));
    await currentTable.ExecuteAsync(TableOperation.Insert(original));

    // Merge a second property onto the same row via a one-op batch,
    // conditioned on the ETag from the insert.
    var patch = new DynamicTableEntity(original.PartitionKey, original.RowKey) { ETag = original.ETag };
    patch.Properties.Add("prop2", new EntityProperty("value2"));
    var mergeBatch = new TableBatchOperation();
    mergeBatch.Merge(patch);
    await currentTable.ExecuteBatchAsync(mergeBatch);

    // Retrieve and confirm both properties survived the merge.
    TableResult result = await currentTable.ExecuteAsync(TableOperation.Retrieve(original.PartitionKey, original.RowKey));
    var fetched = result.Result as DynamicTableEntity;
    Assert.IsNotNull(fetched);
    Assert.AreEqual(2, fetched.Properties.Count);
    Assert.AreEqual(original.Properties["prop1"], fetched.Properties["prop1"]);
    Assert.AreEqual(patch.Properties["prop2"], fetched.Properties["prop2"]);
}
// Verifies that a batch Merge fails correctly through the APM
// (Begin/EndExecuteBatch) API: once with a stale ETag (412 PreconditionFailed)
// and once against a deleted entity (404 NotFound).
public void TableBatchMergeFailAPM()
{
    CloudTableClient tableClient = GenerateCloudTableClient();

    // Insert Entity
    DynamicTableEntity baseEntity = new DynamicTableEntity("merge test", "foo");
    baseEntity.Properties.Add("prop1", new EntityProperty("value1"));
    currentTable.Execute(TableOperation.Insert(baseEntity));

    // Capture the pre-update ETag, then rev it with a Replace so it goes stale.
    string staleEtag = baseEntity.ETag;

    // update entity to rev etag
    baseEntity.Properties["prop1"].StringValue = "updated value";
    currentTable.Execute(TableOperation.Replace(baseEntity));

    OperationContext opContext = new OperationContext();

    try
    {
        // Attempt a merge with stale etag
        DynamicTableEntity mergeEntity = new DynamicTableEntity(baseEntity.PartitionKey, baseEntity.RowKey) { ETag = staleEtag };
        mergeEntity.Properties.Add("prop2", new EntityProperty("value2"));

        TableBatchOperation batch = new TableBatchOperation();
        batch.Merge(mergeEntity);

        // APM pattern: the callback captures the IAsyncResult and signals the
        // event; the test blocks until completion, then calls EndExecuteBatch.
        using (ManualResetEvent evt = new ManualResetEvent(false))
        {
            IAsyncResult asyncRes = null;
            currentTable.BeginExecuteBatch(batch, null, opContext, (res) =>
            {
                asyncRes = res;
                evt.Set();
            }, null);
            evt.WaitOne();
            currentTable.EndExecuteBatch(asyncRes);
        }
        Assert.Fail();
    }
    catch (StorageException)
    {
        // Expect a 412 with either of the service's condition-failure codes.
        TestHelper.ValidateResponse(opContext,
                                    1,
                                    (int)HttpStatusCode.PreconditionFailed,
                                    new string[] { "UpdateConditionNotSatisfied", "ConditionNotMet" },
                                    new string[] { "The update condition specified in the request was not satisfied.", "The condition specified using HTTP conditional header(s) is not met." });
    }

    // Delete Entity
    currentTable.Execute(TableOperation.Delete(baseEntity));

    opContext = new OperationContext();

    // try merging with deleted entity
    try
    {
        // Attempt a merge with stale etag
        DynamicTableEntity mergeEntity = new DynamicTableEntity(baseEntity.PartitionKey, baseEntity.RowKey) { ETag = baseEntity.ETag };
        mergeEntity.Properties.Add("prop2", new EntityProperty("value2"));

        TableBatchOperation batch = new TableBatchOperation();
        batch.Merge(mergeEntity);

        using (ManualResetEvent evt = new ManualResetEvent(false))
        {
            IAsyncResult asyncRes = null;
            currentTable.BeginExecuteBatch(batch, null, opContext, (res) =>
            {
                asyncRes = res;
                evt.Set();
            }, null);
            evt.WaitOne();
            currentTable.EndExecuteBatch(asyncRes);
        }
        Assert.Fail();
    }
    catch (StorageException)
    {
        // Expect a 404: the target row no longer exists.
        TestHelper.ValidateResponse(opContext, 1, (int)HttpStatusCode.NotFound, new string[] { "ResourceNotFound" }, "The specified resource does not exist.");
    }
}
// Verifies a single-operation batch Merge: the merged row must end up with
// both the original and the merged property.
public void TableBatchMergeSync()
{
    CloudTableClient tableClient = GenerateCloudTableClient();

    // Seed a row with one property.
    var original = new DynamicTableEntity("merge test", "foo");
    original.Properties.Add("prop1", new EntityProperty("value1"));
    currentTable.Execute(TableOperation.Insert(original));

    // Merge a second property onto the same row via a one-op batch,
    // conditioned on the ETag from the insert.
    var patch = new DynamicTableEntity(original.PartitionKey, original.RowKey) { ETag = original.ETag };
    patch.Properties.Add("prop2", new EntityProperty("value2"));
    var mergeBatch = new TableBatchOperation();
    mergeBatch.Merge(patch);
    currentTable.ExecuteBatch(mergeBatch);

    // Retrieve and confirm both properties survived the merge.
    TableResult result = currentTable.Execute(TableOperation.Retrieve(original.PartitionKey, original.RowKey));
    var fetched = result.Result as DynamicTableEntity;
    Assert.IsNotNull(fetched);
    Assert.AreEqual(2, fetched.Properties.Count);
    Assert.AreEqual(original.Properties["prop1"], fetched.Properties["prop1"]);
    Assert.AreEqual(patch.Properties["prop2"], fetched.Properties["prop2"]);
}
// Exercises every supported batch operation type (insert, delete, replace,
// insert-or-replace, merge, insert-or-merge) in one batch via the APM
// (Begin/EndExecuteBatch) API and checks each per-operation status code.
public void TableBatchAllSupportedOperationsAPM()
{
    CloudTableClient tableClient = GenerateCloudTableClient();
    TableBatchOperation batch = new TableBatchOperation();
    string pk = Guid.NewGuid().ToString();

    // insert — needs no pre-existing row
    batch.Insert(GenerateRandomEnitity(pk));

    // delete — seed a row first, then queue the delete against it
    {
        DynamicTableEntity entity = GenerateRandomEnitity(pk);
        currentTable.Execute(TableOperation.Insert(entity));
        batch.Delete(entity);
    }

    // replace
    {
        DynamicTableEntity entity = GenerateRandomEnitity(pk);
        currentTable.Execute(TableOperation.Insert(entity));
        batch.Replace(entity);
    }

    // insert or replace
    {
        DynamicTableEntity entity = GenerateRandomEnitity(pk);
        currentTable.Execute(TableOperation.Insert(entity));
        batch.InsertOrReplace(entity);
    }

    // merge
    {
        DynamicTableEntity entity = GenerateRandomEnitity(pk);
        currentTable.Execute(TableOperation.Insert(entity));
        batch.Merge(entity);
    }

    // insert or merge
    {
        DynamicTableEntity entity = GenerateRandomEnitity(pk);
        currentTable.Execute(TableOperation.Insert(entity));
        batch.InsertOrMerge(entity);
    }

    IList<TableResult> results = null;
    // APM pattern: the callback captures the IAsyncResult and signals the
    // event; the test blocks until completion, then calls EndExecuteBatch.
    using (ManualResetEvent evt = new ManualResetEvent(false))
    {
        IAsyncResult asyncRes = null;
        currentTable.BeginExecuteBatch(batch, (res) =>
        {
            asyncRes = res;
            evt.Set();
        }, null);
        evt.WaitOne();
        results = currentTable.EndExecuteBatch(asyncRes);
    }

    Assert.AreEqual(results.Count, 6);

    // First result is the bare insert (201 Created); the remaining five
    // operations each return 204 NoContent.
    IEnumerator<TableResult> enumerator = results.GetEnumerator();
    enumerator.MoveNext();
    Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.Created);
    enumerator.MoveNext();
    Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent);
    enumerator.MoveNext();
    Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent);
    enumerator.MoveNext();
    Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent);
    enumerator.MoveNext();
    Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent);
    enumerator.MoveNext();
    Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent);
}
/// <summary>
/// Executes <paramref name="oType"/> against every entity, split into batches of at
/// most 100 operations (the Azure Table batch limit), awaiting in waves of up to
/// 200 in-flight batch tasks.
/// </summary>
/// <exception cref="ArgumentOutOfRangeException">Thrown for an unsupported operation type.</exception>
public static async Task ExecuteParallelBatchAsync(this CloudTable table, TableOperationType oType, IList <ITableEntity> entities)
{
    var taskCount = 0;
    const int taskThreshold = 200;
    const int maxBatchSize = 100;
    var batchTasks = new List <Task <IList <TableResult> > >();

    for (var i = 0; i < entities.Count; i += maxBatchSize)
    {
        taskCount++;
        var batchItems = entities.Skip(i)
                         .Take(maxBatchSize)
                         .ToList();
        var batch = new TableBatchOperation();
        switch (oType)
        {
            case TableOperationType.Insert:
                batchItems.ForEach(e => batch.Insert(e));
                break;
            case TableOperationType.Delete:
                batchItems.ForEach(e => batch.Delete(e));
                break;
            case TableOperationType.Replace:
                batchItems.ForEach(e => batch.Replace(e));
                break;
            case TableOperationType.Merge:
                batchItems.ForEach(e => batch.Merge(e));
                break;
            case TableOperationType.InsertOrReplace:
                batchItems.ForEach(e => batch.InsertOrReplace(e));
                break;
            case TableOperationType.InsertOrMerge:
                batchItems.ForEach(e => batch.InsertOrMerge(e));
                break;
            default:
                throw new ArgumentOutOfRangeException(nameof(oType), oType, null);
        }
        var task = table.ExecuteBatchAsync(batch);
        batchTasks.Add(task);
        if (taskCount < taskThreshold)
        {
            continue;
        }
        await Task.WhenAll(batchTasks);
        // BUGFIX: completed tasks were never removed from the list, so every
        // later wave re-awaited (and kept alive) all earlier tasks; clear the
        // list so each wave tracks only its own in-flight batches.
        batchTasks.Clear();
        taskCount = 0;
    }

    // Await whatever is still in flight after the final (partial) wave.
    await Task.WhenAll(batchTasks);
}
// Model-checking driver: issues NUM_CALLS_PER_MACHINE randomized atomic
// calls (either an atomic query or a batch write) against the table under
// test and the mirror/reference table, then verifies they agree.
// NOTE(review): the sequence of PSharpRuntime.Nondeterministic() /
// PSharpNondeterminism.Choice() calls defines the explored schedule; the
// call order below is preserved exactly from the original.
async Task DoRandomAtomicCalls()
{
    for (int callNum = 0; callNum < MigrationModel.NUM_CALLS_PER_MACHINE; callNum++)
    {
        TableCall originalCall;
        MirrorTableCall referenceCall;
        // Snapshot of the reference table, used to pick valid/invalid ETags.
        SortedDictionary <PrimaryKey, DynamicTableEntity> dump = await peekProxy.DumpReferenceTableAsync();

        if (PSharpRuntime.Nondeterministic())
        {
            // Query
            // XXX: Test the filtering?
            var query = new TableQuery <DynamicTableEntity>();
            query.FilterString = TableQuery.GenerateFilterCondition(
                TableConstants.PartitionKey, QueryComparisons.Equal, MigrationModel.SINGLE_PARTITION_KEY);
            // async/await pair needed to upcast the return value to object.
            originalCall = async table => await table.ExecuteQueryAtomicAsync(query);
            referenceCall = async referenceTable => await referenceTable.ExecuteQueryAtomicAsync(query);
            Console.WriteLine("{0} starting query", machineId);
        }
        else
        {
            // Batch write: 1 or 2 operations on distinct row keys.
            int batchSize = PSharpRuntime.Nondeterministic() ? 2 : 1;
            var batch = new TableBatchOperation();
            var rowKeyChoices = new List <string> { "0", "1", "2", "3", "4", "5" };

            for (int opNum = 0; opNum < batchSize; opNum++)
            {
                int opTypeNum = PSharpNondeterminism.Choice(7);
                int rowKeyI = PSharpNondeterminism.Choice(rowKeyChoices.Count);
                string rowKey = rowKeyChoices[rowKeyI];
                rowKeyChoices.RemoveAt(rowKeyI);  // Avoid duplicate in same batch
                var primaryKey = new PrimaryKey(MigrationModel.SINGLE_PARTITION_KEY, rowKey);
                string eTag = null;
                // Ops 1-3 (Replace/Merge/Delete) carry an ETag: choose among
                // "match any", "wrong", and — when the row exists — the real one.
                if (opTypeNum >= 1 && opTypeNum <= 3)
                {
                    DynamicTableEntity existingEntity;
                    int etagTypeNum = PSharpNondeterminism.Choice(
                        dump.TryGetValue(primaryKey, out existingEntity) ? 3 : 2);
                    switch (etagTypeNum)
                    {
                    case 0: eTag = ChainTable2Constants.ETAG_ANY; break;

                    case 1: eTag = "wrong"; break;

                    case 2: eTag = existingEntity.ETag; break;
                    }
                }
                DynamicTableEntity entity = new DynamicTableEntity
                {
                    PartitionKey = MigrationModel.SINGLE_PARTITION_KEY,
                    RowKey = rowKey,
                    ETag = eTag,
                    Properties = new Dictionary <string, EntityProperty> {
                        // Give us something to see on merge.  Might help with tracing too!
                        { string.Format("{0}_c{1}_o{2}", machineId.ToString(), callNum, opNum),
                          new EntityProperty(true) }
                    }
                };
                switch (opTypeNum)
                {
                case 0: batch.Insert(entity); break;

                case 1: batch.Replace(entity); break;

                case 2: batch.Merge(entity); break;

                case 3: batch.Delete(entity); break;

                case 4: batch.InsertOrReplace(entity); break;

                case 5: batch.InsertOrMerge(entity); break;

                case 6:
                    entity.ETag = ChainTable2Constants.ETAG_DELETE_IF_EXISTS;
                    batch.Delete(entity);
                    break;
                }
            }

            // The reference call consumes the batch too, so give it a copy.
            TableBatchOperation batchCopy = ChainTableUtils.CopyBatch <DynamicTableEntity>(batch);
            originalCall = async table => await table.ExecuteBatchAsync(batch);
            referenceCall = async referenceTable =>
                            await referenceTable.ExecuteMirrorBatchAsync(batchCopy, successfulBatchResult);
            Console.WriteLine("{0} starting batch {1}", machineId, batch);
        }

        await RunCallAsync(originalCall, referenceCall);
        // FIX: the original passed no argument for the {0} placeholder and
        // printed the literal string "{0} table call verified".
        Console.WriteLine("{0} table call verified", machineId);
    }
}
private async Task DoTableBatchAllSupportedOperationsAsync(TablePayloadFormat format) { tableClient.DefaultRequestOptions.PayloadFormat = format; TableBatchOperation batch = new TableBatchOperation(); string pk = Guid.NewGuid().ToString(); // insert batch.Insert(GenerateRandomEntity(pk)); // delete { DynamicTableEntity entity = GenerateRandomEntity(pk); await currentTable.ExecuteAsync(TableOperation.Insert(entity)); batch.Delete(entity); } // replace { DynamicTableEntity entity = GenerateRandomEntity(pk); await currentTable.ExecuteAsync(TableOperation.Insert(entity)); batch.Replace(entity); } // insert or replace { DynamicTableEntity entity = GenerateRandomEntity(pk); await currentTable.ExecuteAsync(TableOperation.Insert(entity)); batch.InsertOrReplace(entity); } // merge { DynamicTableEntity entity = GenerateRandomEntity(pk); await currentTable.ExecuteAsync(TableOperation.Insert(entity)); batch.Merge(entity); } // insert or merge { DynamicTableEntity entity = GenerateRandomEntity(pk); await currentTable.ExecuteAsync(TableOperation.Insert(entity)); batch.InsertOrMerge(entity); } IList<TableResult> results = await currentTable.ExecuteBatchAsync(batch); Assert.AreEqual(results.Count, 6); IEnumerator<TableResult> enumerator = results.GetEnumerator(); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.Created); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); }
/// <summary>
/// Updates a column value in Apps Azure table.
/// The column to be transformed is specified using the following extended properties
/// Extended Properties
///     columnName - Name of the column to be added
///     columnType - Data type of the column. Only supported types right now are: int32, bool, and string
///     rowKeyPrefixes - Rowkey prefixes of the rows in which the column update will be applied. This is optional and will identify the subset of rows to do this operation.
///     partitionKeyOwnerValueRule - The updates are specified using the partition key owner and the value for it in the ; separated key-value format.
/// Extended Properties Example
///     "columnName": "DisableHandleValidation",
///     "columnType": "bool",
///     "rowKeyPrefixes": "ProfilesObject:",
///     "partitionKeyOwnerValueRule": "Beihai=true;EndToEndTests=true"
/// Activity Operation
///     The activity iterates through all the rows from the input table with the matching rowKeyPrefixes,
///     checks if the column is present, updates the column value if the partition key belongs to the app handles
///     associated with the owner specified in partitionKeyOwnerValueRule
/// </summary>
/// <param name="linkedServices">Linked services referenced by activity definition.</param>
/// <param name="datasets">Datasets referenced by activity definition.</param>
/// <param name="activity">Activity definition.</param>
/// <param name="logger">Used to log messages during activity execution.</param>
/// <returns>Activity state at the end of execution</returns>
public IDictionary <string, string> Execute(
    IEnumerable <LinkedService> linkedServices,
    IEnumerable <Dataset> datasets,
    Activity activity,
    IActivityLogger logger)
{
    DotNetActivity dotNetActivity = (DotNetActivity)activity.TypeProperties;
    IDictionary <string, string> extendedProperties = dotNetActivity.ExtendedProperties;

    logger.Write("Logging extended properties if any...");
    foreach (KeyValuePair <string, string> entry in extendedProperties)
    {
        logger.Write("<key:{0}> <value:{1}>", entry.Key, entry.Value);
    }

    if (!extendedProperties.ContainsKey("columnName"))
    {
        throw new ArgumentException("Column name is required", "columnName");
    }

    string columnName = extendedProperties["columnName"];

    if (!extendedProperties.ContainsKey("columnType"))
    {
        throw new ArgumentException("Column Type information is required", "columnType");
    }

    string columnType = extendedProperties["columnType"];

    // Note that partitionKeyOwnerValueRule is required as the rules for updating value comes from it
    // We do not update column value with default value if the matching rule is not found. The record is ignored. All rules need to be explicitly specified
    if (!extendedProperties.ContainsKey("partitionKeyOwnerValueRule"))
    {
        throw new ArgumentException("PartitionKeyOwnerValueRule information is required", "partitionKeyOwnerValueRule");
    }

    string partitionKeyOwnerValueRule = extendedProperties["partitionKeyOwnerValueRule"];

    string[] rowKeyPrefixes = null;
    if (extendedProperties.ContainsKey("rowKeyPrefixes"))
    {
        rowKeyPrefixes = extendedProperties["rowKeyPrefixes"].Split(',');
    }

    // Parse "Owner1=value1;Owner2=value2" into owner -> value.
    var partitionKeyOwnerValueRuleDict = partitionKeyOwnerValueRule
        .Split(new[] { ';' }, StringSplitOptions.RemoveEmptyEntries)
        .Select(part => part.Split('='))
        .ToDictionary(split => split[0], split => split[1]);

    // All app handles belonging to any owner named in the rule.
    var appHandles = ownerAppHandles.Where(item => partitionKeyOwnerValueRuleDict.ContainsKey(item.Key)).SelectMany(item => item.Value).ToList();
    logger.Write("Matching appHandles:{0}", string.Join(",", appHandles));

    AzureStorageLinkedService inputLinkedService;
    AzureTableDataset sourceTable;

    // For activities working on a single dataset, the first entry is the input dataset.
    // The activity.Inputs can have multiple datasets for building pipeline workflow
    // dependencies. We can ignore the rest of the datasets.
    // FIX: the sentence above had lost its comment marker and sat as bare
    // (non-compiling) text in the method body.
    Dataset inputDataset = datasets.Single(dataset => dataset.Name == activity.Inputs.First().Name);
    sourceTable = inputDataset.Properties.TypeProperties as AzureTableDataset;
    logger.Write("input table:{0}", sourceTable.TableName);

    inputLinkedService = linkedServices.First(
        ls => ls.Name == inputDataset.Properties.LinkedServiceName).Properties.TypeProperties as AzureStorageLinkedService;
    string inputConnectionString = inputLinkedService.ConnectionString;

    // create storage client for input. Pass the connection string.
    CloudStorageAccount inputStorageAccount = CloudStorageAccount.Parse(inputConnectionString);
    CloudTableClient inputTableClient = inputStorageAccount.CreateCloudTableClient();
    CloudTable inputTable = inputTableClient.GetTableReference(sourceTable.TableName);

    long totalProcessedRecords = 0;
    long actualAffectedRecords = 0;
    TableContinuationToken tableContinuationToken = null;
    List <Task> tasks = new List <Task>();

    do
    {
        var resultSegment = inputTable.ExecuteQuerySegmented(new TableQuery(), tableContinuationToken);
        tableContinuationToken = resultSegment.ContinuationToken;

        // Keep rows matching any configured prefix (or all rows when no
        // prefix filter was given), grouped by partition so each batch
        // stays within a single partition.
        var partitionGroups = (from s in resultSegment.Results
                               where rowKeyPrefixes == null || rowKeyPrefixes.Length <= 0 || this.IsMatch(s.RowKey, rowKeyPrefixes)
                               select s).GroupBy(a => a.PartitionKey);

        foreach (IGrouping <string, DynamicTableEntity> g in partitionGroups)
        {
            TableBatchOperation batch = new TableBatchOperation();
            foreach (DynamicTableEntity e in g.AsEnumerable())
            {
                // Skip rows whose partition key is not an app handle of any
                // owner named in the rule.
                if (!appHandles.Contains(e.PartitionKey))
                {
                    continue;
                }

                // Pick the value to be used for the specified AppHandle.
                // This is done by getting the owner key first from e.PartitionKey
                // (typo fix: was "owber").
                var ownerKey = ownerAppHandles.FirstOrDefault(x => x.Value.Contains(e.PartitionKey)).Key;

                // The owner key is used to pick the value for the column
                string newColumnValue = partitionKeyOwnerValueRuleDict[ownerKey];
                if (this.ReplaceColumnValue(e, columnName, columnType, newColumnValue))
                {
                    batch.Merge(e);
                    logger.Write("<partition key:{0}>, <row key:{1}>", e.PartitionKey, e.RowKey);
                }
            }

            if (batch.Count > 0)
            {
                tasks.Add(inputTable.ExecuteBatchInChunkAsync(batch));
                actualAffectedRecords += batch.Count;
            }

            logger.Write("Updated partition: {0}", g.Key);
        }

        totalProcessedRecords += resultSegment.Results.Count;
        logger.Write("Processed records count: {0}", totalProcessedRecords);
        logger.Write("Affected records count: {0}", actualAffectedRecords);
    }
    while (tableContinuationToken != null);

    Task.WaitAll(tasks.ToArray());
    logger.Write("Updated {0} records", actualAffectedRecords);

    return(new Dictionary <string, string>());
}
/// <summary>
/// Adds a column to an Azure table.
/// The column to be added is specified using the following extended properties
/// Extended Properties
///     columnName - Name of the column to be added
///     type - Data type of the column. Only supported types right now are: int32, bool, and string
///     defaultValue - Default value of the column. This is optional and will default to type's default value.
///     rowKeyPrefix - Rowkey prefix of the row in which the column will be added. This is optional and will identify the subset of rows to do this operation.
/// columnName and type are mandatory.
/// Extended Properties Example
///     "columnName": "DisableHandleValidation",
///     "type": "bool",
///     "defaultValue": "False",
///     "rowKeyPrefix": "ProfilesObject:"
/// Activity Operation
///     The activity iterates through all the rows from the input table with the matching rowKeyPrefix,
///     checks for the column, adds it if the column is not found and runs a merge table operation to merge the contents of
///     modified row/entity with an existing row/entity in the table.
/// </summary>
/// <param name="linkedServices">Linked services referenced by activity definition.</param>
/// <param name="datasets">Datasets referenced by activity definition.</param>
/// <param name="activity">Activity definition.</param>
/// <param name="logger">Used to log messages during activity execution.</param>
/// <returns>Activity state at the end of execution</returns>
public IDictionary <string, string> Execute(
    IEnumerable <LinkedService> linkedServices,
    IEnumerable <Dataset> datasets,
    Activity activity,
    IActivityLogger logger)
{
    DotNetActivity dotNetActivity = (DotNetActivity)activity.TypeProperties;
    IDictionary <string, string> extendedProperties = dotNetActivity.ExtendedProperties;

    logger.Write("Logging extended properties if any...");
    foreach (KeyValuePair <string, string> entry in extendedProperties)
    {
        logger.Write("<key:{0}> <value:{1}>", entry.Key, entry.Value);
    }

    if (!extendedProperties.ContainsKey("columnName"))
    {
        throw new ArgumentException("Column name is required", "columnName");
    }

    string columnName = extendedProperties["columnName"];

    if (!extendedProperties.ContainsKey("type"))
    {
        throw new ArgumentException("Type information is required", "type");
    }

    string type = extendedProperties["type"];

    string defaultValueStr = null;
    if (extendedProperties.ContainsKey("defaultValue"))
    {
        defaultValueStr = extendedProperties["defaultValue"];
    }

    string rowKeyPrefix = string.Empty;
    if (extendedProperties.ContainsKey("rowKeyPrefix"))
    {
        rowKeyPrefix = extendedProperties["rowKeyPrefix"];
    }

    AzureStorageLinkedService inputLinkedService;
    AzureTableDataset sourceTable;

    // For activities working on a single dataset, the first entry is the input dataset.
    // The activity.Inputs can have multiple datasets for building pipeline workflow
    // dependencies. We can ignore the rest of the datasets.
    // FIX: the sentence above had lost its comment marker and sat as bare
    // (non-compiling) text in the method body.
    Dataset inputDataset = datasets.Single(dataset => dataset.Name == activity.Inputs.First().Name);
    sourceTable = inputDataset.Properties.TypeProperties as AzureTableDataset;
    logger.Write("input table:{0}", sourceTable.TableName);

    inputLinkedService = linkedServices.First(
        ls => ls.Name == inputDataset.Properties.LinkedServiceName).Properties.TypeProperties as AzureStorageLinkedService;
    string inputConnectionString = inputLinkedService.ConnectionString;

    // create storage client for input. Pass the connection string.
    CloudStorageAccount inputStorageAccount = CloudStorageAccount.Parse(inputConnectionString);
    CloudTableClient inputTableClient = inputStorageAccount.CreateCloudTableClient();
    CloudTable inputTable = inputTableClient.GetTableReference(sourceTable.TableName);

    // The value every added column receives (typed per "type"/"defaultValue").
    EntityProperty columnValue = this.GetEntityProperty(type, defaultValueStr);

    long totalProcessedRecords = 0;
    long actualAffectedRecords = 0;
    TableContinuationToken tableContinuationToken = null;
    List <Task> tasks = new List <Task>();

    do
    {
        var resultSegment = inputTable.ExecuteQuerySegmented(new TableQuery(), tableContinuationToken);
        tableContinuationToken = resultSegment.ContinuationToken;

        // Keep rows matching the prefix (or all rows when no prefix was
        // given), grouped by partition so each batch stays within one partition.
        var partitionGroups = (from s in resultSegment.Results
                               where string.IsNullOrWhiteSpace(rowKeyPrefix) || s.RowKey.StartsWith(rowKeyPrefix)
                               select s).GroupBy(a => a.PartitionKey);

        foreach (IGrouping <string, DynamicTableEntity> g in partitionGroups)
        {
            // Create a new batch for every partition group within the resultSegment
            TableBatchOperation batch = new TableBatchOperation();
            foreach (DynamicTableEntity e in g.AsEnumerable())
            {
                // If the columnName does not exist in the properties, then only Add it
                if (!e.Properties.ContainsKey(columnName))
                {
                    e.Properties.Add(columnName, columnValue);
                    batch.Merge(e);
                    logger.Write("<partition key:{0}>, <row key:{1}>", e.PartitionKey, e.RowKey);
                }
            }

            if (batch.Count > 0)
            {
                // ExecuteBatchInChunkAsync is an extension method to chunk and process 100 operations in a batch
                tasks.Add(inputTable.ExecuteBatchInChunkAsync(batch));
                actualAffectedRecords += batch.Count;
            }
        }

        totalProcessedRecords += resultSegment.Results.Count;
        logger.Write("Processed records count: {0}", totalProcessedRecords);
        logger.Write("Affected records count: {0}", actualAffectedRecords);
    }
    while (tableContinuationToken != null);

    // The batch operations complete when Task.WaitAll completes
    // TODO : Add ContinueWith on ExecuteBatchAsync for tracing of each batch operation as it completes
    Task.WaitAll(tasks.ToArray());
    logger.Write("Added new column to {0} records", actualAffectedRecords);

    return(new Dictionary <string, string>());
}
public void TableBatchAllSupportedOperationsSync() { CloudTableClient tableClient = GenerateCloudTableClient(); TableBatchOperation batch = new TableBatchOperation(); string pk = Guid.NewGuid().ToString(); // insert batch.Insert(GenerateRandomEnitity(pk)); // delete { DynamicTableEntity entity = GenerateRandomEnitity(pk); currentTable.Execute(TableOperation.Insert(entity)); batch.Delete(entity); } // replace { DynamicTableEntity entity = GenerateRandomEnitity(pk); currentTable.Execute(TableOperation.Insert(entity)); batch.Replace(entity); } // insert or replace { DynamicTableEntity entity = GenerateRandomEnitity(pk); currentTable.Execute(TableOperation.Insert(entity)); batch.InsertOrReplace(entity); } // merge { DynamicTableEntity entity = GenerateRandomEnitity(pk); currentTable.Execute(TableOperation.Insert(entity)); batch.Merge(entity); } // insert or merge { DynamicTableEntity entity = GenerateRandomEnitity(pk); currentTable.Execute(TableOperation.Insert(entity)); batch.InsertOrMerge(entity); } IList<TableResult> results = currentTable.ExecuteBatch(batch); Assert.AreEqual(results.Count, 6); IEnumerator<TableResult> enumerator = results.GetEnumerator(); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.Created); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); enumerator.MoveNext(); Assert.AreEqual(enumerator.Current.HttpStatusCode, (int)HttpStatusCode.NoContent); }
// Model-checking driver: issues NUM_CALLS_PER_MACHINE randomized atomic
// calls (either a filtered atomic query or a batch write) against the table
// under test.  NOTE(review): the exact order of PSharpRuntime.Nondeterministic()
// and PSharpNondeterminism.Choice() calls defines the explored schedule, so
// the statement order here is load-bearing — do not reorder.
async Task DoRandomAtomicCalls()
{
    for (int callNum = 0; callNum < MigrationModel.NUM_CALLS_PER_MACHINE; callNum++)
    {
        // Snapshot of the reference table, used to pick valid/invalid ETags below.
        SortedDictionary<PrimaryKey, DynamicTableEntity> dump =
            await peekProxy.DumpReferenceTableAsync();

        if (PSharpRuntime.Nondeterministic())
        {
            // Query: single-partition filter AND a nondeterministic filter on
            // user properties (exercises filtering on the "isHappy" flag set below).
            var query = new TableQuery<DynamicTableEntity>();
            query.FilterString = ChainTableUtils.CombineFilters(
                TableQuery.GenerateFilterCondition(
                    TableConstants.PartitionKey, QueryComparisons.Equal, MigrationModel.SINGLE_PARTITION_KEY),
                TableOperators.And,
                NondeterministicUserPropertyFilterString());
            await RunQueryAtomicAsync(query);
        }
        else
        {
            // Batch write: 1 or 2 operations on distinct row keys.
            int batchSize = PSharpRuntime.Nondeterministic() ? 2 : 1;
            var batch = new TableBatchOperation();
            var rowKeyChoices = new List<string> { "0", "1", "2", "3", "4", "5" };

            for (int opNum = 0; opNum < batchSize; opNum++)
            {
                int opTypeNum = PSharpNondeterminism.Choice(7);
                int rowKeyI = PSharpNondeterminism.Choice(rowKeyChoices.Count);
                string rowKey = rowKeyChoices[rowKeyI];
                rowKeyChoices.RemoveAt(rowKeyI); // Avoid duplicate in same batch
                var primaryKey = new PrimaryKey(MigrationModel.SINGLE_PARTITION_KEY, rowKey);
                string eTag = null;
                // Ops 1-3 (Replace/Merge/Delete) carry an ETag: choose among
                // "match any", "wrong", and — only when the row exists — the real one.
                if (opTypeNum >= 1 && opTypeNum <= 3)
                {
                    DynamicTableEntity existingEntity;
                    int etagTypeNum = PSharpNondeterminism.Choice(
                        dump.TryGetValue(primaryKey, out existingEntity) ? 3 : 2);
                    switch (etagTypeNum)
                    {
                    case 0: eTag = ChainTable2Constants.ETAG_ANY; break;

                    case 1: eTag = "wrong"; break;

                    case 2: eTag = existingEntity.ETag; break;
                    }
                }
                DynamicTableEntity entity = new DynamicTableEntity
                {
                    PartitionKey = MigrationModel.SINGLE_PARTITION_KEY,
                    RowKey = rowKey,
                    ETag = eTag,
                    Properties = new Dictionary<string, EntityProperty> {
                        // Give us something to see on merge. Might help with tracing too!
                        { string.Format("{0}_c{1}_o{2}", machineId.ToString(), callNum, opNum),
                          new EntityProperty(true) },
                        // Property with 50%/50% distribution for use in filters.
                        { "isHappy", new EntityProperty(PSharpRuntime.Nondeterministic()) }
                    }
                };
                // Map the chosen op type onto the batch (6 = delete-if-exists).
                switch (opTypeNum)
                {
                case 0: batch.Insert(entity); break;

                case 1: batch.Replace(entity); break;

                case 2: batch.Merge(entity); break;

                case 3: batch.Delete(entity); break;

                case 4: batch.InsertOrReplace(entity); break;

                case 5: batch.InsertOrMerge(entity); break;

                case 6:
                    entity.ETag = ChainTable2Constants.ETAG_DELETE_IF_EXISTS;
                    batch.Delete(entity);
                    break;
                }
            }
            await RunBatchAsync(batch);
        }
    }
}
// Verifies that every batch operation type works against an entity whose
// partition key and row key are both empty strings.  Each step depends on the
// server state produced by the previous one, so the sequence is load-bearing.
public void TableBatchOperationsWithEmptyKeys()
{
    CloudTableClient tableClient = GenerateCloudTableClient();

    // Insert Entity
    DynamicTableEntity ent = new DynamicTableEntity()
    {
        PartitionKey = "", RowKey = ""
    };
    ent.Properties.Add("foo2", new EntityProperty("bar2"));
    ent.Properties.Add("foo", new EntityProperty("bar"));
    TableBatchOperation batch = new TableBatchOperation();
    batch.Insert(ent);
    currentTable.ExecuteBatch(batch);

    // Retrieve Entity (retrieveBatch is reused for every re-read below)
    TableBatchOperation retrieveBatch = new TableBatchOperation();
    retrieveBatch.Retrieve(ent.PartitionKey, ent.RowKey);
    TableResult result = currentTable.ExecuteBatch(retrieveBatch).First();
    DynamicTableEntity retrievedEntity = result.Result as DynamicTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(ent.PartitionKey, retrievedEntity.PartitionKey);
    Assert.AreEqual(ent.RowKey, retrievedEntity.RowKey);
    Assert.AreEqual(ent.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(ent.Properties["foo"].StringValue, retrievedEntity.Properties["foo"].StringValue);
    Assert.AreEqual(ent.Properties["foo"], retrievedEntity.Properties["foo"]);
    Assert.AreEqual(ent.Properties["foo2"].StringValue, retrievedEntity.Properties["foo2"].StringValue);
    Assert.AreEqual(ent.Properties["foo2"], retrievedEntity.Properties["foo2"]);

    // InsertOrMerge — adds foo3 alongside the existing properties
    DynamicTableEntity insertOrMergeEntity = new DynamicTableEntity(ent.PartitionKey, ent.RowKey);
    insertOrMergeEntity.Properties.Add("foo3", new EntityProperty("value"));
    batch = new TableBatchOperation();
    batch.InsertOrMerge(insertOrMergeEntity);
    currentTable.ExecuteBatch(batch);
    result = currentTable.ExecuteBatch(retrieveBatch).First();
    retrievedEntity = result.Result as DynamicTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(insertOrMergeEntity.Properties["foo3"], retrievedEntity.Properties["foo3"]);

    // InsertOrReplace — replaces the whole entity, leaving only prop2
    DynamicTableEntity insertOrReplaceEntity = new DynamicTableEntity(ent.PartitionKey, ent.RowKey);
    insertOrReplaceEntity.Properties.Add("prop2", new EntityProperty("otherValue"));
    batch = new TableBatchOperation();
    batch.InsertOrReplace(insertOrReplaceEntity);
    currentTable.ExecuteBatch(batch);
    result = currentTable.ExecuteBatch(retrieveBatch).First();
    retrievedEntity = result.Result as DynamicTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(1, retrievedEntity.Properties.Count);
    Assert.AreEqual(insertOrReplaceEntity.Properties["prop2"], retrievedEntity.Properties["prop2"]);

    // Merge — requires the current ETag from the last read
    DynamicTableEntity mergeEntity = new DynamicTableEntity(retrievedEntity.PartitionKey, retrievedEntity.RowKey)
    {
        ETag = retrievedEntity.ETag
    };
    mergeEntity.Properties.Add("mergeProp", new EntityProperty("merged"));
    batch = new TableBatchOperation();
    batch.Merge(mergeEntity);
    currentTable.ExecuteBatch(batch);

    // Retrieve Entity & Verify Contents
    result = currentTable.ExecuteBatch(retrieveBatch).First();
    retrievedEntity = result.Result as DynamicTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(mergeEntity.Properties["mergeProp"], retrievedEntity.Properties["mergeProp"]);

    // Replace — again needs the fresh ETag; entity ends with only replaceProp
    DynamicTableEntity replaceEntity = new DynamicTableEntity(ent.PartitionKey, ent.RowKey)
    {
        ETag = retrievedEntity.ETag
    };
    replaceEntity.Properties.Add("replaceProp", new EntityProperty("replace"));
    batch = new TableBatchOperation();
    batch.Replace(replaceEntity);
    currentTable.ExecuteBatch(batch);

    // Retrieve Entity & Verify Contents
    result = currentTable.ExecuteBatch(retrieveBatch).First();
    retrievedEntity = result.Result as DynamicTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(replaceEntity.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(replaceEntity.Properties["replaceProp"], retrievedEntity.Properties["replaceProp"]);

    // Delete Entity
    batch = new TableBatchOperation();
    batch.Delete(retrievedEntity);
    currentTable.ExecuteBatch(batch);

    // Retrieve Entity — must now be gone
    result = currentTable.ExecuteBatch(retrieveBatch).First();
    Assert.IsNull(result.Result);
}
/// <summary>
/// HTTP PUT endpoint that creates or updates a sample row.  Validates the
/// request payload, merges the submitted fields onto any existing row with
/// the same id, and writes it back via a single-operation batch.
/// </summary>
public static async Task <HttpResponseMessage> Run(
    [HttpTrigger(AuthorizationLevel.Admin, "put", Route = "samples/{id}")] HttpRequestMessage req,
    [Table(Constants.SamplesTableName)] CloudTable table,
    string id,
    TraceWriter log,
    CancellationToken ct)
{
    var clientIp = req.GetClientIp();
    var saveRequest = JsonConvert.DeserializeObject <SampleSaveRequest>(await req.Content.ReadAsStringAsync());

    // FIX: reject a missing payload before querying storage — the original
    // executed the table query first, wasting a round trip on invalid requests.
    if (saveRequest == null)
    {
        return(req.CreateErrorResponse(HttpStatusCode.BadRequest, "Request payload required."));
    }

    // Look up an existing sample with this id (PartitionKey is the type name).
    var existingQuery = table
                        .CreateQuery <Sample>()
                        .Where(smpl => smpl.PartitionKey.Equals(nameof(Sample)) && smpl.RowKey.Equals(id))
                        .AsTableQuery();
    var existing = (await table.ExecuteQuery(existingQuery, ct)).FirstOrDefault();
    var sample = existing ?? new Sample(id);
    var exists = existing != null;

    // Xaml is mandatory only when creating a new sample.
    if (!exists && (saveRequest.Xaml == null || saveRequest.Xaml.Length < 5))
    {
        return(req.CreateErrorResponse(HttpStatusCode.BadRequest, "Xaml required."));
    }

    // Size limits on each submitted field.
    if (saveRequest.Xaml != null && saveRequest.Xaml.Length > 512 * 1024)
    {
        return(req.CreateErrorResponse(HttpStatusCode.RequestEntityTooLarge, "Xaml too big."));
    }

    if (saveRequest.Data != null && saveRequest.Data.Length > 16 * 1024)
    {
        return(req.CreateErrorResponse(HttpStatusCode.RequestEntityTooLarge, "Data too big."));
    }

    if (saveRequest.Title != null && saveRequest.Title.Length > 255)
    {
        return(req.CreateErrorResponse(HttpStatusCode.RequestEntityTooLarge, "Title too big."));
    }

    if (saveRequest.App != null && saveRequest.App.Length > 255)
    {
        return(req.CreateErrorResponse(HttpStatusCode.RequestEntityTooLarge, "App name too big."));
    }

    // Copy over only the fields the caller actually supplied.
    if (!string.IsNullOrWhiteSpace(saveRequest.Data))
    {
        sample.Data = saveRequest.Data;
    }

    if (!string.IsNullOrWhiteSpace(saveRequest.Xaml))
    {
        sample.Xaml = saveRequest.Xaml;
    }

    if (string.IsNullOrWhiteSpace(sample.Category) || !string.IsNullOrWhiteSpace(saveRequest.Category))
    {
        sample.Category = saveRequest.Category ?? Constants.DefaultCategoryIdForSaving;
    }

    if (!string.IsNullOrWhiteSpace(saveRequest.Title))
    {
        sample.Title = saveRequest.Title;
    }

    // IP address and user agent are recorded once, on first save.
    if (string.IsNullOrWhiteSpace(sample.IpAddress))
    {
        sample.IpAddress = clientIp;
    }

    if (string.IsNullOrWhiteSpace(sample.UserAgent))
    {
        sample.UserAgent = req.Headers.UserAgent.ToString();
    }

    if (!string.IsNullOrWhiteSpace(saveRequest.App))
    {
        sample.App = saveRequest.App;
    }

    if (!string.IsNullOrWhiteSpace(saveRequest.PathData))
    {
        sample.PathData = saveRequest.PathData;
    }

    if (!string.IsNullOrWhiteSpace(saveRequest.AccentPathData))
    {
        sample.AccentPathData = saveRequest.AccentPathData;
    }

    // Insert new rows; merge submitted fields onto existing ones.
    var operation = new TableBatchOperation();
    if (existing == null)
    {
        operation.Insert(sample);
    }
    else
    {
        operation.Merge(sample);
    }

    await table.ExecuteBatchAsync(operation, null, null, ct);

    return(req.CreateResponse(HttpStatusCode.OK, id));
}
public void TableBatchMergeAPM() { CloudTableClient tableClient = GenerateCloudTableClient(); // Insert Entity DynamicTableEntity baseEntity = new DynamicTableEntity("merge test", "foo"); baseEntity.Properties.Add("prop1", new EntityProperty("value1")); currentTable.Execute(TableOperation.Insert(baseEntity)); DynamicTableEntity mergeEntity = new DynamicTableEntity(baseEntity.PartitionKey, baseEntity.RowKey) { ETag = baseEntity.ETag }; mergeEntity.Properties.Add("prop2", new EntityProperty("value2")); TableBatchOperation batch = new TableBatchOperation(); batch.Merge(mergeEntity); using (ManualResetEvent evt = new ManualResetEvent(false)) { IAsyncResult asyncRes = null; currentTable.BeginExecuteBatch(batch, (res) => { asyncRes = res; evt.Set(); }, null); evt.WaitOne(); currentTable.EndExecuteBatch(asyncRes); } // Retrieve Entity & Verify Contents TableResult result = currentTable.Execute(TableOperation.Retrieve(baseEntity.PartitionKey, baseEntity.RowKey)); DynamicTableEntity retrievedEntity = result.Result as DynamicTableEntity; Assert.IsNotNull(retrievedEntity); Assert.AreEqual(2, retrievedEntity.Properties.Count); Assert.AreEqual(baseEntity.Properties["prop1"], retrievedEntity.Properties["prop1"]); Assert.AreEqual(mergeEntity.Properties["prop2"], retrievedEntity.Properties["prop2"]); }
// Round-trips a string that may require XML/OData escaping through every
// write path — insert, retrieve, filtered query, merge, replace — optionally
// embedding the string in the partition key and optionally using batch
// operations.  The ETag carried on `ent` is updated by each write, so the
// merge/replace steps depend on the preceding steps' order.
private void DoEscapeTest(string data, bool useBatch, bool includeKey)
{
    DynamicTableEntity ent = new DynamicTableEntity(includeKey ? "temp" + data : "temp", Guid.NewGuid().ToString());
    ent.Properties.Add("foo", new EntityProperty(data));

    // Insert
    if (useBatch)
    {
        TableBatchOperation batch = new TableBatchOperation();
        batch.Insert(ent);
        currentTable.ExecuteBatch(batch);
    }
    else
    {
        currentTable.Execute(TableOperation.Insert(ent));
    }

    // Retrieve
    TableResult res = null;
    if (useBatch)
    {
        TableBatchOperation batch = new TableBatchOperation();
        batch.Retrieve(ent.PartitionKey, ent.RowKey);
        res = (currentTable.ExecuteBatch(batch))[0];
    }
    else
    {
        res = currentTable.Execute(TableOperation.Retrieve(ent.PartitionKey, ent.RowKey));
    }

    // Check equality — keys, ETag, and property survive the round trip intact
    DynamicTableEntity retrievedEntity = res.Result as DynamicTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(ent.PartitionKey, retrievedEntity.PartitionKey);
    Assert.AreEqual(ent.RowKey, retrievedEntity.RowKey);
    Assert.AreEqual(ent.ETag, retrievedEntity.ETag);
    Assert.AreEqual(ent.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(ent.Properties["foo"], retrievedEntity.Properties["foo"]);

    // Query using data filter; single quotes in the data must be doubled
    // for the OData filter string.
    TableQuery query = new TableQuery();
    query.Where(string.Format(
                    "(PartitionKey eq \'{0}\') and (RowKey eq \'{1}\') and (foo eq \'{2}\')",
                    ent.PartitionKey,
                    ent.RowKey,
                    data.Replace("\'", "\'\'")));
    retrievedEntity = currentTable.ExecuteQuery(query).Single();
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(ent.PartitionKey, retrievedEntity.PartitionKey);
    Assert.AreEqual(ent.RowKey, retrievedEntity.RowKey);
    Assert.AreEqual(ent.ETag, retrievedEntity.ETag);
    Assert.AreEqual(ent.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(ent.Properties["foo"], retrievedEntity.Properties["foo"]);

    // Merge — adds foo2 on top of the existing entity
    ent.Properties.Add("foo2", new EntityProperty("bar2"));
    if (useBatch)
    {
        TableBatchOperation batch = new TableBatchOperation();
        batch.Merge(ent);
        currentTable.ExecuteBatch(batch);
    }
    else
    {
        currentTable.Execute(TableOperation.Merge(ent));
    }

    // Retrieve
    if (useBatch)
    {
        TableBatchOperation batch = new TableBatchOperation();
        batch.Retrieve(ent.PartitionKey, ent.RowKey);
        res = (currentTable.ExecuteBatch(batch))[0];
    }
    else
    {
        res = currentTable.Execute(TableOperation.Retrieve(ent.PartitionKey, ent.RowKey));
    }

    retrievedEntity = res.Result as DynamicTableEntity;
    Assert.AreEqual(ent.PartitionKey, retrievedEntity.PartitionKey);
    Assert.AreEqual(ent.RowKey, retrievedEntity.RowKey);
    Assert.AreEqual(ent.ETag, retrievedEntity.ETag);
    Assert.AreEqual(ent.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(ent.Properties["foo"], retrievedEntity.Properties["foo"]);

    // Replace — overwrites the whole entity (foo2 dropped, foo3 added)
    ent.Properties.Remove("foo2");
    ent.Properties.Add("foo3", new EntityProperty("bar3"));
    if (useBatch)
    {
        TableBatchOperation batch = new TableBatchOperation();
        batch.Replace(ent);
        currentTable.ExecuteBatch(batch);
    }
    else
    {
        currentTable.Execute(TableOperation.Replace(ent));
    }

    // Retrieve
    if (useBatch)
    {
        TableBatchOperation batch = new TableBatchOperation();
        batch.Retrieve(ent.PartitionKey, ent.RowKey);
        res = (currentTable.ExecuteBatch(batch))[0];
    }
    else
    {
        res = currentTable.Execute(TableOperation.Retrieve(ent.PartitionKey, ent.RowKey));
    }

    retrievedEntity = res.Result as DynamicTableEntity;
    Assert.AreEqual(ent.PartitionKey, retrievedEntity.PartitionKey);
    Assert.AreEqual(ent.RowKey, retrievedEntity.RowKey);
    Assert.AreEqual(ent.ETag, retrievedEntity.ETag);
    Assert.AreEqual(ent.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(ent.Properties["foo"], retrievedEntity.Properties["foo"]);
}
/// <summary>
/// Verifies secondary-endpoint routing rules for table batches: a retrieve-only
/// batch is allowed to execute against the secondary location, while every write
/// operation (Insert, InsertOrMerge, InsertOrReplace, Merge, Replace, Delete)
/// forced to LocationMode.SecondaryOnly must fail with the primary-only
/// StorageException message.
/// </summary>
public async Task TableBatchOnSecondaryAsync()
{
    AssertSecondaryEndpoint();

    CloudTable table = GenerateCloudTableClient().GetTableReference(GenerateRandomTableName());

    // NoRetry so the expected failures surface immediately instead of retrying against primary.
    TableRequestOptions options = new TableRequestOptions()
    {
        LocationMode = LocationMode.SecondaryOnly,
        RetryPolicy = new NoRetry(),
    };

    // A retrieve-only batch may be served by the secondary endpoint.
    TableBatchOperation batch = new TableBatchOperation();
    batch.Retrieve("PartitionKey", "RowKey");
    OperationContext context = new OperationContext();
    await table.ExecuteBatchAsync(batch, options, context);
    Assert.AreEqual(StorageLocation.Secondary, context.LastResult.TargetLocation);

    // Insert must be rejected on the secondary.
    batch = new TableBatchOperation();
    batch.Insert(new DynamicTableEntity("PartitionKey", "RowKey"));
    StorageException e = await TestHelper.ExpectedExceptionAsync<StorageException>(
        async () => await table.ExecuteBatchAsync(batch, options, null),
        "Batch operations other than retrieve should not be sent to secondary");
    Assert.AreEqual(SR.PrimaryOnlyCommand, e.Message);

    // InsertOrMerge must be rejected on the secondary.
    batch = new TableBatchOperation();
    batch.InsertOrMerge(new DynamicTableEntity("PartitionKey", "RowKey"));
    e = await TestHelper.ExpectedExceptionAsync<StorageException>(
        async () => await table.ExecuteBatchAsync(batch, options, null),
        "Batch operations other than retrieve should not be sent to secondary");
    Assert.AreEqual(SR.PrimaryOnlyCommand, e.Message);

    // InsertOrReplace must be rejected on the secondary.
    batch = new TableBatchOperation();
    batch.InsertOrReplace(new DynamicTableEntity("PartitionKey", "RowKey"));
    e = await TestHelper.ExpectedExceptionAsync<StorageException>(
        async () => await table.ExecuteBatchAsync(batch, options, null),
        "Batch operations other than retrieve should not be sent to secondary");
    Assert.AreEqual(SR.PrimaryOnlyCommand, e.Message);

    // Merge must be rejected on the secondary ("*" ETag so the op itself is well-formed).
    batch = new TableBatchOperation();
    batch.Merge(new DynamicTableEntity("PartitionKey", "RowKey") { ETag = "*" });
    e = await TestHelper.ExpectedExceptionAsync<StorageException>(
        async () => await table.ExecuteBatchAsync(batch, options, null),
        "Batch operations other than retrieve should not be sent to secondary");
    Assert.AreEqual(SR.PrimaryOnlyCommand, e.Message);

    // Replace must be rejected on the secondary.
    batch = new TableBatchOperation();
    batch.Replace(new DynamicTableEntity("PartitionKey", "RowKey") { ETag = "*" });
    e = await TestHelper.ExpectedExceptionAsync<StorageException>(
        async () => await table.ExecuteBatchAsync(batch, options, null),
        "Batch operations other than retrieve should not be sent to secondary");
    Assert.AreEqual(SR.PrimaryOnlyCommand, e.Message);

    // Delete must be rejected on the secondary.
    batch = new TableBatchOperation();
    batch.Delete(new DynamicTableEntity("PartitionKey", "RowKey") { ETag = "*" });
    e = await TestHelper.ExpectedExceptionAsync<StorageException>(
        async () => await table.ExecuteBatchAsync(batch, options, null),
        "Batch operations other than retrieve should not be sent to secondary");
    Assert.AreEqual(SR.PrimaryOnlyCommand, e.Message);
}
public void BatchUpdate <T>(T entity) where T : BaseTableEntity { CheckBatch(entity); _batch.Merge(entity); }
/// <summary>
/// Verifies that a batched Merge fails with 412 PreconditionFailed when it carries a
/// stale ETag, and with 404 NotFound when the target entity has been deleted.
/// </summary>
/// <param name="format">Payload format (JSON flavor) to run the scenario under.</param>
private async Task DoTableBatchMergeFailAsync(TablePayloadFormat format)
{
    tableClient.DefaultRequestOptions.PayloadFormat = format;

    // Insert Entity
    DynamicTableEntity baseEntity = new DynamicTableEntity("merge test", "foo" + format.ToString());
    baseEntity.Properties.Add("prop1", new EntityProperty("value1"));
    await currentTable.ExecuteAsync(TableOperation.Insert(baseEntity));

    // Capture the post-insert ETag before revving it, so the merge below is stale.
    string staleEtag = baseEntity.ETag;

    // update entity to rev etag
    baseEntity.Properties["prop1"].StringValue = "updated value";
    await currentTable.ExecuteAsync(TableOperation.Replace(baseEntity));

    OperationContext opContext = new OperationContext();

    try
    {
        // Attempt a merge with stale etag — the service must reject it.
        DynamicTableEntity mergeEntity = new DynamicTableEntity(baseEntity.PartitionKey, baseEntity.RowKey) { ETag = staleEtag };
        mergeEntity.Properties.Add("prop2", new EntityProperty("value2"));

        TableBatchOperation batch = new TableBatchOperation();
        batch.Merge(mergeEntity);
        await currentTable.ExecuteBatchAsync(batch, null, opContext);
        Assert.Fail();
    }
    catch (Exception)
    {
        // Expect exactly one 412 with either of the two error codes/messages the service may return.
        TestHelper.ValidateResponse(
            opContext,
            1,
            (int)HttpStatusCode.PreconditionFailed,
            new string[] { "UpdateConditionNotSatisfied", "ConditionNotMet" },
            new string[] { "The update condition specified in the request was not satisfied.", "The condition specified using HTTP conditional header(s) is not met." });
    }

    // Delete Entity
    await currentTable.ExecuteAsync(TableOperation.Delete(baseEntity));

    opContext = new OperationContext();

    // try merging with deleted entity — even a current ETag must yield 404.
    try
    {
        DynamicTableEntity mergeEntity = new DynamicTableEntity(baseEntity.PartitionKey, baseEntity.RowKey) { ETag = baseEntity.ETag };
        mergeEntity.Properties.Add("prop2", new EntityProperty("value2"));

        TableBatchOperation batch = new TableBatchOperation();
        batch.Merge(mergeEntity);
        await currentTable.ExecuteBatchAsync(batch, null, opContext);
        Assert.Fail();
    }
    catch (Exception)
    {
        TestHelper.ValidateResponse(opContext, 1, (int)HttpStatusCode.NotFound, new string[] { "ResourceNotFound" }, "The specified resource does not exist.");
    }
}
/// <summary>
/// Persists <paramref name="models"/> to table storage using the requested operation
/// type, splitting the work into batches of at most 100 operations (the Azure Table
/// service per-batch limit). When <c>_autoCreateTable</c> is set and the table is
/// missing, the table is created and the store is retried once.
/// </summary>
/// <typeparam name="T">Model type; must have an entity mapper registered in _entityMapperRegistry.</typeparam>
/// <param name="storaeOperationType">The table operation applied to every model. (Name kept for interface compatibility.)</param>
/// <param name="models">The models to persist.</param>
/// <exception cref="StorageException">Rethrown for any storage failure other than the auto-create case.</exception>
public async Task StoreAsync<T>(nStoreOperation storaeOperationType, IEnumerable<T> models) where T : new()
{
    try
    {
        // notify delegate before any work starts
        _delegate?.OnStoring(typeof(T), storaeOperationType);

        // Retrieve a reference to the table.
        CloudTable table = GetTableReference(GetTableName<T>());

        // lookup the entitymapper for T once, outside the loop
        var entityMapper = _entityMapperRegistry[typeof(T)];

        // Build the batches; start with one empty batch.
        List<TableBatchOperation> batchOperations = new List<TableBatchOperation>();
        var currentBatch = new TableBatchOperation();
        batchOperations.Add(currentBatch);

        int modelCounter = 0;
        foreach (var model in models)
        {
            // One wrapped entity per model, shared by every case below.
            var entity = new DynamicTableEntity<T>(model, entityMapper);

            switch (storaeOperationType)
            {
                case nStoreOperation.insertOperation:
                    currentBatch.Insert(entity);
                    break;
                case nStoreOperation.insertOrReplaceOperation:
                    currentBatch.InsertOrReplace(entity);
                    break;
                case nStoreOperation.mergeOperation:
                    currentBatch.Merge(entity);
                    break;
                case nStoreOperation.mergeOrInserOperation:
                    currentBatch.InsertOrMerge(entity);
                    break;
                case nStoreOperation.delete:
                    currentBatch.Delete(entity);
                    break;
            }

            modelCounter++;

            // Start a fresh batch after every 100 operations — the service rejects larger batches.
            if (modelCounter % 100 == 0)
            {
                currentBatch = new TableBatchOperation();
                batchOperations.Add(currentBatch);
            }
        }

        // execute every non-empty batch (the last one may be empty when count is a multiple of 100)
        foreach (var createdBatch in batchOperations)
        {
            if (createdBatch.Count > 0)
            {
                await table.ExecuteBatchAsync(createdBatch);

                // notify delegate with the number of stored operations
                _delegate?.OnStored(typeof(T), storaeOperationType, createdBatch.Count, null);
            }
        }
    }
    catch (StorageException ex)
    {
        // Missing-table failure: create the table and retry the whole store once.
        if (_autoCreateTable && ex.Message.StartsWith("0:The table specified does not exist", StringComparison.CurrentCulture))
        {
            await CreateTableAsync<T>();
            await StoreAsync<T>(storaeOperationType, models);
        }
        else
        {
            // notify delegate of the failure
            _delegate?.OnStored(typeof(T), storaeOperationType, 0, ex);

            // "throw;" (not "throw ex;") so the original stack trace is preserved
            throw;
        }
    }
}
/// <summary>
/// Verifies that every batch operation type (Insert, Retrieve, InsertOrMerge,
/// InsertOrReplace, Merge, Replace, Delete) works for an entity whose PartitionKey
/// and RowKey are both the empty string.
/// </summary>
/// <param name="format">Payload format to run the scenario under.</param>
private async Task DoTableBatchOperationsWithEmptyKeysAsync(TablePayloadFormat format)
{
    tableClient.DefaultRequestOptions.PayloadFormat = format;

    // Insert Entity with empty partition and row keys
    DynamicTableEntity ent = new DynamicTableEntity() { PartitionKey = "", RowKey = "" };
    ent.Properties.Add("foo2", new EntityProperty("bar2"));
    ent.Properties.Add("foo", new EntityProperty("bar"));
    TableBatchOperation batch = new TableBatchOperation();
    batch.Insert(ent);
    await currentTable.ExecuteBatchAsync(batch);

    // Retrieve Entity — this batch is reused after every mutation below.
    TableBatchOperation retrieveBatch = new TableBatchOperation();
    retrieveBatch.Retrieve(ent.PartitionKey, ent.RowKey);
    TableResult result = (await currentTable.ExecuteBatchAsync(retrieveBatch)).First();

    DynamicTableEntity retrievedEntity = result.Result as DynamicTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(ent.PartitionKey, retrievedEntity.PartitionKey);
    Assert.AreEqual(ent.RowKey, retrievedEntity.RowKey);
    Assert.AreEqual(ent.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(ent.Properties["foo"].StringValue, retrievedEntity.Properties["foo"].StringValue);
    Assert.AreEqual(ent.Properties["foo"], retrievedEntity.Properties["foo"]);
    Assert.AreEqual(ent.Properties["foo2"].StringValue, retrievedEntity.Properties["foo2"].StringValue);
    Assert.AreEqual(ent.Properties["foo2"], retrievedEntity.Properties["foo2"]);

    // InsertOrMerge — adds foo3 alongside the existing properties
    DynamicTableEntity insertOrMergeEntity = new DynamicTableEntity(ent.PartitionKey, ent.RowKey);
    insertOrMergeEntity.Properties.Add("foo3", new EntityProperty("value"));
    batch = new TableBatchOperation();
    batch.InsertOrMerge(insertOrMergeEntity);
    await currentTable.ExecuteBatchAsync(batch);

    result = (await currentTable.ExecuteBatchAsync(retrieveBatch)).First();
    retrievedEntity = result.Result as DynamicTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(insertOrMergeEntity.Properties["foo3"], retrievedEntity.Properties["foo3"]);

    // InsertOrReplace — replaces the whole property set with just prop2
    DynamicTableEntity insertOrReplaceEntity = new DynamicTableEntity(ent.PartitionKey, ent.RowKey);
    insertOrReplaceEntity.Properties.Add("prop2", new EntityProperty("otherValue"));
    batch = new TableBatchOperation();
    batch.InsertOrReplace(insertOrReplaceEntity);
    await currentTable.ExecuteBatchAsync(batch);

    result = (await currentTable.ExecuteBatchAsync(retrieveBatch)).First();
    retrievedEntity = result.Result as DynamicTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(1, retrievedEntity.Properties.Count);
    Assert.AreEqual(insertOrReplaceEntity.Properties["prop2"], retrievedEntity.Properties["prop2"]);

    // Merge — conditional on the current ETag
    DynamicTableEntity mergeEntity = new DynamicTableEntity(retrievedEntity.PartitionKey, retrievedEntity.RowKey) { ETag = retrievedEntity.ETag };
    mergeEntity.Properties.Add("mergeProp", new EntityProperty("merged"));
    batch = new TableBatchOperation();
    batch.Merge(mergeEntity);
    await currentTable.ExecuteBatchAsync(batch);

    // Retrieve Entity & Verify Contents
    result = (await currentTable.ExecuteBatchAsync(retrieveBatch)).First();
    retrievedEntity = result.Result as DynamicTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(mergeEntity.Properties["mergeProp"], retrievedEntity.Properties["mergeProp"]);

    // Replace — conditional on the current ETag, wipes all prior properties
    DynamicTableEntity replaceEntity = new DynamicTableEntity(ent.PartitionKey, ent.RowKey) { ETag = retrievedEntity.ETag };
    replaceEntity.Properties.Add("replaceProp", new EntityProperty("replace"));
    batch = new TableBatchOperation();
    batch.Replace(replaceEntity);
    await currentTable.ExecuteBatchAsync(batch);

    // Retrieve Entity & Verify Contents
    result = (await currentTable.ExecuteBatchAsync(retrieveBatch)).First();
    retrievedEntity = result.Result as DynamicTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(replaceEntity.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(replaceEntity.Properties["replaceProp"], retrievedEntity.Properties["replaceProp"]);

    // Delete Entity
    batch = new TableBatchOperation();
    batch.Delete(retrievedEntity);
    await currentTable.ExecuteBatchAsync(batch);

    // Retrieve Entity — must now be gone
    result = (await currentTable.ExecuteBatchAsync(retrieveBatch)).First();
    Assert.IsNull(result.Result);
}
/// <summary>
/// Verifies that every batch operation type (Insert, Retrieve, InsertOrMerge,
/// InsertOrReplace, Merge, Replace, Delete) works against the replicated table for
/// an entity whose PartitionKey and RowKey are both the empty string.
/// </summary>
public void TableBatchOperationsWithEmptyKeys()
{
    // Insert Entity with empty partition and row keys
    DynamicReplicatedTableEntity ent = new DynamicReplicatedTableEntity() { PartitionKey = "", RowKey = "" };
    ent.Properties.Add("foo2", new EntityProperty("bar2"));
    ent.Properties.Add("foo", new EntityProperty("bar"));
    TableBatchOperation batch = new TableBatchOperation();
    batch.Insert(ent);
    this.repTable.ExecuteBatch(batch);

    // Retrieve Entity — this batch is reused after every mutation below.
    TableBatchOperation retrieveBatch = new TableBatchOperation();
    retrieveBatch.Retrieve<DynamicReplicatedTableEntity>(ent.PartitionKey, ent.RowKey);
    TableResult result = this.repTable.ExecuteBatch(retrieveBatch).First();

    DynamicReplicatedTableEntity retrievedEntity = result.Result as DynamicReplicatedTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(ent.PartitionKey, retrievedEntity.PartitionKey);
    Assert.AreEqual(ent.RowKey, retrievedEntity.RowKey);
    Assert.AreEqual(ent.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(ent.Properties["foo"].StringValue, retrievedEntity.Properties["foo"].StringValue);
    Assert.AreEqual(ent.Properties["foo"], retrievedEntity.Properties["foo"]);
    Assert.AreEqual(ent.Properties["foo2"].StringValue, retrievedEntity.Properties["foo2"].StringValue);
    Assert.AreEqual(ent.Properties["foo2"], retrievedEntity.Properties["foo2"]);

    // InsertOrMerge — adds foo3 alongside the existing properties
    DynamicReplicatedTableEntity insertOrMergeEntity = new DynamicReplicatedTableEntity(ent.PartitionKey, ent.RowKey);
    insertOrMergeEntity.Properties.Add("foo3", new EntityProperty("value"));
    batch = new TableBatchOperation();
    batch.InsertOrMerge(insertOrMergeEntity);
    this.repTable.ExecuteBatch(batch);

    result = this.repTable.ExecuteBatch(retrieveBatch).First();
    retrievedEntity = result.Result as DynamicReplicatedTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(insertOrMergeEntity.Properties["foo3"], retrievedEntity.Properties["foo3"]);

    // InsertOrReplace — replaces the whole property set with just prop2
    DynamicReplicatedTableEntity insertOrReplaceEntity = new DynamicReplicatedTableEntity(ent.PartitionKey, ent.RowKey);
    insertOrReplaceEntity.Properties.Add("prop2", new EntityProperty("otherValue"));
    batch = new TableBatchOperation();
    batch.InsertOrReplace(insertOrReplaceEntity);
    this.repTable.ExecuteBatch(batch);

    result = this.repTable.ExecuteBatch(retrieveBatch).First();
    retrievedEntity = result.Result as DynamicReplicatedTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(1, retrievedEntity.Properties.Count);
    Assert.AreEqual(insertOrReplaceEntity.Properties["prop2"], retrievedEntity.Properties["prop2"]);

    // Merge — conditional on the current ETag
    DynamicReplicatedTableEntity mergeEntity = new DynamicReplicatedTableEntity(retrievedEntity.PartitionKey, retrievedEntity.RowKey) { ETag = retrievedEntity.ETag };
    mergeEntity.Properties.Add("mergeProp", new EntityProperty("merged"));
    batch = new TableBatchOperation();
    batch.Merge(mergeEntity);
    this.repTable.ExecuteBatch(batch);

    // Retrieve Entity & Verify Contents
    result = this.repTable.ExecuteBatch(retrieveBatch).First();
    retrievedEntity = result.Result as DynamicReplicatedTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(mergeEntity.Properties["mergeProp"], retrievedEntity.Properties["mergeProp"]);

    // Replace — conditional on the current ETag, wipes all prior properties
    DynamicReplicatedTableEntity replaceEntity = new DynamicReplicatedTableEntity(ent.PartitionKey, ent.RowKey) { ETag = retrievedEntity.ETag };
    replaceEntity.Properties.Add("replaceProp", new EntityProperty("replace"));
    batch = new TableBatchOperation();
    batch.Replace(replaceEntity);
    this.repTable.ExecuteBatch(batch);

    // Retrieve Entity & Verify Contents
    result = this.repTable.ExecuteBatch(retrieveBatch).First();
    retrievedEntity = result.Result as DynamicReplicatedTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(replaceEntity.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(replaceEntity.Properties["replaceProp"], retrievedEntity.Properties["replaceProp"]);

    // Delete Entity
    batch = new TableBatchOperation();
    batch.Delete(retrievedEntity);
    this.repTable.ExecuteBatch(batch);

    // Retrieve Entity — must now be gone
    result = this.repTable.ExecuteBatch(retrieveBatch).First();
    Assert.IsNull(result.Result);
}
public void TableBatchAllSupportedOperationsSync() { TableBatchOperation batch = new TableBatchOperation(); string pk = Guid.NewGuid().ToString(); // insert batch.Insert(GenerateRandomEnitity(pk)); // delete { DynamicReplicatedTableEntity entity = GenerateRandomEnitity(pk); this.repTable.Execute(TableOperation.Insert(entity)); batch.Delete(entity); } // replace { DynamicReplicatedTableEntity entity = GenerateRandomEnitity(pk); this.repTable.Execute(TableOperation.Insert(entity)); batch.Replace(entity); } // insert or replace { DynamicReplicatedTableEntity entity = GenerateRandomEnitity(pk); this.repTable.Execute(TableOperation.Insert(entity)); batch.InsertOrReplace(entity); } // merge { DynamicReplicatedTableEntity entity = GenerateRandomEnitity(pk); this.repTable.Execute(TableOperation.Insert(entity)); batch.Merge(entity); } // insert or merge { DynamicReplicatedTableEntity entity = GenerateRandomEnitity(pk); this.repTable.Execute(TableOperation.Insert(entity)); batch.InsertOrMerge(entity); } IList<TableResult> results = this.repTable.ExecuteBatch(batch); Assert.AreEqual(results.Count, 6); IEnumerator<TableResult> enumerator = results.GetEnumerator(); for (int i = 0; i < results.Count; i++) { enumerator.MoveNext(); Assert.AreEqual((int)HttpStatusCode.NoContent, enumerator.Current.HttpStatusCode, "HttpStatusCode mismatch i={0}", i); } }
/// <summary> /// Update or Insert Configuration values /// </summary> /// <param name="configs"></param> public bool UpdateEntityProperty(string configs) { try { // Retrieve a reference to the table. CloudTable table = GetTable(); TableBatchOperation batchOperation = new TableBatchOperation(); Dictionary<string, Dictionary<string, string>> allValues = new Dictionary<string, Dictionary<string, string>>(); Dictionary<string,string> keyValues = new Dictionary<string, string>(); Dictionary<string, EntityProperty> newProperties = new Dictionary<string, EntityProperty>(); //add all the configsGroups and their key value pairs to a dictionary allValues = JsonConvert.DeserializeObject<Dictionary<string, Dictionary<string, string>>>(configs); var batch = new TableBatchOperation(); TableQuery<DynamicTableEntity> queryFinal = new TableQuery<DynamicTableEntity>(); foreach (KeyValuePair<string, Dictionary<string, string>> entry in allValues) { foreach (KeyValuePair<string, string> keyValue in entry.Value) { TableQuery<DynamicTableEntity> entityQuery = new TableQuery<DynamicTableEntity>().Where( TableQuery.CombineFilters( TableQuery.GenerateFilterCondition(configSettings.ConfigGroup, QueryComparisons.Equal.ToLower(), entry.Key), TableOperators.And, TableQuery.GenerateFilterCondition(configSettings.Key, QueryComparisons.Equal, keyValue.Key))); IEnumerable<DynamicTableEntity> queryResult = table.ExecuteQuery(entityQuery); if (queryResult.GetEnumerator().MoveNext()) { foreach (DynamicTableEntity entity in queryResult) { entity.Properties[configSettings.ConfigGroup].StringValue = entry.Key; entity.Properties[configSettings.Key].StringValue = keyValue.Key; entity.Properties[configSettings.Value].StringValue = keyValue.Value; batchOperation.Merge(entity); } } else { DynamicTableEntity config = CreateEntity(entry, keyValue); batchOperation.InsertOrReplace(config); } } } table.ExecuteBatch(batchOperation); return true; } catch (Exception ex) { throw; } }