/// <summary>
/// Before a batch runs against the new table, copies every row it targets
/// that exists in the old table but not yet in the new table.
/// </summary>
/// <param name="batch">Batch whose targeted rows must be migrated; all
/// operations must share one partition key.</param>
/// <param name="requestOptions">Passed through to the backend table calls.</param>
/// <param name="operationContext">Passed through to the backend table calls.</param>
async Task EnsureAffectedRowsMigratedAsync(TableBatchOperation batch, TableRequestOptions requestOptions, OperationContext operationContext)
{
    // For validation (all ops share a partition key); result ignored.
    // (Was previously assigned to an unused local.)
    ChainTableUtils.GetBatchPartitionKey(batch);
    var query = GenerateQueryForAffectedRows(batch);
    IList<MTableEntity> oldRows = await oldTable.ExecuteQueryAtomicAsync(query, requestOptions, operationContext);
    await monitor.AnnotateLastBackendCallAsync();
    IList<MTableEntity> newRows = await newTable.ExecuteQueryAtomicAsync(query, requestOptions, operationContext);
    await monitor.AnnotateLastBackendCallAsync();
    Dictionary<string, MTableEntity> oldDict = oldRows.ToDictionary(ent => ent.RowKey);
    Dictionary<string, MTableEntity> newDict = newRows.ToDictionary(ent => ent.RowKey);
    // Migrate any affected rows not already migrated.
    foreach (TableOperation op in batch)
    {
        string targetedRowKey = op.GetEntity().RowKey;
        MTableEntity oldEntity;
        if (oldDict.TryGetValue(targetedRowKey, out oldEntity) && !newDict.ContainsKey(targetedRowKey))
        {
            await TryCopyEntityToNewTableAsync(oldEntity, requestOptions, operationContext);
        }
    }
}
/// <summary>
/// Attempts to execute <paramref name="batch"/> against the old table.
/// Returns the per-operation results on success, or null if the partition
/// has been switched to the new table (the caller must then retry there).
/// </summary>
/// <param name="batch">The caller's batch; all operations share one partition key.</param>
/// <param name="requestOptions">Passed through to the backend table calls.</param>
/// <param name="operationContext">Passed through to the backend table calls.</param>
async Task<IList<TableResult>> AttemptBatchOnOldTableAsync(TableBatchOperation batch, TableRequestOptions requestOptions, OperationContext operationContext)
{
    string partitionKey = ChainTableUtils.GetBatchPartitionKey(batch);
    await TryMarkPartitionPopulatedAsync(partitionKey, requestOptions, operationContext);
    var oldBatch = new TableBatchOperation();
    // Prepend a Merge on the "partition populated" assertion row.  If that
    // row is gone the merge fails at index 0, which is how we distinguish
    // "partition switched" from a genuine failure of a caller operation.
    oldBatch.Merge(new DynamicTableEntity
    {
        PartitionKey = partitionKey,
        RowKey = ROW_KEY_PARTITION_POPULATED_ASSERTION,
        ETag = ChainTable2Constants.ETAG_ANY,
    });
    // No AddRange? :(
    foreach (TableOperation op in batch)
    {
        oldBatch.Add(op);
    }
    IList<TableResult> oldResults;
    try
    {
        oldResults = await oldTable.ExecuteBatchAsync(oldBatch, requestOptions, operationContext);
    }
    catch (ChainTableBatchException ex)
    {
        if (ex.FailedOpIndex == 0)
        {
            // This must mean the partition is switched.
            await monitor.AnnotateLastBackendCallAsync();
            return (null);
        }
        else
        {
            // One of the caller's operations failed.  Shift the failed index
            // down by one so the prepended assertion merge stays invisible.
            await monitor.AnnotateLastBackendCallAsync(wasLinearizationPoint: true);
            throw ChainTableUtils.GenerateBatchException(ex.GetHttpStatusCode(), ex.FailedOpIndex - 1);
        }
    }
    // Drop the result of the prepended assertion merge before returning.
    oldResults.RemoveAt(0);
    await monitor.AnnotateLastBackendCallAsync(wasLinearizationPoint: true, successfulBatchResult: oldResults);
    return (oldResults);
}
/*
 * FIXME: This will not work against real Azure table because a batch
 * can affect up to 100 rows but a filter string only allows 15
 * comparisons.  The only other thing we can really do in general is
 * query the whole partition, but that might be slow.  We can try an
 * atomic query of the whole partition (maybe even with a timeout lower
 * than the default) and if that fails, fall back to breaking the query
 * into up to 8 partial queries.
 */
/// <summary>
/// Builds an atomic query that selects exactly the rows targeted by
/// <paramref name="batch"/>: the batch's partition key AND-ed with an
/// OR-chain of row-key equality comparisons.
/// </summary>
static TableQuery<MTableEntity> GenerateQueryForAffectedRows(TableBatchOperation batch)
{
    // OR together one equality comparison per targeted row key.
    string rowKeyDisjunction = null;
    foreach (TableOperation op in batch)
    {
        string equalsRowKey = TableQuery.GenerateFilterCondition(
            TableConstants.RowKey, QueryComparisons.Equal, op.GetEntity().RowKey);
        if (rowKeyDisjunction == null)
        {
            rowKeyDisjunction = equalsRowKey;
        }
        else
        {
            rowKeyDisjunction = TableQuery.CombineFilters(rowKeyDisjunction, TableOperators.Or, equalsRowKey);
        }
    }
    // Restrict the row-key disjunction to the batch's partition.
    string partitionComparison = TableQuery.GenerateFilterCondition(
        TableConstants.PartitionKey, QueryComparisons.Equal,
        ChainTableUtils.GetBatchPartitionKey(batch));
    string combinedFilter = TableQuery.CombineFilters(
        partitionComparison, TableOperators.And, rowKeyDisjunction);
    return new TableQuery<MTableEntity> { FilterString = combinedFilter };
}
/// <summary>
/// Deterministically replays <paramref name="originalBatch"/> against this
/// in-memory table.  Each written entity receives either a locally minted
/// ETag or, when <paramref name="originalResponse"/> is provided, the ETag
/// from the corresponding entry of that response (so a mirror stays in sync
/// with the table that produced the response).  The batch is applied to a
/// copy of the table and committed only if every operation succeeds
/// (all-or-nothing).
/// </summary>
public override Task<IList<TableResult>> ExecuteMirrorBatchAsync(
    TableBatchOperation originalBatch, IList<TableResult> originalResponse,
    TableRequestOptions requestOptions = null, OperationContext operationContext = null)
{
    ChainTableUtils.GetBatchPartitionKey(originalBatch); // For validation; result ignored
    // Copy the table.  Entities are aliased to the original table, so don't mutate them.
    var tmpTable = new SortedDictionary<PrimaryKey, DynamicTableEntity>(table);
    int tmpNextEtag = nextEtag;
    var results = new List<TableResult>();
    for (int i = 0; i < originalBatch.Count; i++)
    {
        TableOperation op = originalBatch[i];
        TableOperationType opType = op.GetOperationType();
        ITableEntity passedEntity = op.GetEntity();
        PrimaryKey key = passedEntity.GetPrimaryKey();
        DynamicTableEntity oldEntity = tmpTable.GetValueOrDefault(key);
        DynamicTableEntity newEntity = null;
        // NoContent unless a successful Insert overrides it to Created below.
        HttpStatusCode statusCode = HttpStatusCode.NoContent;
        if (opType == TableOperationType.Insert)
        {
            if (oldEntity != null)
            {
                throw ChainTableUtils.GenerateBatchException(HttpStatusCode.Conflict, i);
            }
            else
            {
                newEntity = ChainTableUtils.CopyEntity<DynamicTableEntity>(passedEntity);
                statusCode = HttpStatusCode.Created;
            }
        }
        else if (opType == TableOperationType.InsertOrReplace)
        {
            newEntity = ChainTableUtils.CopyEntity<DynamicTableEntity>(passedEntity);
        }
        else if (opType == TableOperationType.InsertOrMerge)
        {
            if (oldEntity == null)
            {
                newEntity = ChainTableUtils.CopyEntity<DynamicTableEntity>(passedEntity);
            }
            else
            {
                // Merge on top of a copy of the existing entity, never in place.
                newEntity = ChainTableUtils.CopyEntity<DynamicTableEntity>(oldEntity);
                Merge(newEntity, passedEntity);
            }
        }
        else if (opType == TableOperationType.Delete
            && passedEntity.ETag == ChainTable2Constants.ETAG_DELETE_IF_EXISTS)
        {
            // Unconditional delete: succeeds whether or not the row exists.
            tmpTable.Remove(key);
        }
        // Everything below requires an existing row and an ETag precondition.
        else if (oldEntity == null)
        {
            throw ChainTableUtils.GenerateBatchException(HttpStatusCode.NotFound, i);
        }
        else if (string.IsNullOrEmpty(passedEntity.ETag))
        {
            // Enforce this because real Azure table will.
            // XXX Ideally do this up front.
            throw new ArgumentException(string.Format("Operation {0} requires an explicit ETag.", i));
        }
        else if (passedEntity.ETag != ChainTable2Constants.ETAG_ANY && oldEntity.ETag != passedEntity.ETag)
        {
            throw ChainTableUtils.GenerateBatchException(HttpStatusCode.PreconditionFailed, i);
        }
        else if (opType == TableOperationType.Delete)
        {
            tmpTable.Remove(key);
        }
        else if (opType == TableOperationType.Replace)
        {
            newEntity = ChainTableUtils.CopyEntity<DynamicTableEntity>(passedEntity);
        }
        else if (opType == TableOperationType.Merge)
        {
            newEntity = ChainTableUtils.CopyEntity<DynamicTableEntity>(oldEntity);
            Merge(newEntity, passedEntity);
        }
        else
        {
            // IChainTable2 does not allow Retrieve in a batch.
            throw new NotImplementedException();
        }
        if (newEntity != null)
        {
            // Mirror the ETag from the original response when given;
            // otherwise mint the next local ETag.
            newEntity.ETag = (originalResponse != null)
                ? originalResponse[i].Etag : (tmpNextEtag++).ToString();
            newEntity.Timestamp = DateTimeOffset.MinValue; // Arbitrary, deterministic
            tmpTable[key] = newEntity;
        }
        results.Add(new TableResult
        {
            Result = passedEntity,
            HttpStatusCode = (int)statusCode,
            Etag = (newEntity != null) ? newEntity.ETag : null,
        });
    }
    // If we got here, commit.
    table = tmpTable;
    nextEtag = tmpNextEtag;
    // Propagate the assigned ETags back into the caller's entities.
    for (int i = 0; i < originalBatch.Count; i++)
    {
        if (results[i].Etag != null) // not delete
        {
            originalBatch[i].GetEntity().ETag = results[i].Etag;
        }
    }
    return (Task.FromResult((IList<TableResult>)results));
}
/// <summary>
/// Executes <paramref name="batch"/> against the new table.  Ensures the
/// partition is switched (and, before PREFER_NEW completes, that affected
/// rows are migrated), then repeatedly: atomically reads the affected rows,
/// translates each input operation against them, and attempts the translated
/// batch — retrying from the read on a batch exception (assumed to be a
/// concurrency conflict; see XXX below).
/// </summary>
/// <param name="config">Current MTable configuration; its state selects
/// tombstone behavior during translation.</param>
/// <param name="batch">The caller's batch; all operations share one partition key.</param>
/// <param name="requestOptions">Passed through to the backend table calls.</param>
/// <param name="operationContext">Passed through to the backend table calls.</param>
async Task<IList<TableResult>> ExecuteBatchOnNewTableAsync(MTableConfiguration config,
    TableBatchOperation batch, TableRequestOptions requestOptions, OperationContext operationContext)
{
    string partitionKey = ChainTableUtils.GetBatchPartitionKey(batch);
    await EnsurePartitionSwitchedAsync(partitionKey, requestOptions, operationContext);
    if (config.state <= TableClientState.PREFER_NEW)
    {
        await EnsureAffectedRowsMigratedAsync(batch, requestOptions, operationContext);
    }
Attempt:
    // Batch on new table.
    var query = GenerateQueryForAffectedRows(batch);
    IList<MTableEntity> newRows = await newTable.ExecuteQueryAtomicAsync(query, requestOptions, operationContext);
    Dictionary<string, MTableEntity> newDict = newRows.ToDictionary(ent => ent.RowKey);
    // NOTE! At this point, the read has not yet been annotated.  It is annotated below.
    // Translate each input op against the rows currently in the new table.
    // Some ops translate to nothing; inputToNewTableIndexMapping records,
    // per input index, the index in newBatch or null if dropped.
    var newBatch = new TableBatchOperation();
    var inputToNewTableIndexMapping = new List<int?>();
    for (int i = 0; i < batch.Count; i++)
    {
        TableOperation op = batch[i];
        ITableEntity passedEntity = op.GetEntity();
        MTableEntity existingEntity = newDict.GetValueOrDefault(passedEntity.RowKey);
        TableOperation newOp = null;
        HttpStatusCode? errorCode = null;
        TranslateOperationForNewTable(
            op, existingEntity, config.state <= TableClientState.USE_NEW_WITH_TOMBSTONES,
            ref newOp, ref errorCode);
        if (errorCode != null)
        {
            // Translation determined the op must fail; the atomic read above
            // is the linearization point of the whole batch.
            Debug.Assert(newOp == null);
            await monitor.AnnotateLastBackendCallAsync(wasLinearizationPoint: true);
            throw ChainTableUtils.GenerateBatchException(errorCode.Value, i);
        }
        if (newOp != null)
        {
            inputToNewTableIndexMapping.Add(newBatch.Count);
            newBatch.Add(newOp);
        }
        else
        {
            inputToNewTableIndexMapping.Add(null);
        }
    }
    await monitor.AnnotateLastBackendCallAsync();
    IList<TableResult> newResults;
    try
    {
        newResults = await newTable.ExecuteBatchAsync(newBatch, requestOptions, operationContext);
    }
    catch (ChainTableBatchException)
    {
        // XXX: Try to distinguish expected concurrency exceptions from unexpected exceptions?
        await monitor.AnnotateLastBackendCallAsync();
        goto Attempt;
    }
    // We made it!
    var results = new List<TableResult>();
    for (int i = 0; i < batch.Count; i++)
    {
        ITableEntity passedEntity = batch[i].GetEntity();
        int? newTableIndex = inputToNewTableIndexMapping[i];
        // Deletes yield no new ETag.  When the TombstoneOutputETag optional
        // bug is enabled, the (deliberately wrong) condition
        // `newTableIndex != null` is used instead of the delete check.
        string newETag =
            (IsBugEnabled(MTableOptionalBug.TombstoneOutputETag)
                ? newTableIndex != null
                : batch[i].GetOperationType() == TableOperationType.Delete)
            ? null : newResults[newTableIndex.Value].Etag;
        if (newETag != null)
        {
            passedEntity.ETag = newETag;
        }
        results.Add(new TableResult
        {
            HttpStatusCode = (int)(
                (batch[i].GetOperationType() == TableOperationType.Insert)
                ? HttpStatusCode.Created : HttpStatusCode.NoContent),
            Etag = newETag,
            Result = passedEntity,
        });
    }
    await monitor.AnnotateLastBackendCallAsync(wasLinearizationPoint: true, successfulBatchResult: results);
    return (results);
}