// Forwards a mirror batch to the reference table, skipping MTable-internal
// rows and converting each remaining MTableEntity to a plain
// DynamicTableEntity: tombstoned rows become delete-if-exists operations,
// live rows become InsertOrReplace. originalResponse is filtered in
// lockstep so the entries passed on still line up with the batch.
Task<IList<TableResult>> ExecuteExportedMirrorBatchAsync(TableBatchOperation batch, IList<TableResult> originalResponse)
{
    var translatedBatch = new TableBatchOperation();
    var translatedResponse = new List<TableResult>();
    for (int index = 0; index < batch.Count; index++)
    {
        TableOperation operation = batch[index];
        var entity = (MTableEntity)operation.GetEntity();
        // Internal bookkeeping rows are never exported to the reference table.
        if (MigratingTable.RowKeyIsInternal(entity.RowKey))
        {
            continue;
        }
        translatedResponse.Add(originalResponse[index]);
        // Mirror batches are expected to contain only InsertOrReplace operations.
        Debug.Assert(operation.GetOperationType() == TableOperationType.InsertOrReplace);
        DynamicTableEntity exportedEntity = entity.Export<DynamicTableEntity>();
        if (!entity.deleted)
        {
            exportedEntity.ETag = null;
            translatedBatch.InsertOrReplace(exportedEntity);
        }
        else
        {
            // A tombstone maps to a best-effort delete on the reference table.
            exportedEntity.ETag = ChainTable2Constants.ETAG_DELETE_IF_EXISTS;
            translatedBatch.Delete(exportedEntity);
        }
    }
    return referenceTable.ExecuteMirrorBatchAsync(translatedBatch, translatedResponse);
}
// XXX: Preserve the entity type without the caller having to specify it?
// Returns a new operation of the same operation type as op, wrapping a
// fresh TEntity copy of op's entity. Operation types not listed below
// (e.g. Retrieve) are not supported.
public static TableOperation CopyOperation<TEntity>(TableOperation op) where TEntity : ITableEntity, new()
{
    ITableEntity copiedEntity = CopyEntity<TEntity>(op.GetEntity());
    return op.GetOperationType() switch
    {
        TableOperationType.Insert => TableOperation.Insert(copiedEntity),
        TableOperationType.Replace => TableOperation.Replace(copiedEntity),
        TableOperationType.Merge => TableOperation.Merge(copiedEntity),
        TableOperationType.Delete => TableOperation.Delete(copiedEntity),
        TableOperationType.InsertOrReplace => TableOperation.InsertOrReplace(copiedEntity),
        TableOperationType.InsertOrMerge => TableOperation.InsertOrMerge(copiedEntity),
        _ => throw new NotImplementedException(),
    };
}
// Single in terms of batch
// FIXME: IChainTable2 (following IChainTable) allows Retrieve in
// ExecuteAsync but not ExecuteBatchAsync. What do we want to do?
public virtual async Task<TableResult> ExecuteAsync(TableOperation operation,
    TableRequestOptions requestOptions = null, OperationContext operationContext = null)
{
    if (operation.GetOperationType() == TableOperationType.Retrieve)
    {
        // Retrieve supports custom entity resolvers but query doesn't.
        // Work around this by querying as DynamicTableEntity and then
        // running the custom resolver. XXX: Fix IChainTable2 so we can
        // skip this step and avoid the cost.
        var pointQuery = new TableQuery<DynamicTableEntity>
        {
            FilterString = ChainTableUtils.GeneratePointRetrievalFilterCondition(operation.GetRetrievePrimaryKey())
        };
        DynamicTableEntity row = (await ExecuteQueryAtomicAsync(pointQuery, requestOptions, operationContext)).SingleOrDefault();
        if (row == null)
        {
            return new TableResult { HttpStatusCode = (int)HttpStatusCode.NotFound };
        }
        return new TableResult
        {
            HttpStatusCode = (int)HttpStatusCode.OK,
            Result = AzureTableAccessors.GetRetrieveResolver(operation)(
                row.PartitionKey, row.RowKey, row.Timestamp, row.WriteEntity(null), row.ETag),
            Etag = row.ETag
        };
    }

    // Everything else goes through the batch path as a singleton batch.
    var singletonBatch = new TableBatchOperation { operation };
    try
    {
        IList<TableResult> batchResults = await ExecuteBatchAsync(singletonBatch, requestOptions, operationContext);
        Debug.Assert(batchResults.Count == 1);
        return batchResults[0];
    }
    catch (ChainTableBatchException batchEx)
    {
        // XXX: Does this lose the stack trace?
        throw new StorageException(batchEx.RequestInformation, batchEx.Message, batchEx.InnerException);
    }
}
// Executes originalBatch against the in-memory table, atomically: all
// operations are applied to a copy first and committed only if every one
// succeeds. When originalResponse is non-null, each written entity takes
// the ETag from the corresponding entry of originalResponse (mirror mode);
// otherwise fresh ETags are minted from nextEtag. On success, the ETags of
// the caller's entities are updated in place to match the results.
public override Task<IList<TableResult>> ExecuteMirrorBatchAsync(
    TableBatchOperation originalBatch, IList<TableResult> originalResponse,
    TableRequestOptions requestOptions = null, OperationContext operationContext = null)
{
    ChainTableUtils.GetBatchPartitionKey(originalBatch); // For validation; result ignored

    // Copy the table. Entities are aliased to the original table, so don't mutate them.
    var tmpTable = new SortedDictionary<PrimaryKey, DynamicTableEntity>(table);
    int tmpNextEtag = nextEtag;
    var results = new List<TableResult>();
    for (int i = 0; i < originalBatch.Count; i++)
    {
        TableOperation op = originalBatch[i];
        TableOperationType opType = op.GetOperationType();
        ITableEntity passedEntity = op.GetEntity();
        PrimaryKey key = passedEntity.GetPrimaryKey();
        // Entity currently stored under this key in the working copy, or null.
        DynamicTableEntity oldEntity = tmpTable.GetValueOrDefault(key);
        // Entity to store after this operation; stays null for deletes/failures.
        DynamicTableEntity newEntity = null;
        HttpStatusCode statusCode = HttpStatusCode.NoContent;
        if (opType == TableOperationType.Insert)
        {
            if (oldEntity != null)
            {
                throw ChainTableUtils.GenerateBatchException(HttpStatusCode.Conflict, i);
            }
            else
            {
                newEntity = ChainTableUtils.CopyEntity<DynamicTableEntity>(passedEntity);
                statusCode = HttpStatusCode.Created;
            }
        }
        else if (opType == TableOperationType.InsertOrReplace)
        {
            newEntity = ChainTableUtils.CopyEntity<DynamicTableEntity>(passedEntity);
        }
        else if (opType == TableOperationType.InsertOrMerge)
        {
            if (oldEntity == null)
            {
                newEntity = ChainTableUtils.CopyEntity<DynamicTableEntity>(passedEntity);
            }
            else
            {
                // Merge onto a copy of the stored entity; never mutate oldEntity.
                newEntity = ChainTableUtils.CopyEntity<DynamicTableEntity>(oldEntity);
                Merge(newEntity, passedEntity);
            }
        }
        else if (opType == TableOperationType.Delete && passedEntity.ETag == ChainTable2Constants.ETAG_DELETE_IF_EXISTS)
        {
            // Best-effort delete: succeeds (NoContent) whether or not the row exists.
            tmpTable.Remove(key);
        }
        // The remaining operation types (Delete with a real ETag, Replace,
        // Merge) all require the row to exist and the ETag precondition to
        // hold; those shared checks come first.
        else if (oldEntity == null)
        {
            throw ChainTableUtils.GenerateBatchException(HttpStatusCode.NotFound, i);
        }
        else if (string.IsNullOrEmpty(passedEntity.ETag))
        {
            // Enforce this because real Azure table will.
            // XXX Ideally do this up front.
            throw new ArgumentException(string.Format("Operation {0} requires an explicit ETag.", i));
        }
        else if (passedEntity.ETag != ChainTable2Constants.ETAG_ANY && oldEntity.ETag != passedEntity.ETag)
        {
            throw ChainTableUtils.GenerateBatchException(HttpStatusCode.PreconditionFailed, i);
        }
        else if (opType == TableOperationType.Delete)
        {
            tmpTable.Remove(key);
        }
        else if (opType == TableOperationType.Replace)
        {
            newEntity = ChainTableUtils.CopyEntity<DynamicTableEntity>(passedEntity);
        }
        else if (opType == TableOperationType.Merge)
        {
            newEntity = ChainTableUtils.CopyEntity<DynamicTableEntity>(oldEntity);
            Merge(newEntity, passedEntity);
        }
        else
        {
            // IChainTable2 does not allow Retrieve in a batch.
            throw new NotImplementedException();
        }
        if (newEntity != null)
        {
            // Mirror mode reuses the ETag the original table assigned so both
            // tables stay in sync; otherwise mint a deterministic fresh one.
            newEntity.ETag = (originalResponse != null) ? originalResponse[i].Etag : (tmpNextEtag++).ToString();
            newEntity.Timestamp = DateTimeOffset.MinValue; // Arbitrary, deterministic
            tmpTable[key] = newEntity;
        }
        results.Add(new TableResult
        {
            Result = passedEntity,
            HttpStatusCode = (int)statusCode,
            Etag = (newEntity != null) ? newEntity.ETag : null,
        });
    }
    // If we got here, commit.
    table = tmpTable;
    nextEtag = tmpNextEtag;
    // Mimic the real client: propagate the new ETags back into the caller's entities.
    for (int i = 0; i < originalBatch.Count; i++)
    {
        if (results[i].Etag != null) // not delete
        {
            originalBatch[i].GetEntity().ETag = results[i].Etag;
        }
    }
    return (Task.FromResult((IList<TableResult>)results));
}
// Translates a single caller operation into the operation to execute against
// the new table, given the MTable entity currently stored there for the same
// key (existingEntity, null if absent). On success newOp is set to the
// translated operation (it may remain null for a delete-if-exists of a
// nonexistent row, meaning nothing needs to be issued); on failure errorCode
// is set to the HTTP status to report to the caller. leaveTombstones selects
// whether deletes are realized as tombstone rows (deleted = true) or as real
// deletes on the new table.
void TranslateOperationForNewTable(
    TableOperation op, MTableEntity existingEntity, bool leaveTombstones,
    ref TableOperation newOp, ref HttpStatusCode? errorCode)
{
    ITableEntity passedEntity = op.GetEntity();
    TableOperationType opType = op.GetOperationType();
    switch (opType)
    {
        case TableOperationType.Insert:
            if (existingEntity == null)
            {
                newOp = TableOperation.Insert(ChainTableUtils.CopyEntity<MTableEntity>(passedEntity));
            }
            else if (existingEntity.deleted)
            {
                // A tombstone occupies the row, so an Insert becomes a
                // conditional Replace of the tombstone.
                newOp = TableOperation.Replace(ImportWithIfMatch(passedEntity, existingEntity.ETag));
            }
            else
            {
                errorCode = HttpStatusCode.Conflict;
            }
            break;
        case TableOperationType.Replace:
            if ((errorCode = CheckExistingEntity(passedEntity, existingEntity)) == null)
            {
                newOp = TableOperation.Replace(ImportWithIfMatch(passedEntity, existingEntity.ETag));
            }
            break;
        case TableOperationType.Merge:
            if ((errorCode = CheckExistingEntity(passedEntity, existingEntity)) == null)
            {
                newOp = TableOperation.Merge(ImportWithIfMatch(passedEntity, existingEntity.ETag));
            }
            break;
        case TableOperationType.Delete:
            // With the DeletePrimaryKey bug enabled, the translated entity
            // deliberately omits its primary key.
            string buggablePartitionKey, buggableRowKey;
            if (IsBugEnabled(MTableOptionalBug.DeletePrimaryKey))
            {
                buggablePartitionKey = buggableRowKey = null;
            }
            else
            {
                buggablePartitionKey = passedEntity.PartitionKey;
                buggableRowKey = passedEntity.RowKey;
            }
            if (leaveTombstones)
            {
                if (passedEntity.ETag == ChainTable2Constants.ETAG_DELETE_IF_EXISTS)
                {
                    // Unconditional delete: unconditionally write a tombstone.
                    newOp = TableOperation.InsertOrReplace(new MTableEntity
                    {
                        PartitionKey = buggablePartitionKey,
                        RowKey = buggableRowKey,
                        deleted = true
                    });
                }
                else if ((errorCode = CheckExistingEntity(passedEntity, existingEntity)) == null)
                {
                    // Conditional delete: replace the existing row with a
                    // tombstone, guarded by the existing ETag.
                    newOp = TableOperation.Replace(new MTableEntity
                    {
                        PartitionKey = buggablePartitionKey,
                        RowKey = buggableRowKey,
                        deleted = true,
                        ETag = existingEntity.ETag
                    });
                }
            }
            else
            {
                if (passedEntity.ETag == ChainTable2Constants.ETAG_DELETE_IF_EXISTS)
                {
                    if (existingEntity != null)
                    {
                        newOp = TableOperation.Delete(new MTableEntity
                        {
                            PartitionKey = buggablePartitionKey,
                            RowKey = buggableRowKey,
                            // It's OK to delete the entity and return success whether or not
                            // the entity is a tombstone by the time it is actually deleted.
                            ETag = IsBugEnabled(MTableOptionalBug.DeleteNoLeaveTombstonesETag)
                                ? null : ChainTable2Constants.ETAG_ANY
                        });
                    }
                    // Otherwise generate nothing.
                    // FIXME: This is not linearizable! It can also generate empty batches.
                }
                else if ((errorCode = CheckExistingEntity(passedEntity, existingEntity)) == null)
                {
                    // Another client in USE_NEW_WITH_TOMBSTONES could concurrently replace the
                    // entity with a tombstone, in which case we need to return 404 to the caller,
                    // hence this needs to be conditioned on the existing ETag.
                    newOp = TableOperation.Delete(new MTableEntity
                    {
                        PartitionKey = buggablePartitionKey,
                        RowKey = buggableRowKey,
                        ETag = IsBugEnabled(MTableOptionalBug.DeleteNoLeaveTombstonesETag)
                            ? null : existingEntity.ETag
                    });
                }
            }
            break;
        case TableOperationType.InsertOrReplace:
            newOp = TableOperation.InsertOrReplace(ChainTableUtils.CopyEntity<MTableEntity>(passedEntity));
            break;
        case TableOperationType.InsertOrMerge:
            newOp = TableOperation.InsertOrMerge(ChainTableUtils.CopyEntity<MTableEntity>(passedEntity));
            break;
        default:
            // Retrieve (and anything new) is not translatable here.
            throw new NotImplementedException();
    }
}