public async Task ExecuteBatchOperation(TableBatchOperation bigBatch)
{
    // Azure Table Storage rejects batches of more than 100 operations,
    // so split the incoming batch into sub-batches of at most 100.
    int taken = 0;
    while (taken < bigBatch.Count)
    {
        var partBatch = new TableBatchOperation();
        foreach (TableOperation operation in bigBatch.Skip(taken).Take(100))
        {
            partBatch.Add(operation); // copies the operation into the sub-batch (not an Insert operation)
        }
        taken += 100;
        if (partBatch.Count > 0)
        {
            await table.ExecuteBatchAsync(partBatch);
        }
    }
}
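For context, a minimal usage sketch of the method above (an illustrative example, not part of the original code): it hosts ExecuteBatchOperation in a small class with a CloudTable field, assuming the Microsoft.WindowsAzure.Storage client library; the development-storage connection string and the table name "entities" are placeholders.

// A minimal usage sketch, assuming the Microsoft.WindowsAzure.Storage client
// library; the connection string and table name below are placeholders.
using System.Linq;
using System.Threading.Tasks;
using Microsoft.WindowsAzure.Storage;
using Microsoft.WindowsAzure.Storage.Table;

public class BatchWriter
{
    private readonly CloudTable table;

    public BatchWriter(CloudTable table)
    {
        this.table = table;
    }

    // Same method as above, repeated here so the sketch is self-contained.
    public async Task ExecuteBatchOperation(TableBatchOperation bigBatch)
    {
        int taken = 0;
        while (taken < bigBatch.Count)
        {
            var partBatch = new TableBatchOperation();
            foreach (TableOperation operation in bigBatch.Skip(taken).Take(100))
            {
                partBatch.Add(operation);
            }
            taken += 100;
            if (partBatch.Count > 0)
            {
                await table.ExecuteBatchAsync(partBatch);
            }
        }
    }

    public static async Task Example()
    {
        var account = CloudStorageAccount.Parse("UseDevelopmentStorage=true");
        var client = account.CreateCloudTableClient();
        var table = client.GetTableReference("entities");
        await table.CreateIfNotExistsAsync();

        // All operations in one Azure Table batch must share a partition key.
        var bigBatch = new TableBatchOperation();
        for (int i = 0; i < 250; i++)
        {
            bigBatch.Add(TableOperation.InsertOrReplace(
                new DynamicTableEntity("partition", "row-" + i)));
        }
        // 250 operations are sent as sub-batches of 100, 100 and 50.
        await new BatchWriter(table).ExecuteBatchOperation(bigBatch);
    }
}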
protected override void IndexCore(string partitionName, IEnumerable<TIndexed> items)
{
    var batch = new TableBatchOperation();
    foreach (var item in items)
    {
        batch.Add(TableOperation.InsertOrReplace(ToTableEntity(item)));
    }
    var table = GetCloudTable();
    var options = new TableRequestOptions()
    {
        PayloadFormat = TablePayloadFormat.Json,
        MaximumExecutionTime = _Timeout,
        ServerTimeout = _Timeout,
    };
    var context = new OperationContext();
    Queue<TableBatchOperation> batches = new Queue<TableBatchOperation>();
    batches.Enqueue(batch);
    while (batches.Count > 0)
    {
        batch = batches.Dequeue();
        try
        {
            Stopwatch watch = new Stopwatch();
            watch.Start();
            if (batch.Count > 1)
            {
                table.ExecuteBatchAsync(batch, options, context).GetAwaiter().GetResult();
            }
            else if (batch.Count == 1)
            {
                table.ExecuteAsync(batch[0], options, context).GetAwaiter().GetResult();
            }
            Interlocked.Add(ref _IndexedEntities, batch.Count);
        }
        catch (Exception ex)
        {
            if (IsError413(ex)) // 413: request body too large
            {
                // Split the offending batch in half and retry both halves.
                var split = batch.Count / 2;
                var batch1 = batch.Take(split).ToList();
                var batch2 = batch.Skip(split).ToList();
                batches.Enqueue(ToBatch(batch1));
                batches.Enqueue(ToBatch(batch2));
            }
            else if (Helper.IsError(ex, "EntityTooLarge"))
            {
                // A single entity exceeds the size limit: offload its payload
                // to blob storage, shrink the entity, then retry the batch.
                var op = GetFaultyOperation(ex, batch);
                var entity = (DynamicTableEntity)GetEntity(op);
                var serialized = entity.Serialize();
                Configuration
                    .GetBlocksContainer()
                    .GetBlockBlobReference(entity.GetFatBlobName())
                    .UploadFromByteArrayAsync(serialized, 0, serialized.Length).GetAwaiter().GetResult();
                entity.MakeFat(serialized.Length);
                batches.Enqueue(batch);
            }
            else
            {
                // Unknown failure: log the entities, re-queue, and surface the error.
                IndexerTrace.ErrorWhileImportingEntitiesToAzure(batch.Select(b => GetEntity(b)).ToArray(), ex);
                batches.Enqueue(batch);
                throw;
            }
        }
    }
}
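The method above leans on several helpers that are not shown (IsError413, Helper.IsError, ToBatch, GetFaultyOperation). Below is one plausible sketch of them, assuming StorageException from the same client library; the "index:message" batch error format parsed in GetFaultyOperation is an assumption about the service response, not something the source confirms.

// Hedged sketches of the helpers used above but not shown. They assume
// StorageException from Microsoft.WindowsAzure.Storage; the batch error
// message format ("<elementIndex>:<message>") is an assumption.
using System;
using System.Collections.Generic;
using System.Linq;
using Microsoft.WindowsAzure.Storage;
using Microsoft.WindowsAzure.Storage.Table;

static class BatchHelpers
{
    // True when the service rejected the request body as too large (HTTP 413).
    public static bool IsError413(Exception ex)
    {
        var storageEx = ex as StorageException;
        return storageEx != null
            && storageEx.RequestInformation != null
            && storageEx.RequestInformation.HttpStatusCode == 413;
    }

    // True when the storage error code matches (e.g. "EntityTooLarge").
    public static bool IsError(Exception ex, string code)
    {
        var storageEx = ex as StorageException;
        return storageEx != null
            && storageEx.RequestInformation != null
            && storageEx.RequestInformation.ExtendedErrorInformation != null
            && storageEx.RequestInformation.ExtendedErrorInformation.ErrorCode == code;
    }

    // Rebuild a TableBatchOperation from a list of operations.
    public static TableBatchOperation ToBatch(IEnumerable<TableOperation> operations)
    {
        var batch = new TableBatchOperation();
        foreach (var op in operations)
        {
            batch.Add(op);
        }
        return batch;
    }

    // Batch failures typically report the index of the offending element at
    // the start of the error message ("3:Entity too large..."); parse it to
    // locate the operation. This parsing is an assumption, not confirmed here.
    public static TableOperation GetFaultyOperation(Exception ex, TableBatchOperation batch)
    {
        var storageEx = ex as StorageException;
        var message = storageEx?.RequestInformation?.ExtendedErrorInformation?.ErrorMessage ?? "";
        var colon = message.IndexOf(':');
        int index;
        if (colon > 0 && int.TryParse(message.Substring(0, colon), out index)
            && index >= 0 && index < batch.Count)
        {
            return batch[index];
        }
        return batch.First(); // fall back to the first operation
    }
}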
protected override void IndexCore(string partitionName, IEnumerable<TIndexed> items)
{
    var batch = new TableBatchOperation();
    foreach (var item in items)
    {
        batch.Add(TableOperation.InsertOrReplace(ToTableEntity(item)));
    }
    var table = GetCloudTable();
    var options = new TableRequestOptions()
    {
        PayloadFormat = TablePayloadFormat.Json,
        MaximumExecutionTime = _Timeout,
        ServerTimeout = _Timeout,
    };
    var context = new OperationContext();
    Queue<TableBatchOperation> batches = new Queue<TableBatchOperation>();
    batches.Enqueue(batch);
    while (batches.Count > 0)
    {
        batch = batches.Dequeue();
        try
        {
            Stopwatch watch = new Stopwatch();
            watch.Start();
            if (batch.Count > 1)
            {
                table.ExecuteBatchAsync(batch, options, context).GetAwaiter().GetResult();
            }
            else if (batch.Count == 1)
            {
                table.ExecuteAsync(batch[0], options, context).GetAwaiter().GetResult();
            }
            Interlocked.Add(ref _IndexedEntities, batch.Count);
        }
        catch (Exception ex)
        {
            if (IsError413(ex) /* request body too large */ || Helper.IsError(ex, "OperationTimedOut"))
            {
                // Shrink every pending batch (starting with the offending one)
                // to at most half the size of the batch that just failed.
                int maxSize = Math.Max(1, batch.Count / 2);
                bool workDone = false;
                Queue<TableBatchOperation> newBatches = new Queue<TableBatchOperation>();
                for (/* starting with the current batch */; ; batch = batches.Dequeue())
                {
                    while (batch.Count > maxSize)
                    {
                        newBatches.Enqueue(ToBatch(batch.Take(maxSize).ToList()));
                        batch = ToBatch(batch.Skip(maxSize).ToList());
                        workDone = true;
                    }
                    if (batch.Count > 0)
                    {
                        newBatches.Enqueue(batch);
                    }
                    if (batches.Count == 0)
                    {
                        break;
                    }
                }
                batches = newBatches;
                // If nothing could be split (every batch was already a single
                // operation), splitting cannot help: surface the error instead
                // of looping forever.
                if (!workDone)
                {
                    throw;
                }
            }
            else if (Helper.IsError(ex, "EntityTooLarge"))
            {
                // A single entity exceeds the size limit: offload its payload
                // to blob storage, shrink the entity, then retry the batch.
                var op = GetFaultyOperation(ex, batch);
                var entity = (DynamicTableEntity)GetEntity(op);
                var serialized = entity.Serialize();
                Configuration
                    .GetBlocksContainer()
                    .GetBlockBlobReference(entity.GetFatBlobName())
                    .UploadFromByteArrayAsync(serialized, 0, serialized.Length).GetAwaiter().GetResult();
                entity.MakeFat(serialized.Length);
                batches.Enqueue(batch);
            }
            else
            {
                // Unknown failure: log the entities, re-queue, and surface the error.
                IndexerTrace.ErrorWhileImportingEntitiesToAzure(batch.Select(b => GetEntity(b)).ToArray(), ex);
                batches.Enqueue(batch);
                throw;
            }
        }
    }
}
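The re-splitting loop in the catch block above is fairly dense. The sketch below extracts the same logic into a standalone helper for readability; SplitPending is a hypothetical name (not part of the original code), and ToBatch stands in for the helper sketched earlier.

// Equivalent re-splitting logic as a standalone helper (a sketch). Given the
// failed batch and the queue of still-pending batches, it returns a new queue
// in which no batch exceeds half the size of the batch that failed.
using System;
using System.Collections.Generic;
using System.Linq;
using Microsoft.WindowsAzure.Storage.Table;

static class BatchSplitter
{
    public static Queue<TableBatchOperation> SplitPending(
        TableBatchOperation failed,
        Queue<TableBatchOperation> pending,
        out bool workDone)
    {
        int maxSize = Math.Max(1, failed.Count / 2);
        workDone = false;
        var result = new Queue<TableBatchOperation>();
        foreach (var batch in new[] { failed }.Concat(pending))
        {
            if (batch.Count > maxSize)
            {
                workDone = true; // at least one batch was actually split
            }
            // Cut the batch into chunks of at most maxSize operations.
            for (int offset = 0; offset < batch.Count; offset += maxSize)
            {
                result.Enqueue(BatchHelpers.ToBatch(
                    batch.Skip(offset).Take(maxSize).ToList()));
            }
        }
        return result;
    }
}

With this helper, the catch block reduces to: compute the new queue via SplitPending, assign it to batches, and rethrow when workDone is false.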