Example #1
        private async Task ExecuteBatch(
            CloudTable table,
            TableBatchOperation batch,
            CancellationToken cancellationToken)
        {
            try
            {
                await table.ExecuteBatchAsync(batch, null, null, cancellationToken).ConfigureAwait(false);
            }
            catch (StorageException ex)
            {
                if (ex.RequestInformation.HttpStatusCode != 404)
                {
                    throw;
                }

                // Check if this 404 is because of the table not existing
                if (ex.RequestInformation.ExtendedErrorInformation?.ErrorCode == "TableNotFound")
                {
                    // The table doesn't exist yet: create it, then retry the batch
                    await table.CreateIfNotExistsAsync(null, null, cancellationToken).ConfigureAwait(false);

                    await table.ExecuteBatchAsync(batch, null, null, cancellationToken).ConfigureAwait(false);

                    return;
                }
                }

                // Most likely we are trying to delete an entity that does not exist.
                // Because the batch failed, none of the other (potentially valid) changes in it were applied.
                // The only way to recover is to retry each item individually, forgoing the transactional
                // guarantee of batching changes to a storage partition.
                var tasks = batch.Select(x => ExecuteWithCreateTable(table, x, cancellationToken));

                await Task.WhenAll(tasks).ConfigureAwait(false);
            }
        }
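Example #1 relies on an ExecuteWithCreateTable helper that the snippet does not show. A minimal sketch of what it might look like, assuming it mirrors the batch path's 404/TableNotFound handling (the body below is an assumption, not the original helper):

        private static async Task ExecuteWithCreateTable(
            CloudTable table,
            TableOperation operation,
            CancellationToken cancellationToken)
        {
            try
            {
                await table.ExecuteAsync(operation, null, null, cancellationToken).ConfigureAwait(false);
            }
            catch (StorageException ex) when (ex.RequestInformation.HttpStatusCode == 404)
            {
                if (ex.RequestInformation.ExtendedErrorInformation?.ErrorCode == "TableNotFound")
                {
                    // The table doesn't exist yet: create it and retry the single operation
                    await table.CreateIfNotExistsAsync(null, null, cancellationToken).ConfigureAwait(false);
                    await table.ExecuteAsync(operation, null, null, cancellationToken).ConfigureAwait(false);
                }

                // Any other 404 (e.g. deleting an entity that is already gone) is swallowed,
                // since the per-item fallback exists precisely to tolerate such failures.
            }
        }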
Example #2
        private static async Task ExecuteBatchCore(CloudTable table, TableBatchOperation tableBatchOperation)
        {
            try
            {
                await table.ExecuteBatchAsync(tableBatchOperation).ConfigureAwait(false);
            }
            catch (StorageException e)
            {
                var batchContents = String.Join(
                    Environment.NewLine,
                    tableBatchOperation.Select((o, i) => String.Format(
                        CultureInfo.InvariantCulture,
                        "[{0}] Type: {1} Partition: {2} Row: {3}",
                        i, o.OperationType, o.GetPartitionKey(), o.GetRowKey())));

                CommonEventSource.Log.ErrorExecutingBatchOperation(
                    e,
                    e.RequestInformation?.HttpStatusCode ?? 0,
                    e.RequestInformation?.ExtendedErrorInformation?.ErrorCode ?? "unknown",
                    e.RequestInformation?.ExtendedErrorInformation?.ErrorMessage ?? "unknown",
                    batchContents);

                throw new StorageException(
                    String.Format(
                        CultureInfo.InvariantCulture,
                        "Error executing batch operation: {0}. Status code: {1}. Batch contents: {2}",
                        e.RequestInformation?.ExtendedErrorInformation?.ErrorMessage ?? "unknown",
                        e.RequestInformation?.HttpStatusCode,
                        batchContents),
                    e);
            }
        }
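CommonEventSource is external to the snippet. A hypothetical minimal EventSource matching the call shape above (the source name, event id, and [NonEvent] bridge are assumptions; requires System.Diagnostics.Tracing):

        using System.Diagnostics.Tracing;

        [EventSource(Name = "CommonEventSource")]
        internal sealed class CommonEventSource : EventSource
        {
            public static readonly CommonEventSource Log = new CommonEventSource();

            // Public wrapper that flattens the exception before writing the ETW event.
            [NonEvent]
            public void ErrorExecutingBatchOperation(
                Exception exception, int statusCode, string errorCode, string errorMessage, string batchContents)
            {
                ErrorExecutingBatchOperation(exception.ToString(), statusCode, errorCode, errorMessage, batchContents);
            }

            [Event(1, Level = EventLevel.Error)]
            private void ErrorExecutingBatchOperation(
                string exception, int statusCode, string errorCode, string errorMessage, string batchContents)
            {
                WriteEvent(1, exception, statusCode, errorCode, errorMessage, batchContents);
            }
        }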
Example #3
 public IList<TableResult> ExecuteBatch(TableBatchOperation batch, TableRequestOptions requestOptions = null,
                                        OperationContext operationContext = null)
 {
     // Emulate a batch by executing each operation individually
     return batch.Select(operation => Execute(operation)).ToList();
 }
Example #4
        // Flush out all data to the Cosmos DB Table sink.
        private async Task FlushToTableAsync(CancellationToken cancellation)
        {
            try
            {
                List<Exception> exceptions = new List<Exception>();

                foreach (var kv in dict)
                {
                    var batchOperation = kv.Value;
                    if (batchOperation.Count > 0)
                    {
                        var subOperations = batchSizeTracker.Split(batchOperation);

                        foreach (var subOperation in subOperations)
                        {
                            var op = new TableBatchOperation();
                            foreach (var operation in subOperation)
                            {
                                op.Add(operation);
                            }

                            try
                            {
                                await Utils.ExecuteWithRetryAsync(
                                    () => cloudtable.ExecuteBatchAsync(op, cancellation)
                                    );
                            }
                            catch (Exception ex)
                            {
                                string rowKeysNotCommitted = string.Join(",", op.Select(x => x.Entity.RowKey));
                                exceptions.Add(new Exception(
                                    string.Format("{0} : offending documents having PartitionKey={1}: RowKeys:[{2}]",
                                                  ex.Message, op[0].Entity.PartitionKey, rowKeysNotCommitted), ex));
                            }
                        }
                    }
                }

                if (exceptions.Count > 0)
                {
                    throw new AggregateException(exceptions);
                }
            }
            finally
            {
                dict.Clear();
                batchSizeTracker.Clear();
            }
        }
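Neither batchSizeTracker nor Utils.ExecuteWithRetryAsync is shown. The Table service caps a batch at 100 operations and roughly 4 MB of payload, so the tracker must split along at least those limits. A simplified, assumed stand-in that splits on operation count alone:

        // Simplified stand-in for the batchSizeTracker used above: splits a batch into
        // chunks of at most 100 operations, the Table service per-batch limit. The real
        // tracker is stateful (hence the Clear() call in the finally block) and presumably
        // also enforces the ~4 MB payload limit, which this sketch ignores.
        internal sealed class BatchSizeTracker
        {
            private const int MaxOperationsPerBatch = 100;

            public IEnumerable<IEnumerable<TableOperation>> Split(TableBatchOperation batch)
            {
                for (int start = 0; start < batch.Count; start += MaxOperationsPerBatch)
                {
                    yield return batch.Skip(start).Take(MaxOperationsPerBatch).ToList();
                }
            }

            public void Clear()
            {
                // No per-batch state to reset in this simplified version.
            }
        }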
Example #5
        protected override void IndexCore(string partitionName, IEnumerable<TIndexed> items)
        {
            var batch = new TableBatchOperation();

            foreach (var item in items)
            {
                batch.Add(TableOperation.InsertOrReplace(ToTableEntity(item)));
            }

            var table = GetCloudTable();

            var options = new TableRequestOptions()
            {
                PayloadFormat        = TablePayloadFormat.Json,
                MaximumExecutionTime = _Timeout,
                ServerTimeout        = _Timeout,
            };

            var context = new OperationContext();
            Queue<TableBatchOperation> batches = new Queue<TableBatchOperation>();

            batches.Enqueue(batch);

            while (batches.Count > 0)
            {
                batch = batches.Dequeue();
                try
                {
                    Stopwatch watch = new Stopwatch();
                    watch.Start();
                    if (batch.Count > 1)
                    {
                        table.ExecuteBatchAsync(batch, options, context).GetAwaiter().GetResult();
                    }
                    else if (batch.Count == 1)
                    {
                        table.ExecuteAsync(batch[0], options, context).GetAwaiter().GetResult();
                    }
                    Interlocked.Add(ref _IndexedEntities, batch.Count);
                }
                catch (Exception ex)
                {
                    if (IsError413(ex))
                    {
                        // 413 Request Entity Too Large: split the batch in half and retry both halves
                        var split  = batch.Count / 2;
                        var batch1 = batch.Take(split).ToList();
                        var batch2 = batch.Skip(split).ToList();
                        batches.Enqueue(ToBatch(batch1));
                        batches.Enqueue(ToBatch(batch2));
                    }
                    else if (Helper.IsError(ex, "EntityTooLarge"))
                    {
                        // The entity exceeds the table size limit: spill its payload to blob
                        // storage, shrink the entity to a "fat" pointer, and retry the batch
                        var op         = GetFaultyOperation(ex, batch);
                        var entity     = (DynamicTableEntity)GetEntity(op);
                        var serialized = entity.Serialize();

                        Configuration
                        .GetBlocksContainer()
                        .GetBlockBlobReference(entity.GetFatBlobName())
                        .UploadFromByteArrayAsync(serialized, 0, serialized.Length).GetAwaiter().GetResult();
                        entity.MakeFat(serialized.Length);
                        batches.Enqueue(batch);
                    }
                    else
                    {
                        IndexerTrace.ErrorWhileImportingEntitiesToAzure(batch.Select(b => GetEntity(b)).ToArray(), ex);
                        batches.Enqueue(batch);
                        throw;
                    }
                }
            }
        }
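ToBatch, used when splitting an oversized batch, is another helper the snippet assumes; given the call sites, it presumably just repacks a list of operations into a fresh TableBatchOperation:

        // Presumed shape of the ToBatch helper: repack operations into a new batch.
        private static TableBatchOperation ToBatch(IEnumerable<TableOperation> operations)
        {
            var batch = new TableBatchOperation();
            foreach (var operation in operations)
            {
                batch.Add(operation);
            }
            return batch;
        }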
Example #6
        protected override void IndexCore(string partitionName, IEnumerable<TIndexed> items)
        {
            var batch = new TableBatchOperation();

            foreach (var item in items)
            {
                batch.Add(TableOperation.InsertOrReplace(ToTableEntity(item)));
            }

            var table = GetCloudTable();

            var options = new TableRequestOptions()
            {
                PayloadFormat        = TablePayloadFormat.Json,
                MaximumExecutionTime = _Timeout,
                ServerTimeout        = _Timeout,
            };

            var context = new OperationContext();
            Queue<TableBatchOperation> batches = new Queue<TableBatchOperation>();

            batches.Enqueue(batch);

            while (batches.Count > 0)
            {
                batch = batches.Dequeue();

                try
                {
                    Stopwatch watch = new Stopwatch();
                    watch.Start();

                    if (batch.Count > 1)
                    {
                        table.ExecuteBatchAsync(batch, options, context).GetAwaiter().GetResult();
                    }
                    else if (batch.Count == 1)
                    {
                        table.ExecuteAsync(batch[0], options, context).GetAwaiter().GetResult();
                    }

                    Interlocked.Add(ref _IndexedEntities, batch.Count);
                }
                catch (Exception ex)
                {
                    if (IsError413(ex) /* Request too large */ || Helper.IsError(ex, "OperationTimedOut"))
                    {
                        // Reduce the size of all batches to half the size of the offending batch.
                        int  maxSize  = Math.Max(1, batch.Count / 2);
                        bool workDone = false;
                        Queue<TableBatchOperation> newBatches = new Queue<TableBatchOperation>();

                        for (/* starting with the current batch */; ; batch = batches.Dequeue())
                        {
                            while (batch.Count > maxSize)
                            {
                                newBatches.Enqueue(ToBatch(batch.Take(maxSize).ToList()));
                                batch    = ToBatch(batch.Skip(maxSize).ToList());
                                workDone = true;
                            }

                            if (batch.Count > 0)
                            {
                                newBatches.Enqueue(batch);
                            }

                            if (batches.Count == 0)
                            {
                                break;
                            }
                        }

                        batches = newBatches;

                        // If nothing could be split any further, give up and rethrow
                        if (!workDone)
                        {
                            throw;
                        }
                    }
                    else if (Helper.IsError(ex, "EntityTooLarge"))
                    {
                        var op         = GetFaultyOperation(ex, batch);
                        var entity     = (DynamicTableEntity)GetEntity(op);
                        var serialized = entity.Serialize();

                        Configuration
                        .GetBlocksContainer()
                        .GetBlockBlobReference(entity.GetFatBlobName())
                        .UploadFromByteArrayAsync(serialized, 0, serialized.Length).GetAwaiter().GetResult();

                        entity.MakeFat(serialized.Length);
                        batches.Enqueue(batch);
                    }
                    else
                    {
                        IndexerTrace.ErrorWhileImportingEntitiesToAzure(batch.Select(b => GetEntity(b)).ToArray(), ex);
                        batches.Enqueue(batch);
                        throw;
                    }
                }
            }
        }
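IsError413 and Helper.IsError are external predicates. Plausible sketches, assuming they inspect the StorageException request information (not the original implementations):

        // HTTP 413 Request Entity Too Large: the serialized batch exceeded the size limit.
        private static bool IsError413(Exception ex)
        {
            return ex is StorageException storageException &&
                   storageException.RequestInformation?.HttpStatusCode == 413;
        }

        internal static class Helper
        {
            // Match the storage error code (e.g. "EntityTooLarge", "OperationTimedOut").
            public static bool IsError(Exception ex, string errorCode)
            {
                return ex is StorageException storageException &&
                       storageException.RequestInformation?.ExtendedErrorInformation?.ErrorCode == errorCode;
            }
        }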