Example #1
        private void CommitBatchedChangesEnd(int index)
        {
            IList<TableResult> results = new TableResult[0];

            try
            {
                // Get the result of the batched operation
                results = this.tableClients[index]
                          .GetTableReference(this.tableUploadSettings.TableName)
                          .EndExecuteBatch(this.asyncResult[index]);
                this.perfHelper.ExternalOperationEnd(
                    ExternalOperationTime.ExternalOperationType.TableUpload,
                    index);
            }
            catch (Exception e)
            {
                AzureTableCommon.TableServiceExceptionAction action;
                try
                {
                    action = AzureTableCommon.ProcessTableServiceRequestException(
                        this.traceSource,
                        this.logSourceId,
                        e,
                        AzureTableCommon.TableServiceAction.SaveEntityBatchEnd);
                }
                catch (MaxRetriesException)
                {
                    // A failure that we are designed to handle did not get resolved
                    // even after the maximum number of retries. Let's abort this
                    // pass and retry on the next pass.
                    this.bufferedEventProvider.AbortEtwEventDelivery();
                    action = AzureTableCommon.TableServiceExceptionAction.Abort;
                }

                if (AzureTableCommon.TableServiceExceptionAction.Abort == action)
                {
                    // Give up on this batch
                    results = null;
                }
                else
                {
                    Debug.Assert(AzureTableCommon.TableServiceExceptionAction.ProcessResponse == action);
                }
            }

            this.asyncResult[index] = null;

            // Verify the result
            if (results != null)
            {
                if (VerifyTableServiceResponse(results, index))
                {
                    this.perfHelper.BatchUploadedToAzureTable((ulong)this.unsavedEntities[index].Count);
                }
            }

            // Clear out all entities from the table service context, so that it
            // is ready for reuse.
            ClearTableServiceContext(index);
        }
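
The VerifyTableServiceResponse helper called above is not shown in these examples. Purely as an illustrative sketch (not the original implementation), modeled on the per-result status-code checks used in DeleteEntityBatch (Example #5), it might look something like this:

        // Sketch only: scan each TableResult in the batch and report failure if
        // any status code falls outside the assumed success range.
        private bool VerifyTableServiceResponse(IList<TableResult> results, int index)
        {
            foreach (TableResult result in results)
            {
                if (result.HttpStatusCode < AzureTableCommon.HttpSuccessCodeMin ||
                    result.HttpStatusCode > AzureTableCommon.HttpSuccessCodeMax)
                {
                    this.traceSource.WriteError(
                        this.logSourceId,
                        "Batch upload to table storage returned error code {0}.",
                        result.HttpStatusCode);
                    return false;
                }
            }

            return true;
        }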
Example #2
        private void DeleteOldEntities()
        {
            var storageAccount = this.storageAccountFactory.GetStorageAccount();

            Debug.Assert(null != storageAccount);

            // Create a table client
            CloudTableClient tableClient = AzureTableCommon.CreateNewTableClient(storageAccount);

            // Use the table client as the table service context for this deletion pass
            CloudTableClient tableServiceContext = tableClient;

            // Figure out the timestamp before which all entities will be deleted
            DateTime cutoffTime = DateTime.UtcNow.Add(-this.entityDeletionAge);

            // Delete the entities in batches. Do this in a loop until our query
            // for "delete-eligible" entities yields no results.
            TableContinuationToken continuationToken = null;

            do
            {
                QueryAndDeleteEntityBatch(storageAccount, tableServiceContext, cutoffTime, ref continuationToken);

                if (this.stopping)
                {
                    this.traceSource.WriteInfo(
                        this.logSourceId,
                        "The consumer is being stopped. Therefore, no more old entities will be deleted in this pass.");
                    break;
                }
            } while (null != continuationToken);
        }
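
The queryCreationMethod delegate used above is not shown here. A minimal sketch of what it might produce, assuming the deletion filter is on the built-in Timestamp property (the real code may filter on a different column), could look like this:

        // Hypothetical query factory: selects entities whose Timestamp is older
        // than the cutoff time. The property name is an assumption.
        private TableQuery<TableEntity> CreateDeletionQuery(DateTime cutoffTime)
        {
            string filter = TableQuery.GenerateFilterConditionForDate(
                "Timestamp",
                QueryComparisons.LessThan,
                new DateTimeOffset(cutoffTime));

            return new TableQuery<TableEntity>().Where(filter);
        }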
Example #3
        public void OnEtwEventDeliveryStart()
        {
            this.perfHelper.TableUploadPeriodBegin();

            var storageAccount = this.tableUploadSettings.StorageAccountFactory.GetStorageAccount();

            Debug.Assert(null != storageAccount);

            // Create a table client
            CloudTableClient tableClient = AzureTableCommon.CreateNewTableClient(storageAccount);

            // Assign the newly created table client (built from the refreshed credentials) to each upload slot
            for (int i = 0; i < this.batchConcurrencyCount; i++)
            {
                Debug.Assert(null == this.tableClients[i]);
                this.tableClients[i] = tableClient;
            }
        }
Example #4
        private void CreateTableWorker(CloudStorageAccount storageAccount)
        {
            // Create the table client object
            CloudTableClient cloudTableClient = AzureTableCommon.CreateNewTableClient(storageAccount);

            // Create the table
#if DotNetCoreClrLinux
            cloudTableClient.GetTableReference(this.tableUploadSettings.TableName).CreateIfNotExistsAsync().Wait();
#else
            cloudTableClient.GetTableReference(this.tableUploadSettings.TableName).CreateIfNotExists();
#endif

            // Create an array of table clients and a stack of available slot indexes
            this.availableIndexes = new Stack<int>();
            this.tableClients     = new CloudTableClient[this.batchConcurrencyCount];
            for (int i = 0; i < this.batchConcurrencyCount; i++)
            {
                this.availableIndexes.Push(i);
            }
        }
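
The availableIndexes stack created above suggests a simple slot pool for concurrent batches. The helpers below are purely illustrative (the method names are not from the original source) and show one way such a pool could be used:

        // Illustrative only: take a free slot index, or return -1 if all slots
        // are currently in use.
        private int TryAcquireContextIndex()
        {
            lock (this.availableIndexes)
            {
                return this.availableIndexes.Count > 0 ? this.availableIndexes.Pop() : -1;
            }
        }

        // Illustrative only: return a slot index to the pool once its batched
        // operation has completed.
        private void ReleaseContextIndex(int index)
        {
            lock (this.availableIndexes)
            {
                this.availableIndexes.Push(index);
            }
        }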
Example #5
        private void DeleteEntityBatch(CloudTableClient client, List<TableEntity> entitiesToDelete)
        {
            var batchOperation = new TableBatchOperation();

            foreach (var entity in entitiesToDelete)
            {
                batchOperation.Add(TableOperation.Delete(entity));
            }

            // Save batched changes
            IList<TableResult> results = new TableResult[0];

            try
            {
                this.perfHelper.ExternalOperationBegin(
                    ExternalOperationTime.ExternalOperationType.TableDeletion,
                    0);
#if DotNetCoreClr
                results = client.GetTableReference(this.tableName).ExecuteBatchAsync(batchOperation).Result;
#else
                results = client.GetTableReference(this.tableName).ExecuteBatch(batchOperation);
#endif
                this.perfHelper.ExternalOperationEnd(
                    ExternalOperationTime.ExternalOperationType.TableDeletion,
                    0);
            }
            catch (Exception e)
            {
                AzureTableCommon.TableServiceExceptionAction action = AzureTableCommon.ProcessTableServiceRequestException(
                    this.traceSource,
                    this.logSourceId,
                    e,
                    AzureTableCommon.TableServiceAction.DeleteEntityBatch);
                if (AzureTableCommon.TableServiceExceptionAction.Abort == action)
                {
                    // Give up on this batch
                    return;
                }

                Debug.Assert(AzureTableCommon.TableServiceExceptionAction.ProcessResponse == action);
            }

            for (var idx = 0; idx < batchOperation.Count; idx++)
            {
                TableEntity entity = entitiesToDelete[idx];
                var         result = results[idx];

                if (Utility.IsNetworkError(result.HttpStatusCode))
                {
                    // We encountered a network error that wasn't resolved even
                    // after retries.
                    this.traceSource.WriteError(
                        this.logSourceId,
                        "Error {0} encountered when attempting to delete an entity from table storage. Entity information: {1},{2},{3}.",
                        result.HttpStatusCode,
                        entity.PartitionKey,
                        entity.RowKey,
                        entity.Timestamp);

                    throw new MaxRetriesException();
                }

                if (AzureTableCommon.HttpCodeResourceNotFound == result.HttpStatusCode)
                {
                    continue;
                }

                if (result.HttpStatusCode < AzureTableCommon.HttpSuccessCodeMin ||
                    result.HttpStatusCode > AzureTableCommon.HttpSuccessCodeMax)
                {
                    this.traceSource.WriteError(
                        this.logSourceId,
                        "FabricDCA encountered an error when attempting to delete an entity from table service. Error code: {0}. Entity information: {1},{2},{3}.",
                        result.HttpStatusCode,
                        entity.PartitionKey,
                        entity.RowKey,
                        entity.Timestamp);
                    return;
                }
            }
            this.perfHelper.BatchDeletedFromAzureTable((ulong)batchOperation.Count);
        }
Example #6
        private void QueryAndDeleteEntityBatch(CloudStorageAccount storageAccount, CloudTableClient tableServiceContext, DateTime cutoffTime, ref TableContinuationToken continuationToken)
        {
            TableContinuationToken initialContinuationToken = continuationToken;

            continuationToken = null;
            this.traceSource.WriteInfo(this.logSourceId, "Deleting table entries older than {0}.", cutoffTime);

            // Create a query object
            TableQuery<TableEntity> query = this.queryCreationMethod(cutoffTime);

            TableQuerySegment<TableEntity> resultSegment;

            try
            {
                this.perfHelper.ExternalOperationBegin(
                    ExternalOperationTime.ExternalOperationType.TableQuery,
                    0);

                // Execute the query
#if DotNetCoreClr
                resultSegment = tableServiceContext.GetTableReference(this.tableName).ExecuteQuerySegmentedAsync(query, initialContinuationToken).Result;
#else
                resultSegment = tableServiceContext.GetTableReference(this.tableName).ExecuteQuerySegmented(query, initialContinuationToken);
#endif

                this.perfHelper.ExternalOperationEnd(
                    ExternalOperationTime.ExternalOperationType.TableQuery,
                    0);
            }
            catch (Exception e)
            {
                AzureTableCommon.TableServiceExceptionAction action = AzureTableCommon.ProcessTableServiceQueryException(
                    this.traceSource,
                    this.logSourceId,
                    e,
                    AzureTableCommon.TableServiceAction.QueryEntitiesForDeletion);
                // Give up on this batch
                Debug.Assert(AzureTableCommon.TableServiceExceptionAction.Abort == action);
                return;
            }

            continuationToken = resultSegment.ContinuationToken;
            if (!resultSegment.Results.Any())
            {
                // Query did not give us any entities to delete. Nothing more to
                // be done here.
                return;
            }
            this.perfHelper.BatchQueriedFromAzureTable((ulong)resultSegment.Results.Count());

            // Create a table client
            CloudTableClient tableClient = AzureTableCommon.CreateNewTableClient(storageAccount);

            // Walk through the query results
            var    entitiesToDelete = new List<TableEntity>();
            string partitionKey     = String.Empty;
            foreach (var entity in resultSegment.Results)
            {
                // Azure table service requires all entities in a batched transaction to
                // have the same partition key. Hence if the partition key for this entity
                // is not the same as that of the entities we already added to the batch,
                // then delete those entities first and add this one to a new batch.
                if ((false == String.IsNullOrEmpty(partitionKey)) &&
                    (false == entity.PartitionKey.Equals(partitionKey)))
                {
                    DeleteEntityBatch(tableClient, entitiesToDelete);

                    // Clear for the next batch
                    entitiesToDelete.Clear();
                }

                // Record the partition key for the current batch
                partitionKey = entity.PartitionKey;

                entitiesToDelete.Add(entity);

                // If we have reached the maximum entity count for a batch, then
                // delete those entities.
                if (entitiesToDelete.Count == AzureTableCommon.MaxEntitiesInBatch)
                {
                    DeleteEntityBatch(tableClient, entitiesToDelete);

                    // Clear for the next batch
                    entitiesToDelete.Clear();
                }
            }

            if (entitiesToDelete.Any())
            {
                DeleteEntityBatch(tableClient, entitiesToDelete);
            }
        }
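
An equivalent way to honor the same-partition-key requirement, shown here only as a sketch (the helper name and the LINQ grouping are not from the original source), is to group the query results up front:

        // Sketch only: group entities by partition key and delete each group in
        // chunks of at most AzureTableCommon.MaxEntitiesInBatch entities.
        private void DeleteEntitiesGroupedByPartition(CloudTableClient tableClient, IEnumerable<TableEntity> entities)
        {
            foreach (var group in entities.GroupBy(e => e.PartitionKey))
            {
                var pending = new List<TableEntity>();
                foreach (var entity in group)
                {
                    pending.Add(entity);
                    if (pending.Count == AzureTableCommon.MaxEntitiesInBatch)
                    {
                        DeleteEntityBatch(tableClient, pending);
                        pending.Clear();
                    }
                }

                if (pending.Any())
                {
                    DeleteEntityBatch(tableClient, pending);
                }
            }
        }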
Example #7
        private bool CommitBatchedChangesBegin()
        {
            Debug.Assert(null == this.handles[this.contextIndex]);
            Debug.Assert(null == this.asyncResult[this.contextIndex]);

            if (this.stopping)
            {
                // The consumer is being stopped. As far as possible, we don't want
                // the saving of batched changes to be interrupted midway due to
                // any timeout that may be enforced on consumer stop. Therefore,
                // let's not save the batched entities at this time.
                //
                // Note that if a consumer stop is initiated while buffered event
                // file is being processed, that file gets processed again when the
                // consumer is restarted. So we should get another opportunity to
                // upload events from this buffered event file when we are restarted.
                this.traceSource.WriteInfo(
                    this.logSourceId,
                    "The consumer is being stopped. Therefore, filtered traces will not be sent for upload to table storage.");
                return false;
            }

            // Save batched changes
            bool success = true;

            try
            {
                this.perfHelper.ExternalOperationBegin(
                    ExternalOperationTime.ExternalOperationType.TableUpload,
                    this.contextIndex);
                var batchOperation = new TableBatchOperation();
                foreach (var entityToSave in this.unsavedEntities[this.contextIndex])
                {
                    batchOperation.Add(TableOperation.Insert(entityToSave));
                }

                this.asyncResult[this.contextIndex] = this.tableClients[this.contextIndex]
                                                      .GetTableReference(this.tableUploadSettings.TableName)
                                                      .BeginExecuteBatch(batchOperation, state => { }, null);
            }
            catch (Exception e)
            {
                success = false;

                try
                {
                    AzureTableCommon.ProcessTableServiceRequestException(
                        this.traceSource,
                        this.logSourceId,
                        e,
                        AzureTableCommon.TableServiceAction.SaveEntityBatchBegin);
                }
                catch (MaxRetriesException)
                {
                    // A failure that we are designed to handle did not get resolved
                    // even after the maximum number of retries. Let's abort this
                    // pass and retry on the next pass.
                    this.bufferedEventProvider.AbortEtwEventDelivery();
                }
            }

            if (false == success)
            {
                // Clear out all entities from the table service context, so that it
                // is ready for reuse.
                ClearTableServiceContext(this.contextIndex);

                Debug.Assert(null == this.handles[this.contextIndex]);
                Debug.Assert(null == this.asyncResult[this.contextIndex]);
            }
            else
            {
                // Save the handle we need to wait on in order to know when this
                // batched operation completes.
                this.handles[this.contextIndex] = this.asyncResult[this.contextIndex].AsyncWaitHandle;
            }

            return success;
        }
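
A minimal sketch of how the Begin/End pair in Examples #7 and #1 might be driven by a caller. The method name and the synchronous wait are assumptions for illustration, not part of the original source:

        // Illustrative driver: start the batched upload, wait on the handle that
        // CommitBatchedChangesBegin stored, then complete it with the End call.
        private void CommitBatchedChanges(int index)
        {
            this.contextIndex = index;
            if (false == CommitBatchedChangesBegin())
            {
                return;
            }

            // Block until the batched table operation completes.
            this.handles[index].WaitOne();
            this.handles[index] = null;

            CommitBatchedChangesEnd(index);
        }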