private void CommitBatchedChangesEnd(int index)
{
    IList<TableResult> results = new TableResult[0];
    try
    {
        // Get the result of the batched operation
        results = this.tableClients[this.contextIndex]
            .GetTableReference(this.tableUploadSettings.TableName)
            .EndExecuteBatch(this.asyncResult[this.contextIndex]);
        this.perfHelper.ExternalOperationEnd(
            ExternalOperationTime.ExternalOperationType.TableUpload,
            index);
    }
    catch (Exception e)
    {
        AzureTableCommon.TableServiceExceptionAction action;
        try
        {
            action = AzureTableCommon.ProcessTableServiceRequestException(
                         this.traceSource,
                         this.logSourceId,
                         e,
                         AzureTableCommon.TableServiceAction.SaveEntityBatchEnd);
        }
        catch (MaxRetriesException)
        {
            // A failure that we are designed to handle did not get resolved
            // even after the maximum number of retries. Let's abort this
            // pass and retry on the next pass.
            this.bufferedEventProvider.AbortEtwEventDelivery();
            action = AzureTableCommon.TableServiceExceptionAction.Abort;
        }

        if (AzureTableCommon.TableServiceExceptionAction.Abort == action)
        {
            // Give up on this batch
            results = null;
        }
        else
        {
            Debug.Assert(AzureTableCommon.TableServiceExceptionAction.ProcessResponse == action);
        }
    }

    this.asyncResult[index] = null;

    // Verify the result
    if ((results != null) && VerifyTableServiceResponse(results, index))
    {
        this.perfHelper.BatchUploadedToAzureTable((ulong)this.unsavedEntities[index].Count);
    }

    // Clear out all entities from the table service context, so that it
    // is ready for reuse.
    ClearTableServiceContext(index);
}
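// Illustrative sketch only, not from the original source: the actual body of
// VerifyTableServiceResponse is not shown above. A verification helper in its
// spirit could walk the per-entity TableResults the same way DeleteEntityBatch
// below does, treating any status outside the HTTP success range as a failure.
// The helper name and exact checks here are assumptions.
private bool VerifyTableServiceResponseSketch(IList<TableResult> results)
{
    foreach (var result in results)
    {
        // Azure Table batch results carry one HTTP status code per entity.
        if (result.HttpStatusCode < AzureTableCommon.HttpSuccessCodeMin ||
            result.HttpStatusCode > AzureTableCommon.HttpSuccessCodeMax)
        {
            return false;
        }
    }

    return true;
}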
private void DeleteEntityBatch(CloudTableClient client, List<TableEntity> entitiesToDelete)
{
    var batchOperation = new TableBatchOperation();
    foreach (var entity in entitiesToDelete)
    {
        batchOperation.Add(TableOperation.Delete(entity));
    }

    // Save batched changes
    IList<TableResult> results = new TableResult[0];
    try
    {
        this.perfHelper.ExternalOperationBegin(
            ExternalOperationTime.ExternalOperationType.TableDeletion,
            0);
#if DotNetCoreClr
        results = client.GetTableReference(this.tableName).ExecuteBatchAsync(batchOperation).Result;
#else
        results = client.GetTableReference(this.tableName).ExecuteBatch(batchOperation);
#endif
        this.perfHelper.ExternalOperationEnd(
            ExternalOperationTime.ExternalOperationType.TableDeletion,
            0);
    }
    catch (Exception e)
    {
        AzureTableCommon.TableServiceExceptionAction action =
            AzureTableCommon.ProcessTableServiceRequestException(
                this.traceSource,
                this.logSourceId,
                e,
                AzureTableCommon.TableServiceAction.DeleteEntityBatch);
        if (AzureTableCommon.TableServiceExceptionAction.Abort == action)
        {
            // Give up on this batch
            return;
        }

        Debug.Assert(AzureTableCommon.TableServiceExceptionAction.ProcessResponse == action);
    }

    for (var idx = 0; idx < batchOperation.Count; idx++)
    {
        TableEntity entity = entitiesToDelete[idx];
        var result = results[idx];
        if (Utility.IsNetworkError(result.HttpStatusCode))
        {
            // We encountered a network error that wasn't resolved even
            // after retries.
            this.traceSource.WriteError(
                this.logSourceId,
                "Error {0} encountered when attempting to delete an entity from table storage. Entity information: {1},{2},{3}.",
                result.HttpStatusCode,
                entity.PartitionKey,
                entity.RowKey,
                entity.Timestamp);
            throw new MaxRetriesException();
        }

        if (AzureTableCommon.HttpCodeResourceNotFound == result.HttpStatusCode)
        {
            // The entity is already gone; nothing more to do for it.
            continue;
        }

        if (result.HttpStatusCode < AzureTableCommon.HttpSuccessCodeMin ||
            result.HttpStatusCode > AzureTableCommon.HttpSuccessCodeMax)
        {
            this.traceSource.WriteError(
                this.logSourceId,
                "FabricDCA encountered an error when attempting to delete an entity from table service. Error code: {0}. Entity information: {1},{2},{3}.",
                result.HttpStatusCode,
                entity.PartitionKey,
                entity.RowKey,
                entity.Timestamp);
            return;
        }
    }

    this.perfHelper.BatchDeletedFromAzureTable((ulong)batchOperation.Count);
}
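// Illustrative sketch only, not from the original source: the Azure Table
// service limits a batch to 100 operations, and every operation in a batch
// must target the same partition key. A caller would therefore typically
// group and chunk entities before invoking DeleteEntityBatch, roughly as
// below. The helper name is an assumption; requires System.Linq for GroupBy.
private void DeleteEntitiesInChunks(CloudTableClient client, List<TableEntity> entities)
{
    const int maxBatchSize = 100; // Azure Table service per-batch operation limit

    foreach (var partitionGroup in entities.GroupBy(e => e.PartitionKey))
    {
        var chunk = new List<TableEntity>(maxBatchSize);
        foreach (var entity in partitionGroup)
        {
            chunk.Add(entity);
            if (chunk.Count == maxBatchSize)
            {
                DeleteEntityBatch(client, chunk);
                chunk = new List<TableEntity>(maxBatchSize);
            }
        }

        if (chunk.Count > 0)
        {
            DeleteEntityBatch(client, chunk);
        }
    }
}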
private bool CommitBatchedChangesBegin()
{
    Debug.Assert(null == this.handles[this.contextIndex]);
    Debug.Assert(null == this.asyncResult[this.contextIndex]);

    if (this.stopping)
    {
        // The consumer is being stopped. As far as possible, we don't want
        // the saving of batched changes to be interrupted midway due to
        // any timeout that may be enforced on consumer stop. Therefore,
        // let's not save the batched entities at this time.
        //
        // Note that if a consumer stop is initiated while a buffered event
        // file is being processed, that file gets processed again when the
        // consumer is restarted. So we should get another opportunity to
        // upload events from this buffered event file when we are restarted.
        this.traceSource.WriteInfo(
            this.logSourceId,
            "The consumer is being stopped. Therefore, filtered traces will not be sent for upload to table storage.");
        return false;
    }

    // Save batched changes
    bool success = true;
    try
    {
        this.perfHelper.ExternalOperationBegin(
            ExternalOperationTime.ExternalOperationType.TableUpload,
            this.contextIndex);

        var batchOperation = new TableBatchOperation();
        foreach (var entityToSave in this.unsavedEntities[this.contextIndex])
        {
            batchOperation.Add(TableOperation.Insert(entityToSave));
        }

        this.asyncResult[this.contextIndex] = this.tableClients[this.contextIndex]
            .GetTableReference(this.tableUploadSettings.TableName)
            .BeginExecuteBatch(batchOperation, state => { }, null);
    }
    catch (Exception e)
    {
        success = false;
        try
        {
            AzureTableCommon.ProcessTableServiceRequestException(
                this.traceSource,
                this.logSourceId,
                e,
                AzureTableCommon.TableServiceAction.SaveEntityBatchBegin);
        }
        catch (MaxRetriesException)
        {
            // A failure that we are designed to handle did not get resolved
            // even after the maximum number of retries. Let's abort this
            // pass and retry on the next pass.
            this.bufferedEventProvider.AbortEtwEventDelivery();
        }
    }

    if (false == success)
    {
        // Clear out all entities from the table service context, so that it
        // is ready for reuse.
        ClearTableServiceContext(this.contextIndex);
        Debug.Assert(null == this.handles[this.contextIndex]);
        Debug.Assert(null == this.asyncResult[this.contextIndex]);
    }
    else
    {
        // Save the handle we need to wait on in order to know when this
        // batched operation completes.
        this.handles[this.contextIndex] = this.asyncResult[this.contextIndex].AsyncWaitHandle;
    }

    return success;
}
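// Illustrative sketch only, not from the original source: how the begin/end
// pair might be driven. CommitBatchedChangesBegin stores the IAsyncResult's
// wait handle in this.handles[this.contextIndex]; once that handle is
// signaled, CommitBatchedChangesEnd harvests the batch result and recycles
// the context. The method name and the handle reset below are assumptions.
private void FlushBatchedChangesSketch()
{
    if (false == CommitBatchedChangesBegin())
    {
        return;
    }

    // Block until the asynchronous ExecuteBatch call completes. A real
    // caller might instead wait on several handles at once (for example
    // via WaitHandle.WaitAny) to keep multiple contexts in flight.
    this.handles[this.contextIndex].WaitOne();
    this.handles[this.contextIndex] = null;

    CommitBatchedChangesEnd(this.contextIndex);
}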