/// <summary>
/// Does a stream already exist for this event stream identity?
/// </summary>
/// <remarks>
/// We use the existence of the stream footer record as proof of stream existence
/// </remarks>
protected internal async Task<bool> StreamAlreadyExists()
{
    TableEntityKeyRecord streamFooter = null;

    TableOperation getKeyRecord = TableOperation.Retrieve<TableEntityKeyRecord>(
        this.InstanceKey,
        SequenceNumberAsString(0));

    // Make sure the underlying table exists before querying it
    await Table.CreateIfNotExistsAsync();

    TableResult getFooter = await Table.ExecuteAsync(getKeyRecord);
    if (getFooter?.Result != null)
    {
        streamFooter = (TableEntityKeyRecord)getFooter.Result;
    }

    return (streamFooter != null);
}
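// For call paths that cannot await, the same footer lookup can also be
// performed synchronously with Table.Execute: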
/// <summary>
/// Does a stream already exist for this event stream identity?
/// </summary>
/// <remarks>
/// We use the existence of the stream footer record as proof of stream existence
/// </remarks>
protected internal bool StreamAlreadyExists()
{
    // TableResult.Result is null if the footer row does not exist
    TableEntityKeyRecord streamFooter = (TableEntityKeyRecord)Table.Execute(
        TableOperation.Retrieve<TableEntityKeyRecord>(
            this.InstanceKey,
            SequenceNumberAsString(0)),
        requestOptions: null,
        operationContext: GetDefaultOperationContext()).Result;

    return (streamFooter != null);
}
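// Usage sketch (illustrative - VerifyStreamIsNew is an assumed name, not part of
// the class above): a "must be a new stream" constraint can fail fast by checking
// for the footer record before any events are written.
private async Task VerifyStreamIsNew()
{
    if (await StreamAlreadyExists())
    {
        throw new EventStreamWriteException(this, 0,
            message: "An event stream already exists for this identity",
            source: "Table Event Stream Writer");
    }
}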
/// <summary>
/// Increment the sequence number for this event stream and return the new number
/// </summary>
/// <remarks>
/// This is done before the event itself is written, so that a partial failure leaves
/// a gap in the event stream, which is less harmful than an overwritten event record
/// </remarks>
private async Task<int> IncrementSequenceNumber()
{
    bool recordUpdated = false;
    int tries = 0;

    TableEntityKeyRecord streamFooter = null;

    while (!recordUpdated)
    {
        tries += 1;

        // Read in the current TableEntityKeyRecord (the stream footer)
        streamFooter = (TableEntityKeyRecord)Table.Execute(
            TableOperation.Retrieve<TableEntityKeyRecord>(
                this.InstanceKey,
                SequenceNumberAsString(0)),
            operationContext: GetDefaultOperationContext()).Result;

        if (null == streamFooter)
        {
            streamFooter = new TableEntityKeyRecord(this);
        }
        streamFooter.LastSequence += 1;

        try
        {
            TableResult tres;
            if (string.IsNullOrWhiteSpace(streamFooter.ETag))
            {
                // No ETag - the footer has never been saved, so insert it
                tres = await Table.ExecuteAsync(TableOperation.Insert(streamFooter));
            }
            else
            {
                // Replace sends the ETag read above, so a concurrent update
                // of the footer fails with 412 (Precondition Failed)
                tres = await Table.ExecuteAsync(TableOperation.Replace(streamFooter));
            }
            if (tres.HttpStatusCode == 204)
            {
                recordUpdated = true;
            }
        }
        catch (Microsoft.Azure.Cosmos.Table.StorageException sEx)
        {
            if (sEx.RequestInformation.HttpStatusCode == (int)HttpStatusCode.PreconditionFailed)
            {
                // Precondition Failed - could not update the footer due to a
                // concurrency clash; back off a random-ish amount of time and retry
                recordUpdated = false;
                int delayMilliseconds = 13 * new Random().Next(10, 100);
                await Task.Delay(delayMilliseconds);
            }
            else
            {
                throw new EventStreamWriteException(this,
                    streamFooter.LastSequence,
                    message: "Unable to increment the stream sequence number due to storage error",
                    source: "Table Event Stream Writer",
                    innerException: sEx);
            }
        }

        if (tries > 500)
        {
            // Catastrophic contention - give up rather than spin forever
            throw new EventStreamWriteException(this,
                streamFooter.LastSequence,
                message: "Unable to increment the stream sequence number due to deadlock",
                source: "Table Event Stream Writer");
        }
    }

    // The footer is always assigned inside the loop, so it cannot be null here
    if (streamFooter.Deleting)
    {
        // Do not allow a write to an event stream that is being deleted
        throw new EventStreamWriteException(this,
            streamFooter.LastSequence,
            message: "Unable to write to this event stream as it is being deleted",
            source: "Table Event Stream Writer");
    }

    return streamFooter.LastSequence;
}
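// Usage sketch (illustrative - AppendEvent, IEventInstance and MakeDynamicTableEntity
// are assumed names, not part of the class above): the writer claims a sequence
// number first, then writes the event row. If the process dies between the two
// steps, the stream is left with a harmless gap, which is exactly the trade-off
// the remarks above describe.
public async Task<int> AppendEvent(IEventInstance eventToWrite)
{
    int sequenceNumber = await IncrementSequenceNumber();

    // Assumed helper that maps the event onto a table row keyed by the stream's
    // partition key and the claimed sequence number
    DynamicTableEntity eventRow = MakeDynamicTableEntity(eventToWrite, sequenceNumber);
    await Table.ExecuteAsync(TableOperation.Insert(eventRow));

    return sequenceNumber;
}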
/// <summary>
/// Delete all the records in the table linked to this event stream
/// </summary>
public async Task DeleteStream()
{
    // 1 - Mark the stream footer as "Deleting" so no new writes are accepted
    bool recordUpdated = false;
    int tries = 0;

    TableEntityKeyRecord streamFooter = null;

    while (!recordUpdated)
    {
        tries += 1;

        // Read in the current TableEntityKeyRecord (the stream footer)
        streamFooter = (TableEntityKeyRecord)Table.Execute(
            TableOperation.Retrieve<TableEntityKeyRecord>(
                this.InstanceKey,
                SequenceNumberAsString(0)),
            operationContext: GetDefaultOperationContext()).Result;

        if (null == streamFooter)
        {
            streamFooter = new TableEntityKeyRecord(this);
        }
        streamFooter.Deleting = true;
        string lastETag = streamFooter.ETag;

        try
        {
            // The footer's ETag is passed as an explicit If-Match header, so a
            // concurrent update of the footer surfaces as 412 (Precondition Failed)
            TableResult tres = await Table.ExecuteAsync(
                TableOperation.InsertOrReplace(streamFooter),
                null,
                new OperationContext
                {
                    UserHeaders = new Dictionary<string, string>
                    {
                        { "If-Match", lastETag }
                    }
                });

            if (tres.HttpStatusCode == 204)
            {
                recordUpdated = true;
            }
        }
        catch (Microsoft.Azure.Cosmos.Table.StorageException sEx)
        {
            if (sEx.RequestInformation.HttpStatusCode == (int)HttpStatusCode.PreconditionFailed)
            {
                // Precondition Failed - could not update the footer due to a
                // concurrency clash; back off a random-ish amount of time and retry
                recordUpdated = false;
                int delayMilliseconds = 13 * new Random().Next(10, 100);
                await Task.Delay(delayMilliseconds);
            }
            else
            {
                throw new EventStreamWriteException(this,
                    streamFooter.LastSequence,
                    message: "Unable to set the Deleting flag due to a storage error",
                    source: "Table Event Stream Writer",
                    innerException: sEx);
            }
        }

        if (tries > 500)
        {
            // Catastrophic contention - give up rather than spin forever
            throw new EventStreamWriteException(this,
                streamFooter.LastSequence,
                message: "Unable to set the Deleting flag due to deadlock",
                source: "Table Event Stream Writer");
        }
    }

    // 2 - Delete the actual stream records in reverse order
    if (Table != null)
    {
        // We need a continuation token as the rows come back in chunks of up to 100
        TableContinuationToken token = new TableContinuationToken();
        TableQuery getEventsToDeleteQuery = DeleteRowsQuery().Take(MAX_BATCH_SIZE);
        do
        {
            var batches = new List<TableBatchOperation>();
            int nextBatch = 0;

            // Execute the query for the next segment of rows
            var segment = Table.ExecuteQuerySegmented(
                getEventsToDeleteQuery,
                token,
                operationContext: GetDefaultOperationContext());

            // Update the continuation token to get the next chunk of records
            token = segment.ContinuationToken;

            int currentRow = 0;
            batches.Add(new TableBatchOperation());
            foreach (DynamicTableEntity dteRow in segment)
            {
                // Entity group transactions are limited to 100 operations,
                // so start a new batch every MAX_BATCH_SIZE rows
                if (currentRow % MAX_BATCH_SIZE == 0)
                {
                    nextBatch += 1;
                    batches.Add(new TableBatchOperation());
                }
                batches[nextBatch].Add(TableOperation.Delete(dteRow));
                currentRow += 1;
            }

            foreach (var batchExecute in batches)
            {
                if (batchExecute.Count > 0)
                {
                    Table.ExecuteBatch(batchExecute);
                }
            }
        } while (null != token);
    }
}
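// Usage sketch (illustrative - DeleteStreamAndVerify is an assumed name, and this
// assumes DeleteRowsQuery() includes the sequence-zero footer row): the batch
// deletes only work because every row of one stream shares the same partition key
// (the InstanceKey), and once the footer is gone StreamAlreadyExists can be used
// to confirm that the stream was removed.
public async Task DeleteStreamAndVerify()
{
    await DeleteStream();

    if (await StreamAlreadyExists())
    {
        throw new EventStreamWriteException(this, 0,
            message: "Event stream records still exist after deletion",
            source: "Table Event Stream Writer");
    }
}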