public async Task Add(TableOperation operation) { batchOperation.Add(operation); if (operation.Entity == key) { batchContainsKey = true; } if (batchOperation.Count == AzureTableConstants.MaxBatchSize - (batchContainsKey ? 0 : 1)) { // the key serves as a synchronizer, to prevent modification by multiple grains under edge conditions, // like duplicate activations or deployments. Every batch write needs to include the key, // even if the key values don't change. if (!batchContainsKey) { if (string.IsNullOrEmpty(key.ETag)) { batchOperation.Insert(key); } else { batchOperation.Replace(key); } } await Flush().ConfigureAwait(false); batchOperation.Clear(); batchContainsKey = false; } }
public async Task UpsertEntities <T>(string tableName, List <T> data, int batchSize = 100) where T : class, ITableItem { try { var table = CloudTableClient.GetTableReference(tableName); var batchOperation = new TableBatchOperation(); batchSize = batchSize > 100 ? 100 : batchSize; var lastUsedPartitionKey = string.Empty; for (int i = 0; i < data.Count; i++) { var entry = TableEntityConvert.ToTableEntity(data[i]); var newBatch = (lastUsedPartitionKey != entry.PartitionKey && lastUsedPartitionKey.Length > 0); if (newBatch) { await table.ExecuteBatchAsync(batchOperation); batchOperation.Clear(); batchOperation.InsertOrReplace(entry); } else { batchOperation.InsertOrReplace(entry); // Execute the batch operation if we reach our batch limit or it's the end of the list. if (batchOperation.Count >= batchSize || i == data.Count - 1) { await table.ExecuteBatchAsync(batchOperation); batchOperation.Clear(); } } lastUsedPartitionKey = entry.PartitionKey; } if (batchOperation.Count > 0) { await table.ExecuteBatchAsync(batchOperation); batchOperation.Clear(); } } catch (StorageException st) when(st.RequestInformation.HttpStatusCode == 400) { Logger?.LogError($"Bad Request to table storage ({tableName}) - check model properties are all set: {JsonConvert.SerializeObject(data)}"); throw new InvalidOperationException("Upsert data is badly formed and resulted in a 400 Bad Request response from TableStorage"); } catch (Exception e) { Logger?.LogError(e, $"Error {e.Message} occurred upserting multiple entities, table name: {tableName} with generic data"); throw; } }
static async Task HideTimeoutsOfCurrentSegment(CloudTable endpointTimeoutTable, IEnumerable <TimeoutDataEntity> timeoutsOfSegment, string uniqueHiddenEndpointName) { // with batching we can at least efficiently insert long hideByPartitionKeyBatchSize = 0; var hideByPartitionBatch = new TableBatchOperation(); var hideTasks = new List <Task>(); foreach (var entitiesInTheSamePartition in timeoutsOfSegment.GroupBy(x => x.PartitionKey)) { foreach (var timeoutDataEntity in entitiesInTheSamePartition) { // entries with Guid as partition key should never be modified. We are only interested in the query entities if (Guid.TryParse(timeoutDataEntity.PartitionKey.AsSpan(), out _)) { continue; } // we don't want to preserve the etag and we always want to win timeoutDataEntity.ETag = "*"; // Fix the hiding part timeoutDataEntity.OwningTimeoutManager = uniqueHiddenEndpointName; var entitySize = timeoutDataEntity.CalculateSize(); // the batch can have max 100 items and max 4 MB of data // the partition key for all operations in the batch has to be the same if (hideByPartitionKeyBatchSize + entitySize > MaxPayloadPerBatchOperation || hideByPartitionBatch.Count == MaxOperationsPerBatchOperation) { hideTasks.Add(endpointTimeoutTable.ExecuteBatchAsync(hideByPartitionBatch.Clone())); hideByPartitionKeyBatchSize = 0; hideByPartitionBatch.Clear(); } hideByPartitionKeyBatchSize += entitySize; var tableOperation = TableOperation.Merge(timeoutDataEntity); SetEchoContentTo(tableOperation, false); hideByPartitionBatch.Add(tableOperation); } if (hideByPartitionBatch.Count > 0) { hideTasks.Add(endpointTimeoutTable.ExecuteBatchAsync(hideByPartitionBatch.Clone())); } hideByPartitionBatch.Clear(); hideByPartitionKeyBatchSize = 0; } if (hideTasks.Count > 0) { await Task.WhenAll(hideTasks); } }
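The inline comments in the snippet above spell out the Azure Table batch constraints that almost every example in this listing works around: at most 100 operations per batch, roughly 4 MB of payload, and a single partition key per batch. Below is a minimal, hedged sketch of a reusable helper that enforces the count and partition-key limits; the ExecuteChunkedBatchAsync name and MaxOperationsPerBatch constant are illustrative and not taken from any of the quoted code bases.

using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Microsoft.Azure.Cosmos.Table; // older SDKs expose the same types under Microsoft.WindowsAzure.Storage.Table

public static class TableBatchExtensions
{
    // Azure Table Storage accepts at most 100 operations per batch, and every
    // operation in a batch must target the same partition key.
    private const int MaxOperationsPerBatch = 100;

    // Illustrative helper: groups write operations by partition key and flushes
    // a batch whenever the 100-operation limit is reached. Assumes the operations
    // are writes (Insert/Merge/Replace/Delete), since Retrieve operations carry no Entity.
    public static async Task ExecuteChunkedBatchAsync(this CloudTable table, IEnumerable<TableOperation> operations)
    {
        foreach (var partitionGroup in operations.GroupBy(o => o.Entity.PartitionKey))
        {
            var batch = new TableBatchOperation();
            foreach (var operation in partitionGroup)
            {
                batch.Add(operation);
                if (batch.Count == MaxOperationsPerBatch)
                {
                    await table.ExecuteBatchAsync(batch);
                    batch.Clear();
                }
            }
            // Flush whatever is left for this partition; empty batches are never sent.
            if (batch.Count > 0)
            {
                await table.ExecuteBatchAsync(batch);
            }
        }
    }
}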
/// <summary> /// Inserts a collection of <see cref="TableEntity"/> values into a table using batch style /// operations. All entities must be insertable via batch operations. /// </summary> public static async Task InsertBatch <T>(CloudTable table, List <T> entityList) where T : ITableEntity { if (entityList.Count == 0) { return; } var operation = new TableBatchOperation(); foreach (var entity in entityList) { // Important to use InsertOrReplace here. It's possible for a populate job to be cut off in the // middle when the BuildFailure table is updated but not yet the BuildProcessed table. Hence // we'll up here again doing a batch insert. operation.InsertOrReplace(entity); if (operation.Count == MaxBatchCount) { await table.ExecuteBatchAsync(operation); operation.Clear(); } } if (operation.Count > 0) { await table.ExecuteBatchAsync(operation); } }
/// <summary> /// Writes an array of logging events to the log target. By default it iterates on all /// events and passes them to "Write" method. Inheriting classes can use this method to /// optimize batch writes. /// </summary> /// <param name="logEvents">Logging events to be written out.</param> protected async override void Write(IList <AsyncLogEventInfo> logEvents) { //must sort into containers and then into the blobs for the container if (_getTableNameDelegate == null) { _getTableNameDelegate = c => TableName.Render(c.LogEvent); } var tableBuckets = SortHelpers.BucketSort(logEvents, _getTableNameDelegate); //Iterate over all the tables being written to foreach (var tableBucket in tableBuckets) { var tableNameFinal = CheckAndRepairTableNamingRules(tableBucket.Key); InitializeTable(tableNameFinal); var batch = new TableBatchOperation(); //add each message for the destination table limit batch to 100 elements foreach (var asyncLogEventInfo in tableBucket.Value) { var entity = new NLogEntity(asyncLogEventInfo.LogEvent, Layout); batch.Insert(entity); if (batch.Count == 100) { await _table.ExecuteBatchAsync(batch); batch.Clear(); } } if (batch.Count > 0) { await _table.ExecuteBatchAsync(batch); } } }
public async Task ProcessBatchAsync(IReadOnlyList <Contact> batch, CancellationToken token) { var tableClient = AzureStorageTable.GetAzTableClient(); var azureTableBatchOperation = new TableBatchOperation(); var portionSize = 70; var portionCounter = 0; // Selected data is already in batch collection foreach (var contact in batch) { if (portionCounter >= portionSize) { await tableClient.ExecuteBatchAsync(azureTableBatchOperation); azureTableBatchOperation.Clear(); portionCounter = 0; } var contactEmails = contact.GetFacet <EmailAddressList>(); var contactPersonalInformation = contact.GetFacet <PersonalInformation>(); azureTableBatchOperation.InsertOrReplace(new AzureStorage.Model.ContactEntity ( contactEmails == null ? "N/A" : contactEmails.PreferredEmail.SmtpAddress, contact.Id, contactPersonalInformation == null ? "N/A" : contactPersonalInformation.FirstName, contactPersonalInformation == null ? "N/A" : contactPersonalInformation.LastName )); portionCounter++; } // Flush any remaining operations; executing an empty batch would throw. if (azureTableBatchOperation.Count > 0) { await tableClient.ExecuteBatchAsync(azureTableBatchOperation); } }
public static async Task <IActionResult> catWordBulk( [HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = null)] HttpRequest req, //Apparently the function is automatically handed a correctly configured object for the table [Table("categorization")] CloudTable tabCats, ILogger log) { List <bulkW2CData> allC2W; //Bulk data - array of objects with name, langu, desc as attributes string requestBody = await new StreamReader(req.Body).ReadToEndAsync(); allC2W = JsonConvert.DeserializeObject <List <bulkW2CData> >(requestBody); string dummy; int bCount = 0; foreach (bulkW2CData oneC2W in allC2W) { bCount++; await intCatWord(tabCats, "", "", oneC2W.n, oneC2W.k, true); if (bCount == 99) { dummy = (await tabCats.ExecuteBatchAsync(bulkTO)).ToString(); dummy = (await tabCats.ExecuteBatchAsync(bulkTO2)).ToString(); bCount = 0; bulkTO.Clear(); bulkTO2.Clear(); } } // Flush the remaining operations; skip empty batches, which would throw. if (bulkTO.Count > 0) { dummy = (await tabCats.ExecuteBatchAsync(bulkTO)).ToString(); } if (bulkTO2.Count > 0) { dummy = (await tabCats.ExecuteBatchAsync(bulkTO2)).ToString(); } return(new OkObjectResult("")); }
/// <summary> /// Batch insert operation for optimization purposes. /// </summary> /// <param name="tName">Name of the table</param> /// <param name="list">List of objects</param> /// <returns></returns> private async Task BatchInsertViaTableName(string tName, List <WebsitePage> list) { CloudTable table = tableClient.GetTableReference(tName); await table.CreateIfNotExistsAsync(); // create the table if it does not exist var pages = list.Where(page => page.Domain.Equals(tName)); // get all WebsitePage objects whose domain matches the table name var partitionkeys = pages.Select(page => page.PartitionKey).Distinct(); // get all distinct partition keys (keywords) from the list TableBatchOperation batchInsertOperation = new TableBatchOperation(); // create a table batch operation object foreach (string key in partitionkeys) // iterate partition keys { Dictionary <string, string> rowkeys = new Dictionary <string, string>(); // iterate through WebsitePage objects that share this partition key foreach (WebsitePage page in pages.Where(p => p.PartitionKey.Equals(key))) { if (!rowkeys.ContainsKey(page.RowKey)) // skip duplicate row keys { batchInsertOperation.InsertOrMerge(page); rowkeys.Add(page.RowKey, page.RowKey); // flush early so a single partition never exceeds the 100-operation batch limit if (batchInsertOperation.Count == 100) { await table.ExecuteBatchAsync(batchInsertOperation); batchInsertOperation.Clear(); } } } if (batchInsertOperation.Count > 0) { await table.ExecuteBatchAsync(batchInsertOperation); batchInsertOperation.Clear(); } } }
/// <summary> /// Executes batch add or delete operation on Azure table storage. /// </summary> /// <param name="batchOperation">Batch operation to be performed.</param> /// <param name="rooms">List of rooms.</param> /// <returns>Returns true if batch operation is successful else throws exception for error.</returns> private async Task <bool> ExecuteBatchOperationAsync(BatchOperation batchOperation, IList <UserFavoriteRoomEntity> rooms) { var tableBatchOperation = new TableBatchOperation(); try { int batchCount = (int)Math.Ceiling((double)rooms.Count / RoomsPerBatch); for (int i = 0; i < batchCount; i++) { tableBatchOperation.Clear(); var roomsBatch = rooms.Skip(i * RoomsPerBatch).Take(RoomsPerBatch); foreach (var room in roomsBatch) { tableBatchOperation.Add(batchOperation == BatchOperation.Insert ? TableOperation.Insert(room) : TableOperation.Delete(room)); } if (tableBatchOperation.Count > 0) { await this.cloudTable.ExecuteBatchAsync(tableBatchOperation).ConfigureAwait(false); } } return(true); } catch (Exception ex) { this.telemetryClient.TrackException(ex); throw; } }
public static async Task <IList <TableResult> > InsertOrReplaceBatchAsync <T>(this CloudTable table, IEnumerable <T> entities) where T : ITableEntity { if (entities == null) { return(null); } var result = new List <TableResult>(); var iterCount = 0; var entitiesList = entities as IList <T> ?? entities.ToList(); var entitiesBatch = entitiesList.Skip(iterCount * 100).Take(100).ToList(); var batchOperation = new TableBatchOperation(); // Use a while loop so an empty input never triggers an empty (and therefore invalid) batch execution. while (entitiesBatch.Any()) { foreach (var entity in entitiesBatch) { batchOperation.InsertOrReplace(entity); } result.AddRange(await table.ExecuteBatchAsync(batchOperation)); iterCount++; entitiesBatch = entitiesList.Skip(iterCount * 100).Take(100).ToList(); batchOperation.Clear(); } return(result); }
public static async Task <IList <TableResult> > DeleteExistingEntitiesBatchAsync <T>(this CloudTable table, IEnumerable <T> entities) where T : ITableEntity, new() { if (entities == null || !entities.Any()) { return(new List <TableResult>()); } var foundEntities = entities.GroupBy(e => e.PartitionKey); var result = new List <TableResult>(); foreach (var entitiesByPK in foundEntities) { var iterCount = 0; var entitiesBatch = entitiesByPK.Skip(iterCount * 100).Take(100).ToList(); var batchOperation = new TableBatchOperation(); while (entitiesBatch.Any()) { foreach (var entity in entitiesBatch) { batchOperation.Delete(entity); } result.AddRange(await table.ExecuteBatchAsync(batchOperation)); iterCount++; entitiesBatch = entitiesByPK.Skip(iterCount * 100).Take(100).ToList(); batchOperation.Clear(); } } return(result); }
public static async Task Remove <TEntity>(this CloudTable table, IList <TEntity> entities) where TEntity : ITableEntity, new() { if (entities == null) { throw new ArgumentNullException(nameof(entities)); } if (!entities.Any()) { return; } var batch = new TableBatchOperation(); foreach (var entity in entities) { batch.Add(TableOperation.Delete(entity)); // Table batch can have up to 100 operations. if (batch.Count == 100) { await table.ExecuteBatchAsync(batch); batch.Clear(); } } if (batch.Any()) { await table.ExecuteBatchAsync(batch); } }
private static TimeSpan InsertEntitiesInBatch(List <DynamicTableEntity> entities, CloudTable table) { TableBatchOperation batchOp = new TableBatchOperation(); Stopwatch sw = new Stopwatch(); sw.Start(); foreach (DynamicTableEntity entity in entities) { batchOp.Add(TableOperation.Insert(entity)); if (batchOp.Count > 99) { table.ExecuteBatch(batchOp); batchOp.Clear(); } } if (batchOp.Count > 0) { table.ExecuteBatch(batchOp); } sw.Stop(); return(sw.Elapsed); }
async Task <List <TableBatchResult> > InsertBatchEntiy(CloudTable table, List <AzurePointTableRow> batchItems) { TableBatchOperation batch = new TableBatchOperation(); List <TableBatchResult> results = new List <TableBatchResult>(); if (batchItems == null || batchItems.Count == 0) { throw new ApplicationException("Missing data"); } // Separate out the partition key string partionKey = batchItems[0].PartitionKey; for (int c = 0; c < batchItems.Count; c++) { if (batchItems[c].PartitionKey != partionKey) { // New partition, send the batch in order to isolate the partition key in each batch results.Add(await table.ExecuteBatchAsync(batch)); partionKey = batchItems[c].PartitionKey; batch.Clear(); } batch.Insert(batchItems[c]); } results.Add(await table.ExecuteBatchAsync(batch)); return(results); }
/// <summary> /// Deletes a collection of <see cref="TableEntity"/> values from a table using batch style /// operations. All entities must be deletable via batch operations. /// </summary> public static async Task DeleteBatch <T>(CloudTable table, List <T> entityList) where T : ITableEntity { if (entityList.Count == 0) { return; } var operation = new TableBatchOperation(); foreach (var entity in entityList) { operation.Delete(entity); if (operation.Count == MaxBatchCount) { await table.ExecuteBatchAsync(operation); operation.Clear(); } } if (operation.Count > 0) { await table.ExecuteBatchAsync(operation); } }
private async Task CommitAsync(TableBatchOperation batch) { TryLog(StructureName + " Batch count: " + batch.Count); if (batch.Count == 0) { return; } else { try { await _table.ExecuteBatchAsync(batch); TryLog("Batch executed in " + StructureName); } catch (StorageException e) { throw new InvalidOperationException($"Storage Exception ({e.RequestInformation.HttpStatusCode})\n{e.Message}\n{e.RequestInformation.HttpStatusMessage}"); } } batch.Clear(); }
public async Task RunGarbageCollectionInXTableAsync(CancellationToken cancellationToken) { var tsTable = _xTableClient.GetTableReference(_config.Value.AzStorageTable); var notBeforeTime = DateTime.UtcNow.Subtract(TimeSpan.FromDays(_config.Value.RetentionDays)); var contToken = default(TableContinuationToken); var gcQuery = new TableQuery <ContribSampleEntity>().Where( TableQuery.GenerateFilterConditionForDate( "MetricTimeStampUtc", QueryComparisons.LessThan, notBeforeTime )); _logger.LogInformation("TS Garbage Collection started"); do { var result = await tsTable.ExecuteQuerySegmentedAsync(gcQuery, contToken, cancellationToken); contToken = result.ContinuationToken; _logger.LogInformation("TS GC retrieved a batch"); if (result.Results != null) { var partitionGroup = result.Results.GroupBy(i => i.PartitionKey); foreach (var p in partitionGroup) { try { var batch = new TableBatchOperation(); foreach (var i in p) { batch.Add(TableOperation.Delete(i)); if (batch.Count % 50 == 0) { await tsTable.ExecuteBatchAsync(batch, cancellationToken); batch.Clear(); } } if (batch.Count > 0) { await tsTable.ExecuteBatchAsync(batch, cancellationToken); } _logger.LogInformation("TS GC deleted an item"); } catch (Exception exc) { _logger.LogInformation(exc, "TS GC failed to delete a batch but ignored"); } } _logger.LogInformation($"TS Garbage Collection deleted batch of {result.Results.Count} item(s)"); } }while (contToken != null && !cancellationToken.IsCancellationRequested); _logger.LogInformation("TS Garbage Collection completed"); }
private static void saveData(newsItems items, string tableName) { //return if no items to process if (items.NewsItems.Count == 0) { return; } CloudTable table = getTableStorage(tableName); // Create the table if it doesn't exist. table.CreateIfNotExists(); // Create the batch operation. TableBatchOperation batchOperation = new TableBatchOperation(); // Create the TableOperation object that inserts the customer entity. foreach (newsItem item in items.NewsItems) { batchOperation.InsertOrReplace(item); //update in batches of 100 if (batchOperation.Count >= 100) { table.ExecuteBatch(batchOperation); batchOperation.Clear(); } } // Execute the insert operation for anything left over if (batchOperation.Count > 0) { table.ExecuteBatch(batchOperation); } }
/// <summary> /// Inserts the update batch terrain data. /// </summary> /// <param name="tableName">Name of the table.</param> /// <param name="batchData">The batch data.</param> public void InsertUpdateBatchPMSEUnscheduledAdjustment(string tableName, List <TableOperation> batchData) { var cloudAccount = this.GetCloudStorageAccount(); var cloudTable = cloudAccount.CreateCloudTableClient().GetTableReference(tableName); cloudTable.CreateIfNotExists(); TableBatchOperation batchOperation = new TableBatchOperation(); for (int i = 0; i < batchData.Count; i += 100) { batchOperation.Clear(); for (int j = i; j < i + 100; j++) { if (j >= batchData.Count) { break; } batchOperation.Add(batchData[j]); } cloudTable.ExecuteBatch(batchOperation); } }
/// <summary> /// Mark the items as deleted in the active partition (this updates day-to-day users) /// </summary> /// <param name="activePartitionKey"></param> /// <param name="itemTable"></param> /// <param name="itemKeys"></param> private static async void DeleteInActivePartition(string activePartitionKey, CloudTable itemTable, List <ItemToRemove> itemKeys) { var batch = new TableBatchOperation(); foreach (var itemKey in itemKeys) { batch.Add(TableOperation.InsertOrReplace(new DeletedItemV1 { PartitionKey = activePartitionKey, RowKey = Guid.NewGuid().ToString(), ItemPartitionKey = itemKey.Partition, ItemRowKey = itemKey.Id })); if (batch.Count == 100) { await itemTable.ExecuteBatchAsync(batch); batch.Clear(); } } if (batch.Count > 0) { await itemTable.ExecuteBatchAsync(batch); } }
internal void InsertItem(string leakedAccount) { if (ValidateLine.IsMatch(leakedAccount)) { string[] entity = leakedAccount.Split(new char[] { '@' }, StringSplitOptions.RemoveEmptyEntries); string domain = entity[1]; if (!OperationDirectonary.ContainsKey(domain)) { OperationDirectonary.Add(entity[1], new TableBatchOperation()); } TableBatchOperation operation = OperationDirectonary[domain]; operation.InsertOrReplace(new LeakedAccountEntity(entity[1], entity[0]) { Source = SOURCE, LeakedDate = leakedDate }); tableItemsOnHold++; if (operation.Count >= ITEM_ON_HOLD_LIMIT) { try { EnsureInit(); Table.ExecuteBatch(operation); } catch (Exception ex) { TraceOperationError(ex, operation); } operation.Clear(); } } }
public async Task StoreCategoryLinks(CategoryGroup categoryGroup, string categoryName, IEnumerable <CategoryLinkChange> changes, CancellationToken cancellationToken) { Ensure.String.IsNotNullOrWhiteSpace(categoryName, nameof(categoryName)); Ensure.Any.IsNotNull(changes, nameof(changes)); var table = GetTable(TableName); var batch = new TableBatchOperation(); foreach (var change in changes) { if (batch.Count == 100) { // Batches can only handle 100 items, need to execute this batch await ExecuteBatch(table, batch, cancellationToken).ConfigureAwait(false); batch.Clear(); } var operation = BuildLinkChangeTableOperation(categoryGroup, categoryName, change); batch.Add(operation); } if (batch.Count == 0) { // We were provided a changes instance but no changes to be made return; } await ExecuteBatch(table, batch, cancellationToken).ConfigureAwait(false); }
private static int Delete(string partitionKey, CloudTable itemTable, List <string> itemKeys) { int numOperations = 0; var batch = new TableBatchOperation(); foreach (var itemKey in itemKeys) { var entity = new DynamicTableEntity(partitionKey, itemKey); entity.ETag = "*"; entity.Properties.Add("IsActive", new EntityProperty(false)); batch.Add(TableOperation.Merge(entity)); if (batch.Count == 100) { itemTable.ExecuteBatch(batch); batch.Clear(); numOperations++; } } if (batch.Count > 0) { itemTable.ExecuteBatch(batch); numOperations++; } return(numOperations); }
private static int Insert(string partitionKey, CloudTable itemTable, IEnumerable <Item> items) { int numOperations = 0; var batch = new TableBatchOperation(); foreach (var item in items) { batch.Add(TableOperation.Insert(item)); if (batch.Count == 100) { itemTable.ExecuteBatch(batch); batch.Clear(); numOperations++; } } if (batch.Count > 0) { itemTable.ExecuteBatch(batch); numOperations++; } return(numOperations); }
public async Task PublishPendingEvents_does_not_fails_even_if_all_events_persisted() { // Arrange var userId = Guid.NewGuid(); var userCreated = fixture.Create <FakeUserCreated>(); var usernameChanged = fixture.Create <FakeUsernameChanged>(); var domainEvents = new DomainEvent[] { userCreated, usernameChanged }; RaiseEvents(userId, domainEvents); var envelopes = new List <Envelope>(domainEvents.Select(e => new Envelope(e))); var batchOperation = new TableBatchOperation(); envelopes .Select(e => PendingEventTableEntity.FromEnvelope <FakeUser>(e, serializer)) .ForEach(batchOperation.Insert); await s_eventTable.ExecuteBatchAsync(batchOperation); batchOperation.Clear(); envelopes .Select(e => EventTableEntity.FromEnvelope <FakeUser>(e, serializer)) .ForEach(batchOperation.Insert); await s_eventTable.ExecuteBatchAsync(batchOperation); // Act Func <Task> action = () => sut.PublishPendingEvents <FakeUser>(userId, CancellationToken.None); // Assert action.ShouldNotThrow(); }
/// <summary> /// Execute a DML operation /// </summary> /// <typeparam name="T">Class type of the business object</typeparam> /// <param name="batchOperation">TableBatchOperation to execute</param> public void ExecuteNonQuery <T>(TableBatchOperation batchOperation) where T : class { if (batchOperation.Count > 0) { TableBatchOperation batchPage = new TableBatchOperation(); // all entities in a batch must have the same partition key: foreach (IEnumerable <TableOperation> operations in batchOperation.GroupBy(o => o.Entity.PartitionKey)) { // order elements in a partition by row key so that we reduce tablescans foreach (TableOperation operation in operations.OrderBy(o => o.Entity.RowKey)) { batchPage.Add(operation); if (batchPage.Count == 100) { _currentTableReference.ExecuteBatch(batchPage); batchPage.Clear(); } } } // get the remaining if (batchPage.Count > 0) { _currentTableReference.ExecuteBatch(batchPage); } } }
public async Task Preparing_with_large_entities_Sets_The_Number_Of_Batches_Correctly() { // Arrange var endpointName = nameof(Preparing_with_large_entities_Sets_The_Number_Of_Batches_Correctly); var timeoutsSource = new AspTimeoutsSource(connectionString, 1024, containerName, fakeEndpointName, fakeEndpointTimeoutTableName, tablePrefix: tableNamePrefix); var runParameters = new Dictionary <string, string> { { "Test", "TestValue" } }; var cutOffDate = DateTime.UtcNow; var endpointTimeoutTableName = tableClient.GetTableReference($"{tableNamePrefix}{fakeEndpointTimeoutTableName}"); await endpointTimeoutTableName.CreateIfNotExistsAsync(); // the entity will roughly be 98 KB and we will store 50 of those which makes the actual payload be around 5 MB string destination = new string('a', 32 * 1024); string stateAddress = new string('s', 32 * 1024); string headers = new string('h', 32 * 1024); var batch = new TableBatchOperation(); for (var x = 0; x < 50; x++) { var dateTime = cutOffDate.AddDays(2); var entity = new TimeoutDataEntity(dateTime.ToString(AspConstants.PartitionKeyScope), Guid.NewGuid().ToString()) { OwningTimeoutManager = endpointName, Destination = destination, SagaId = Guid.NewGuid(), StateAddress = stateAddress, Time = dateTime, Headers = headers, }; batch.Add(TableOperation.Insert(entity)); if (batch.Count % 25 == 0) { await endpointTimeoutTableName.ExecuteBatchAsync(batch); batch.Clear(); } } if (batch.Count > 0) { await endpointTimeoutTableName.ExecuteBatchAsync(batch); } // Act var currentMigration = await timeoutsSource.Prepare(cutOffDate, endpointName, runParameters); // Assert Assert.IsNotNull(currentMigration); Assert.AreEqual(endpointName, currentMigration.EndpointName); Assert.AreEqual(runParameters, currentMigration.RunParameters); Assert.AreEqual(1, currentMigration.NumberOfBatches); Assert.AreEqual(MigrationStatus.StoragePrepared, currentMigration.Status); }
//private static void WriteToTarget (CloudTable cloudTable, // IEnumerable<DynamicTableEntity> response) // { // var writer = new TableStorageWriter (cloudTable.Name); // foreach (var entity in response) // { // writer.InsertOrReplace (entity); // } // writer.Execute (); // } private static void WriteToTarget ( CloudTable cloudTable, IEnumerable <DynamicTableEntity> response ) { try { DynamicTableEntity[] dynamicTableEntities = response.ToArray(); if (response.Count() < 1) { return; } int remainingCount = response.Count(); // Create the batch operation. TableBatchOperation batchOperation = new TableBatchOperation(); foreach (var entitiesWithSameKey in dynamicTableEntities.GroupBy(x => x.PartitionKey)) { int i = 1; foreach (DynamicTableEntity nextEntity in entitiesWithSameKey) { batchOperation.InsertOrReplace(nextEntity); if (i % 50 == 0) { cloudTable.ExecuteBatch(batchOperation); batchOperation.Clear(); } i++; } if (batchOperation != null && batchOperation.Count > 0) { cloudTable.ExecuteBatch(batchOperation); batchOperation.Clear(); } } } catch (Exception ex) { m_pctCompleteDelegate(ex.Message, true); } }
private static async Task CheckAndInitializeTable <T>(CloudTableClient tableClient, string funcAppDirectory, string seedFilename, ILogger log) where T : EntityBase, new() { var tableName = typeof(T).Name.ToLower(); CloudTable table = tableClient.GetTableReference(tableName); var exists = await table.ExistsAsync(); if (exists) { return; } await table.CreateAsync(); var rowNumber = 0; var batchOperation = new TableBatchOperation(); var seedPath = Path.Combine(funcAppDirectory, Constants.Data.Directory, seedFilename); var seedJson = File.ReadAllText(seedPath); var seedEntries = JsonConvert.DeserializeObject <ICollection <T> >(seedJson); foreach (var entry in seedEntries) { batchOperation.Add(TableOperation.InsertOrReplace(entry)); if (batchOperation.Count > 99) { table.ExecuteBatch(batchOperation); batchOperation.Clear(); } rowNumber++; } if (batchOperation.Count > 0) { table.ExecuteBatch(batchOperation); batchOperation.Clear(); } log.LogInformation($"{seedEntries.Count} {tableName}s initially seeded"); }
public async Task PublishAllEvents_sends_pending_events() { // Arrange var domainEvents = new List <DomainEvent>(); List <Guid> users = fixture.CreateMany <Guid>().ToList(); foreach (Guid userId in users) { var userCreated = fixture.Create <FakeUserCreated>(); var usernameChanged = fixture.Create <FakeUsernameChanged>(); var events = new DomainEvent[] { userCreated, usernameChanged }; RaiseEvents(userId, events); var envelopes = new List <Envelope>(events.Select(e => new Envelope(e))); var batchOperation = new TableBatchOperation(); envelopes .Select(e => PendingEventTableEntity.FromEnvelope <FakeUser>(e, serializer)) .ForEach(batchOperation.Insert); await s_eventTable.ExecuteBatchAsync(batchOperation); batchOperation.Clear(); envelopes .Select(e => EventTableEntity.FromEnvelope <FakeUser>(e, serializer)) .ForEach(batchOperation.Insert); await s_eventTable.ExecuteBatchAsync(batchOperation); domainEvents.AddRange(events); } var messages = new List <IDomainEvent>(); Mock.Get(messageBus) .Setup( x => x.SendBatch( It.IsAny <IEnumerable <Envelope> >(), It.IsAny <CancellationToken>())) .Callback <IEnumerable <Envelope>, CancellationToken>( (batch, cancellationToken) => messages.AddRange(batch .Select(b => b.Message) .OfType <IDomainEvent>() .Where(m => users.Contains(m.SourceId)))) .Returns(Task.FromResult(true)); // Act await sut.PublishAllEvents(CancellationToken.None); // Assert messages.Should().OnlyContain(e => e is IDomainEvent); messages.ShouldAllBeEquivalentTo(domainEvents); }
private void CalculateMADataToAzure(CloudTable table, string azureTableStockCode, int MA) { DateTime startingDate = DateTime.FromFileTimeUtc(0); TableOperation retrieveStockEntityStatus = TableOperation.Retrieve<StockEntityStatus>("status-" + azureTableStockCode, "status"); var stockEntityStatus = (StockEntityStatus)table.Execute(retrieveStockEntityStatus).Result; if (stockEntityStatus != null) { startingDate = stockEntityStatus.GetLatestMAStartDate(MA); Console.WriteLine("Latest starting date for MA{0} is on {1}", MA, startingDate.ToString("yyyy-MM-dd")); } string pkFilter = TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, azureTableStockCode); string rkLowerFilter = TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.GreaterThanOrEqual, startingDate.ToString("yyyy-MM-dd")); string combinedFilter = TableQuery.CombineFilters(pkFilter, TableOperators.And, rkLowerFilter); TableQuery<StockEntity> query = new TableQuery<StockEntity>().Where(combinedFilter); var sortedStockEntities = table.ExecuteQuery<StockEntity>(query).OrderBy(entity => entity.Date).ToList(); if (sortedStockEntities.LongCount() >= MA) { long totalCountToUpload = sortedStockEntities.LongCount(); long currentCountUploaded = 0; Queue<double> maData = new Queue<double>(); TableBatchOperation tableBatchOperation = new TableBatchOperation(); foreach (var stockEntity in sortedStockEntities) { maData.Enqueue(stockEntity.Close); if (maData.Count == MA) { double sum = 0; foreach (var data in maData) { sum += data; } stockEntity.SetMA(MA, sum / MA); tableBatchOperation.Add(TableOperation.InsertOrMerge(stockEntity)); maData.Dequeue(); } if (tableBatchOperation.Count == 100) { table.ExecuteBatch(tableBatchOperation); currentCountUploaded += 100; Console.WriteLine("{0}/{1} entities uploaded...", currentCountUploaded, totalCountToUpload); tableBatchOperation.Clear(); } } if (tableBatchOperation.Count > 0) { table.ExecuteBatch(tableBatchOperation); currentCountUploaded += tableBatchOperation.Count; Console.WriteLine("{0}/{1} entities uploaded...", currentCountUploaded, totalCountToUpload); } sortedStockEntities.Reverse(); if (stockEntityStatus == null) { stockEntityStatus = new StockEntityStatus(azureTableStockCode); } stockEntityStatus.SetLatestMAStartDate(MA, sortedStockEntities[MA - 2].Date); table.Execute(TableOperation.InsertOrMerge(stockEntityStatus)); } }
public void TableBatchAddQueryAndOneMoreOperationShouldThrow() { TableBatchOperation batch = new TableBatchOperation(); TableOperation operation = TableOperation.Retrieve<DynamicReplicatedTableEntity>("foo", "bar"); try { batch.Add(operation); Assert.IsTrue(batch.Contains(operation)); batch.Add(TableOperation.Insert(GenerateRandomEnitity("foo"))); Assert.Fail(); } catch (ArgumentException) { // no op } catch (Exception) { Assert.Fail(); } batch.Clear(); Assert.IsFalse(batch.Contains(operation)); try { batch.Add(TableOperation.Insert(GenerateRandomEnitity("foo"))); batch.Add(TableOperation.Retrieve<DynamicReplicatedTableEntity>("foo", "bar")); Assert.Fail(); } catch (ArgumentException) { // no op } catch (Exception) { Assert.Fail(); } batch.Clear(); try { batch.Add(TableOperation.Retrieve<DynamicReplicatedTableEntity>("foo", "bar")); batch.Insert(0, TableOperation.Insert(GenerateRandomEnitity("foo"))); Assert.Fail(); } catch (ArgumentException) { // no op } catch (Exception) { Assert.Fail(); } try { batch.Insert(0, TableOperation.Insert(GenerateRandomEnitity("foo"))); batch.Insert(0, TableOperation.Retrieve<DynamicReplicatedTableEntity>("foo", "bar")); Assert.Fail(); } catch (ArgumentException) { // no op } catch (Exception) { Assert.Fail(); } }
private void UploadStockDataToAzure(List<StockEntity> stockEntities, string azureTableStockCode) { Console.WriteLine("Uploading data to azure table..."); CloudStorageAccount storageAccount = CloudStorageAccount.Parse(storageConnectionString); CloudTable table = GetAzureTable(); DateTime startingDate = DateTime.FromFileTimeUtc(0); TableOperation retrieveStockEntityStatus = TableOperation.Retrieve<StockEntityStatus>("status-" + azureTableStockCode, "status"); var stockEntityStatus = (StockEntityStatus)table.Execute(retrieveStockEntityStatus).Result; if (stockEntityStatus != null) { Console.WriteLine("Latest data from azure table is on {0}", stockEntityStatus.LastestRawDataDate.ToString("yyyy-MM-dd")); startingDate = stockEntityStatus.LastestRawDataDate; } var stockEntitiesToUpload = stockEntities.Where(entity => entity.Date > startingDate); long totalCountToUpload = stockEntitiesToUpload.LongCount(); long currentCountUploaded = 0; DateTime lastestRawDataDate = DateTime.FromFileTimeUtc(0); TableBatchOperation tableBatchOperation = new TableBatchOperation(); foreach (var stockEntity in stockEntitiesToUpload) { if (stockEntity.Date >= lastestRawDataDate) lastestRawDataDate = stockEntity.Date; tableBatchOperation.Add(TableOperation.InsertOrMerge(stockEntity)); if (tableBatchOperation.Count == 100) { table.ExecuteBatch(tableBatchOperation); currentCountUploaded += 100; Console.WriteLine("{0}/{1} entities uploaded...", currentCountUploaded, totalCountToUpload); tableBatchOperation.Clear(); } } if (tableBatchOperation.Count > 0) { table.ExecuteBatch(tableBatchOperation); currentCountUploaded += tableBatchOperation.Count; Console.WriteLine("{0}/{1} entities uploaded...", currentCountUploaded, totalCountToUpload); } if (stockEntitiesToUpload.LongCount() > 0) { if (stockEntityStatus == null) { stockEntityStatus = new StockEntityStatus(azureTableStockCode); } stockEntityStatus.LastestRawDataDate = lastestRawDataDate; Console.WriteLine("Set latest raw data date to {0}", stockEntityStatus.LastestRawDataDate.ToString("yyyy-MM-dd")); table.Execute(TableOperation.InsertOrMerge(stockEntityStatus)); } }
public void TableBatchAddQueryAndOneMoreOperationShouldThrow() { TableBatchOperation batch = new TableBatchOperation(); try { batch.Add(TableOperation.Retrieve("foo", "bar")); batch.Add(TableOperation.Insert(GenerateRandomEntity("foo"))); Assert.Fail(); } catch (ArgumentException) { // no op } catch (Exception) { Assert.Fail(); } batch.Clear(); try { batch.Add(TableOperation.Insert(GenerateRandomEntity("foo"))); batch.Add(TableOperation.Retrieve("foo", "bar")); Assert.Fail(); } catch (ArgumentException) { // no op } catch (Exception) { Assert.Fail(); } batch.Clear(); try { batch.Add(TableOperation.Retrieve("foo", "bar")); batch.Insert(0, TableOperation.Insert(GenerateRandomEntity("foo"))); Assert.Fail(); } catch (ArgumentException) { // no op } catch (Exception) { Assert.Fail(); } try { batch.Insert(0, TableOperation.Insert(GenerateRandomEntity("foo"))); batch.Insert(0, TableOperation.Retrieve("foo", "bar")); Assert.Fail(); } catch (ArgumentException) { // no op } catch (Exception) { Assert.Fail(); } }