/// <summary>
/// Synchronously inserts <paramref name="entity"/> into <paramref name="table"/>.
/// </summary>
/// <param name="table">The table to insert into. Must not be null.</param>
/// <param name="entity">The entity to insert.</param>
/// <exception cref="ArgumentNullException"><paramref name="table"/> is null.</exception>
/// <remarks>
/// NOTE: this blocks on the async operation via GetAwaiter().GetResult(), which can
/// deadlock on a synchronization context; intended for test/tooling call sites only.
/// </remarks>
public static void Insert(this CloudTable table, ITableEntity entity)
{
    if (table == null)
    {
        // nameof keeps the parameter name refactor-safe (was the magic string "table").
        throw new ArgumentNullException(nameof(table));
    }

    var operation = table.CreateInsertOperation(entity);
    table.ExecuteAsync(operation, CancellationToken.None).GetAwaiter().GetResult();
}
/// <summary>
/// Buffers <paramref name="item"/> into the in-memory partition map for later batched
/// submission, flushing to the table when buffering limits are hit.
/// </summary>
/// <param name="item">Entity to buffer. Its partition/row keys are validated client-side.</param>
/// <param name="cancellationToken">Token observed by any flush this call triggers.</param>
public async Task AddAsync(T item, CancellationToken cancellationToken = default(CancellationToken))
{
    // Careful:
    // 1. even with upsert, all rowkeys within a batch must be unique. If they aren't, the previous items
    //    will be flushed.
    // 2. Capture at time of Add, in case item is mutated after add.
    // 3. Validate rowkey on the client so we get a nice error instead of the cryptic 400 from azure.
    string partitionKey = item.PartitionKey;
    string rowKey = item.RowKey;
    TableClient.ValidateAzureTableKeyValue(partitionKey);
    TableClient.ValidateAzureTableKeyValue(rowKey);

    Dictionary<string, TableOperation> partition;
    if (!_map.TryGetValue(partitionKey, out partition))
    {
        // First entity seen for this partition key. Cap the number of buffered
        // partitions; flushing everything makes room before we add a new one.
        if (_map.Count >= MaxPartitionWidth)
        {
            // Offline cache is too large. Clear some room
            await FlushAsync(cancellationToken);
        }
        partition = new Dictionary<string, TableOperation>();
        _map[partitionKey] = partition;
    }

    // Snapshot now so later caller mutations of `item` don't change what we write.
    var itemCopy = Copy(item);
    if (partition.ContainsKey(rowKey))
    {
        // Replacing item forces a flush to ensure correct eTag behaviour.
        await FlushPartitionAsync(partition, cancellationToken);
        // Reinitialize partition
        partition = new Dictionary<string, TableOperation>();
        _map[partitionKey] = partition;
    }

    _log.EntitiesWritten++;

    // Choose the table operation from the copy's ETag:
    //   null/empty -> Insert (must not already exist)
    //   "*"        -> InsertOrReplace (unconditional upsert)
    //   otherwise  -> Replace (conditional on the specific eTag)
    if (String.IsNullOrEmpty(itemCopy.ETag))
    {
        partition.Add(rowKey, _table.CreateInsertOperation(itemCopy));
    }
    else if (itemCopy.ETag.Equals("*"))
    {
        partition.Add(rowKey, _table.CreateInsertOrReplaceOperation(itemCopy));
    }
    else
    {
        partition.Add(rowKey, _table.CreateReplaceOperation(itemCopy));
    }

    // Flush a partition as soon as it reaches the batch-size limit and drop it
    // from the map; a fresh dictionary is created on the next Add for this key.
    if (partition.Count >= MaxBatchSize)
    {
        await FlushPartitionAsync(partition, cancellationToken);
        _map.Remove(partitionKey);
    }
}