public async Task ProcessEventsAsync(PartitionContext context, IEnumerable<EventData> messages)
        {
            // Deserializes each event into a BusSnapshotInfo, upserts it into the
            // "snapshots" table, and checkpoints the Event Hub partition.
            //
            // Azure Table batches require that every operation share one partition
            // key and contain at most 100 operations. The original code put all
            // entities (partitioned by RouteShortName) into a single batch, which
            // fails as soon as two routes appear in the same event set or more
            // than 100 events arrive. Bucket per partition key and flush in
            // chunks of 100 instead; an empty message set performs no batch call
            // (ExecuteBatchAsync throws on an empty batch).
            var batchesByRoute = new Dictionary<string, TableBatchOperation>();

            foreach (var msg in messages)
            {
                var snap = JsonConvert.DeserializeObject<BusSnapshotInfo>(Encoding.UTF8.GetString(msg.GetBytes()));

                var entity = new DynamicTableEntity(snap.RouteShortName, snap.VehicleId.ToString());

                entity.Properties.Add("RouteShortName", EntityProperty.GeneratePropertyForString(snap.RouteShortName));
                entity.Properties.Add("VehicleId", EntityProperty.GeneratePropertyForInt(snap.VehicleId));
                entity.Properties.Add("TripId", EntityProperty.GeneratePropertyForInt(snap.TripId));
                entity.Properties.Add("Latitude", EntityProperty.GeneratePropertyForDouble(snap.Latitude));
                entity.Properties.Add("Longitude", EntityProperty.GeneratePropertyForDouble(snap.Longitude));
                entity.Properties.Add("DirectionOfTravel", EntityProperty.GeneratePropertyForString(snap.DirectionOfTravel.ToString()));
                entity.Properties.Add("NextStopId", EntityProperty.GeneratePropertyForInt(snap.NextStopId));
                entity.Properties.Add("Timeliness", EntityProperty.GeneratePropertyForString(snap.Timeliness.ToString()));
                entity.Properties.Add("TimelinessOffset", EntityProperty.GeneratePropertyForInt(snap.TimelinessOffset));
                entity.Properties.Add("Timestamp", EntityProperty.GeneratePropertyForDateTimeOffset(snap.Timestamp));

                TableBatchOperation routeBatch;
                if (!batchesByRoute.TryGetValue(snap.RouteShortName, out routeBatch))
                {
                    routeBatch = new TableBatchOperation();
                    batchesByRoute.Add(snap.RouteShortName, routeBatch);
                }
                routeBatch.Add(TableOperation.InsertOrReplace(entity));
            }

            var tableClient = _account.CreateCloudTableClient();

            var table = tableClient.GetTableReference("snapshots");

            await table.CreateIfNotExistsAsync();

            foreach (var routeBatch in batchesByRoute.Values)
            {
                // Flush each partition's operations in chunks of at most 100.
                for (var start = 0; start < routeBatch.Count; start += 100)
                {
                    var chunk = new TableBatchOperation();
                    var end = Math.Min(start + 100, routeBatch.Count);
                    for (var j = start; j < end; j++)
                    {
                        chunk.Add(routeBatch[j]);
                    }
                    await table.ExecuteBatchAsync(chunk);
                }
            }

            await context.CheckpointAsync();
        }
示例#2
0
        public void Insert()
        {
            // Builds a batch of ShopBarcodeEntity rows from the shared DataTable
            // (rows [id, id+100)) and upserts them, de-duplicating on barcode.
            TableBatchOperation batchOperation = new TableBatchOperation();
            Hashtable barcode = new Hashtable();
            StringBuilder sb = new StringBuilder();
            for (int i = id; i < id + 100 && i < Program._DATA_TABLE.Rows.Count; i++)
            {
                var row = Program._DATA_TABLE.Rows[i];
                string code = row["Barcode"].ToString().Trim();
                if (!barcode.ContainsKey(code))
                {
                    try
                    {
                        sb.Append(code + "|");
                        ShopBarcodeEntity data = new ShopBarcodeEntity(row["Shop"].ToString().Trim(), code);
                        data.OrderNo = row["BillNumber"].ToString().Trim();
                        data.Product = row["Product"].ToString().Trim().PadLeft(8, '0');
                        data.Cost = double.Parse(row["SellPrice"].ToString().Trim());
                        data.SellFinished = false;
                        batchOperation.InsertOrMerge(data);
                        barcode[code] = true;
                    }
                    catch
                    {
                        // Best-effort: a malformed row (e.g. unparseable SellPrice) is skipped.
                    }
                }
            }

            // ExecuteBatch throws on an empty batch, so bail out (and do not count
            // the record) when no row survived parsing/de-duplication.
            if (batchOperation.Count == 0)
            {
                return;
            }

            try
            {
                Program._RECORD++;
                Console.WriteLine("Insert Record {0}-{1}\t\tTotal {2} Records", id + 1, id + 100, Program._RECORD*100);
                Program._CLOUD_TABLE.ExecuteBatch(batchOperation);
            }
            catch (Exception e)
            {
                Program.WriteErrorLog("Record " + (id + 1) + "-" + (id + 100) + " Error \n" + sb.ToString() + "\n" + e.Message + "\n" + e.StackTrace);
            }
        }
示例#3
0
        public List<Reply> GetReplyNotif(string userid)
        {
            // Fetches every reply notification stored under this user's partition,
            // deserializes the payloads, and deletes the notifications as it goes.
            // Deletes are flushed in batches of 100 (the table-batch limit).
            var filter = TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, userid);
            var query = new TableQuery<ReplyNotificationEntifity>().Where(filter);

            var replies = new List<Reply>();
            var pendingDeletes = new TableBatchOperation();

            foreach (ReplyNotificationEntifity entity in _replyNotification.ExecuteQuery(query))
            {
                replies.Add(JsonConvert.DeserializeObject<Reply>(entity.Content));
                pendingDeletes.Add(TableOperation.Delete(entity));

                if (pendingDeletes.Count == 100)
                {
                    _replyNotification.ExecuteBatch(pendingDeletes);
                    pendingDeletes = new TableBatchOperation();
                }
            }

            // Flush whatever is left over after the loop.
            if (pendingDeletes.Count > 0)
            {
                _replyNotification.ExecuteBatch(pendingDeletes);
            }

            return replies;
        }
        static void CreateCustomerMetadata(CloudTableClient tableClient)
        {
            // Seeds the "customersmetadata" table with one address row and two
            // website rows under the MSFT partition, then waits for the user.
            Console.WriteLine("Creating customers metadata...");

            var metadataTable = tableClient.GetTableReference("customersmetadata");
            metadataTable.CreateIfNotExists();

            var address = new DictionaryTableEntity
            {
                PartitionKey = "MSFT",
                RowKey = "ADDRESS-" + Guid.NewGuid().ToString("N").ToUpper()
            };
            address.Add("city", "Seattle");
            address.Add("street", "111 South Jackson");

            var website1 = new DictionaryTableEntity
            {
                PartitionKey = "MSFT",
                RowKey = "WEBSITE-" + Guid.NewGuid().ToString("N").ToUpper()
            };
            website1.Add("url", "http://www.microsoft.com");

            var website2 = new DictionaryTableEntity
            {
                PartitionKey = "MSFT",
                RowKey = "WEBSITE-" + Guid.NewGuid().ToString("N").ToUpper()
            };
            website2.Add("url", "http://www.windowsazure.com");

            // All three rows share the MSFT partition, so one batch is legal.
            var batch = new TableBatchOperation();
            batch.Add(TableOperation.Insert(address));
            batch.Add(TableOperation.Insert(website1));
            batch.Add(TableOperation.Insert(website2));
            metadataTable.ExecuteBatch(batch);

            Console.WriteLine("Done. Press ENTER to read the customer metadata.");
            Console.ReadLine();
        }
        /// <summary>
        /// Emit a batch of log events, running to completion synchronously.
        /// </summary>
        /// <param name="events">The events to emit.</param>
        /// <remarks>Override either <see cref="PeriodicBatchingSink.EmitBatch"/> or <see cref="PeriodicBatchingSink.EmitBatchAsync"/>,
        /// not both.</remarks>
        protected override void EmitBatch(IEnumerable<LogEvent> events)
        {
            var operation = new TableBatchOperation();

            var first = true;

            foreach (var logEvent in events)
            {
                if (first)
                {
                    // Check whether the partition key changed since the previous
                    // batch; if so, restart the per-partition row counter.
                    if (partitionKey != logEvent.Timestamp.Ticks)
                    {
                        batchRowId = 0; // the partition key has been reset
                        partitionKey = logEvent.Timestamp.Ticks; // store the new partition key
                    }
                    first = false;
                }

                var logEventEntity = new LogEventEntity(logEvent, _formatProvider, partitionKey);
                // Suffix the row key so multiple events in one batch stay unique.
                logEventEntity.RowKey += "|" + batchRowId;
                operation.Add(TableOperation.Insert(logEventEntity));

                batchRowId++;
            }

            // ExecuteBatch throws on an empty batch; skip the round-trip when the
            // periodic sink hands us no events.
            if (operation.Count > 0)
            {
                _table.ExecuteBatch(operation);
            }
        }
示例#6
0
        private void InsertBets()
        {
            // Persists every (ticket, bet) pair for this raffle. A table batch
            // accepts at most 100 operations, so flush in chunks as we fill up;
            // the original single batch failed for raffles with >100 bets.
            if (this.Bets.Count == 0)
            {
                return;
            }

            var tableBets = this.CreateTableClient("Bet");

            tableBets.CreateIfNotExists();

            var batchOperation = new TableBatchOperation();

            foreach (var kvp in this.Bets)
            {
                var bet = new BetEntity();
                bet.TicketNumber = kvp.Key;
                bet.BetNumber = kvp.Value;
                bet.RaffleId = this.RaffleId;

                batchOperation.Insert(bet);

                if (batchOperation.Count == 100)
                {
                    tableBets.ExecuteBatch(batchOperation);
                    batchOperation = new TableBatchOperation();
                }
            }

            // Flush the remainder (non-empty here because Bets was non-empty
            // and any full chunk was already sent).
            if (batchOperation.Count > 0)
            {
                tableBets.ExecuteBatch(batchOperation);
            }
        }
        public long ClearTable()
        {
            // Deletes up to 100 entities from the table in per-partition batches
            // (a table batch may only touch a single partition key). Returns the
            // number of entities deleted; callers can loop until it returns 0.
            long deletionCount = 0;

            // Project only the keys — that is all a Delete operation needs.
            var columns = new List<string> { "PartitionKey", "RowKey" };
            TableQuery<SensorValueEntity> query = new TableQuery<SensorValueEntity>().Select(columns).Take(100);

            // Materialize once: ExecuteQuery is lazily evaluated, so the original
            // Count() followed by GroupBy enumeration issued the query twice.
            var results = table.ExecuteQuery(query).ToList();

            if (results.Count == 0)
                return deletionCount;

            foreach (var resultGroup in results.GroupBy(a => a.PartitionKey))
            {
                TableBatchOperation batchOperation = new TableBatchOperation();
                foreach (var result in resultGroup)
                {
                    batchOperation.Delete(result);
                    deletionCount++;
                }
                table.ExecuteBatch(batchOperation);
            }

            return deletionCount;
        }
示例#8
0
        // Inserts the entities produced by entityFactory(n) in one table batch and
        // reports the request timing. NOTE(review): assumes entityFactory yields at
        // most 100 entities sharing one partition key (the batch-transaction
        // limits) — confirm with callers.
        async Task<CommandResult> DoInsert(CloudTable table, long n, Func<long, EntityNk[]> entityFactory)
        {
            
            var batchOperation = new TableBatchOperation();

            foreach (var e in entityFactory(n))
            {
                batchOperation.Insert(e);
            }

            // Start timestamp is recorded up front; Elapsed is filled in with the
            // value the operation-context callback delivers (cbt) on completion.
            var cresult = new CommandResult { Start = DateTime.UtcNow.Ticks };
            var cbt = 0L;
            var context = GetOperationContext((t) => cbt = t);
            try
            {
                var results = await table.ExecuteBatchAsync(batchOperation, operationContext: context);
                cresult.Elapsed = cbt;
            }
            catch (Exception ex)
            {
                // Failures are reported via Elapsed == -1 rather than rethrown.
                cresult.Elapsed = -1;
                Console.Error.WriteLine("Error DoInsert {0} {1}", n, ex.ToString());
            }
            return cresult;
        }
 public AzureTableContext(CloudTable table, IEncryptor encryptor)
 {
     // Capture the collaborators and start with empty pending-operation batches.
     _context = new TableBatchOperation();
     _deleteBackupContext = new TableBatchOperation();
     _table = table;
     _encryptor = encryptor;
 }
示例#10
0
        protected override void Append(LoggingEvent[] loggingEvents)
        {
            // Maps each log4net event to a LogItem and writes them to table storage.
            // Flushes every 100 items (the table-batch limit — the original single
            // batch failed for >100 events) and skips the storage call entirely for
            // an empty event set, since ExecuteBatch throws on an empty batch.
            try
            {
                base.Append(loggingEvents);

                TableBatchOperation batchOperation = new TableBatchOperation();
                foreach (var e in loggingEvents)
                {
                    var logitem = new LogItem
                        {
                            Exception = e.GetExceptionString(),
                            Level = e.Level.Name,
                            LoggerName = e.LoggerName,
                            Message = e.RenderedMessage,
                            RoleInstance = RoleInfo
                        };
                    batchOperation.Insert(logitem);

                    if (batchOperation.Count == 100)
                    {
                        _log4NetTable.ExecuteBatch(batchOperation);
                        batchOperation = new TableBatchOperation();
                    }
                }

                if (batchOperation.Count > 0)
                {
                    _log4NetTable.ExecuteBatch(batchOperation);
                }
            }
            catch (Exception ex)
            {
                // Logging must never take the app down; trace and swallow.
                Trace.TraceError("AzureTableAppender Append " + ex.Message);
            }
        }
示例#11
0
File: Bank.cs  Project: PowerDD/DataSync
        public void Insert()
        {
            // Upserts bank-account rows [id, id+100) from the shared DataTable into
            // table storage under the fixed partition "88888888", de-duplicating on ID.
            TableBatchOperation batchOperation = new TableBatchOperation();
            Hashtable bank = new Hashtable();

            for (int i = id; i < id + 100 && i < Program._DATA_TABLE.Rows.Count; i++)
            {
                var row = Program._DATA_TABLE.Rows[i];
                string key = row["ID"].ToString().Trim();
                if (!bank.ContainsKey(key))
                {
                    try
                    {
                        DynamicTableEntity data = new DynamicTableEntity();
                        data.PartitionKey = "88888888";
                        data.RowKey = key.PadLeft(6, '0');

                        // BUG FIX: the original built a local Dictionary of properties
                        // that was never attached to the entity, so every row was
                        // written with no columns. Add straight to data.Properties.
                        data.Properties.Add("Bank", new EntityProperty(row["Bank"].ToString().Trim()));
                        data.Properties.Add("BranchName", new EntityProperty(row["BranchName"].ToString().ToLower().Trim()));
                        data.Properties.Add("AccountNumber", new EntityProperty(row["AccountNumber"].ToString().Trim()));
                        data.Properties.Add("AccountName", new EntityProperty(row["AccountName"].ToString().Trim()));
                        data.Properties.Add("AccountType", new EntityProperty(int.Parse(row["AccountType"].ToString().Trim())));

                        batchOperation.InsertOrMerge(data);
                        recBank++;
                        bank[key] = true;
                    }
                    catch
                    {
                        // Best-effort: skip rows with missing or unparseable columns.
                    }
                }
            }

            try
            {
                // Guard on the batch itself: ExecuteBatch throws on an empty batch
                // (the original only checked that the source table had rows).
                if (batchOperation.Count > 0)
                {
                    Program._RECORD++;
                    // The original if/else branches were identical except for the verb.
                    Console.WriteLine("{0} Record {1}-{2}\t\tTotal {3} Records",
                        Program._UPDATE ? "Update" : "Insert", id + 1, id + 100, Program._RECORD * 100);
                    Program._CLOUD_TABLE.ExecuteBatch(batchOperation);
                }
            }
            catch (Exception e)
            {
                Program.WriteErrorLog("Record " + (id + 1) + "-" + (id + 100) + " Error \n" + e.Message + "\n" + e.StackTrace);
            }
        }
        public async Task Add(IList<ITableEntity> notes)
        {
            // Inserts each note entity in table batches. Changed from `async void`
            // to `async Task`: async void swallows exceptions and cannot be awaited;
            // existing call sites remain source-compatible (statement calls still work)
            // while new callers can observe completion and failures.
            if (notes == null || notes.Count == 0)
            {
                return; // ExecuteBatchAsync throws on an empty batch.
            }

            var batchOperation = new TableBatchOperation();
            foreach (var note in notes)
            {
                batchOperation.Insert(note);

                // A table batch accepts at most 100 operations; flush and restart.
                if (batchOperation.Count == 100)
                {
                    await _table.ExecuteBatchAsync(batchOperation);
                    batchOperation = new TableBatchOperation();
                }
            }

            if (batchOperation.Count > 0)
            {
                await _table.ExecuteBatchAsync(batchOperation);
            }
        }
示例#13
0
        public static bool InsertEntityToAzureTable(string tableName, List<CustomerEntity> entityList)
        {
            // Inserts every entity in entityList into the named table, batching in
            // chunks of 100 (the table-batch limit). The original single batch
            // failed for >100 entities and threw for an empty list. Returns true
            // when all chunks were accepted; a failed batch throws instead.
            System.Diagnostics.Trace.TraceInformation(string.Format("Date = {0}, AzureTableOperation.InsertEntityToAzureTable2 is Called", DateTime.Now));

            // Retrieve the storage account from the connection string.
            CloudStorageAccount storageAccount = CloudStorageAccount.Parse(GetStorageConnectionString());

            // Create the table client and the CloudTable object for the target table.
            CloudTableClient tableClient = storageAccount.CreateCloudTableClient();
            CloudTable table = tableClient.GetTableReference(tableName);

            int inserted = 0;
            TableBatchOperation batchOperation = new TableBatchOperation();
            foreach (CustomerEntity entity in entityList)
            {
                batchOperation.Insert(entity);
                if (batchOperation.Count == 100)
                {
                    inserted += table.ExecuteBatch(batchOperation).Count;
                    batchOperation = new TableBatchOperation();
                }
            }
            if (batchOperation.Count > 0)
            {
                inserted += table.ExecuteBatch(batchOperation).Count;
            }

            System.Diagnostics.Trace.TraceInformation(string.Format("Date = {0}, AzureTableOperation.InsertEntityToAzureTable2 insert entity to {1}: {2}", DateTime.Now, tableName, inserted));

            // A failed batch throws, so reaching this point means every chunk succeeded.
            return true;
        }
        public async Task Persist(ProjectionMetaData metadata)
        {
            // Upserts the projection's hash under (ProjectionType, current handler).
            Trace.TraceInformation("Preparing batch for projection {0}.", metadata.ProjectionType);

            var entity = new DictionaryTableEntity
            {
                PartitionKey = metadata.ProjectionType,
                RowKey = Handler.Current()
            };
            entity.Add("ProjectionHash", metadata.ProjectionHash);

            var batch = new TableBatchOperation();
            batch.Add(TableOperation.InsertOrReplace(entity));

            Trace.TraceInformation("Executing batch for projection {0}.", metadata.ProjectionType);
            try
            {
                // await + try/catch replaces the original ContinueWith continuation:
                // same logging and same swallow-after-trace behavior, without an
                // unobserved continuation task.
                await _table.ExecuteBatchAsync(batch);
                Trace.TraceInformation("Batch for projection {0} complete without exceptions.", metadata.ProjectionType);
            }
            catch (Exception exception)
            {
                Trace.TraceInformation("Batch for projection {0} complete with exceptions.", metadata.ProjectionType);
                Trace.TraceError(exception.ToString());
            }
        }
示例#15
0
      private TableBatchOperation ComposeBatch(IEnumerable<LogEvent> events)
      {
         // Builds one insert batch from the given events, keyed by event time
         // (partition = day "yy-MM-dd", row = time-of-day "HH-mm-ss-fff").
         // Returns null when there is nothing to write so callers can skip
         // the round-trip.
         var batch = new TableBatchOperation();

         foreach (LogEvent evt in events)
         {
            var entity = new ElasticTableEntity
            {
               PartitionKey = evt.EventTime.ToString("yy-MM-dd"),
               RowKey = evt.EventTime.ToString("HH-mm-ss-fff")
            };

            entity.Add("source", evt.SourceName);
            entity.Add("severity", evt.Severity);
            entity.Add("message", evt.FormattedMessage);
            entity.Add("error", evt.ErrorException == null ? string.Empty : evt.ErrorException.ToString());

            if (evt.Properties != null)
            {
               foreach (var pair in evt.Properties)
               {
                  // The error is already captured in its dedicated column above.
                  if (pair.Key == LogEvent.ErrorPropertyName) continue;

                  entity.Add(pair.Key, pair.Value);
               }
            }

            batch.Insert(entity);
         }

         return batch.Count == 0 ? null : batch;
      }
示例#16
0
        public void TableBatchAddNullShouldThrow()
        {
            // Both Add(null) and Insert(index, null) must reject a null operation
            // with ArgumentNullException; not throwing, or throwing anything else,
            // fails the test. The shared local helper keeps both checks identical.
            TableBatchOperation batch = new TableBatchOperation();

            Action<Action> expectArgumentNull = action =>
            {
                try
                {
                    action();
                    Assert.Fail();
                }
                catch (ArgumentNullException)
                {
                    // expected — no op
                }
                catch (Exception)
                {
                    Assert.Fail();
                }
            };

            expectArgumentNull(() => batch.Add(null));
            expectArgumentNull(() => batch.Insert(0, null));
        }
示例#17
0
 public ActionResult DeleteConfirmed(string partitionKey)
 {
     // Remove every row stored under this mailing list's partition — both
     // Subscriber rows and MailingList rows go, so no row key is specified.
     // Deletes are flushed in batches of 100, the table-batch limit.
     var filter = TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, partitionKey);
     var listRows = mailingListTable.ExecuteQuery(new TableQuery<MailingList>().Where(filter)).ToList();

     var pending = new TableBatchOperation();
     foreach (MailingList listRow in listRows)
     {
         pending.Delete(listRow);
         if (pending.Count == 100)
         {
             mailingListTable.ExecuteBatch(pending);
             pending = new TableBatchOperation();
         }
     }
     if (pending.Count > 0)
     {
         mailingListTable.ExecuteBatch(pending);
     }
     return RedirectToAction("Index");
 }
        void ClearTable(CloudTable table)
        {
            // Wipes the table one device partition at a time — a table batch may
            // only contain operations for a single partition key.
            foreach (var partitionKey in _deviceService.GetDeviceIds())
            {
                // Gets all the entities in the table for this partition key.
                string partitionCondition = TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, partitionKey);
                var rows = table.ExecuteQuery(new TableQuery().Where(partitionCondition)).ToList();

                TableBatchOperation batchDelete = new TableBatchOperation();
                foreach (DynamicTableEntity row in rows)
                {
                    batchDelete.Add(TableOperation.Delete(row));

                    // Azure caps a batch at 100 operations; flush and start over.
                    if (batchDelete.Count == 100)
                    {
                        table.ExecuteBatch(batchDelete);
                        batchDelete = new TableBatchOperation();
                    }
                }

                // Flush out whatever is left.
                if (batchDelete.Count > 0)
                {
                    table.ExecuteBatch(batchDelete);
                }
            }
        }
 public void AddEntryToTable(string tableName)
 {
     // Ensure the table exists, then insert two fixed rows in one batch
     // (both share the "Kolkata" partition, as a batch requires).
     CloudTable table = cloudTableClient.GetTableReference(tableName);
     table.CreateIfNotExists();

     var batch = new TableBatchOperation();
     var abhishek = new TableEntity { RowKey = "Abhishek", PartitionKey = "Kolkata" };
     var abhijit = new TableEntity { RowKey = "Abhijit", PartitionKey = "Kolkata" };
     batch.Add(TableOperation.Insert(abhishek));
     batch.Add(TableOperation.Insert(abhijit));
     table.ExecuteBatch(batch);
 }
 public async Task AddVersion(OwnershipRegistration registration, OwnershipOwner owner, string version)
 {
     // Record the owner row and the package-version row under the registration's
     // partition in a single atomic batch (both rows share the partition key).
     CloudTable table = _account.CreateCloudTableClient().GetTableReference(OwnershipTableName);

     var batch = new TableBatchOperation();
     var ownerRow = new TypedEntity(registration.GetKey(), owner.GetKey(), OwnerType);
     var versionRow = new TypedEntity(registration.GetKey(), version, PackageType);
     batch.InsertOrReplace(ownerRow);
     batch.InsertOrReplace(versionRow);

     await table.ExecuteBatchAsync(batch);
 }
示例#21
0
 void PostProcess(TableBatchOperation batchOperation)
 {
     // Execute the batch and verify each operation produced a result; a null
     // result means the corresponding save did not take effect.
     var results = Table.ExecuteBatch(batchOperation);
     foreach (var result in results)
     {
         if (result.Result == null)
         {
             throw new Exception("Error saving");
         }
     }
 }
示例#22
0
        public void Insert()
        {
            // Upserts bank-transfer-detail rows [id, id+100) from the shared
            // DataTable, keyed by "ID-SellNumber" under the fixed partition
            // "88888888", de-duplicating on the composite row key.
            TableBatchOperation batchOperation = new TableBatchOperation();
            Hashtable TransferD = new Hashtable();

            for (int i = id; i < id + 100 && i < Program._DATA_TABLE.Rows.Count; i++)
            {
                var row = Program._DATA_TABLE.Rows[i];
                string Rowkey = (row["ID"].ToString().Trim() + "-" + row["SellNumber"].ToString().Trim());
                if (!TransferD.ContainsKey(Rowkey))
                {
                    try
                    {
                        DynamicTableEntity data = new DynamicTableEntity();
                        data.PartitionKey = "88888888";
                        data.RowKey = Rowkey;

                        // BUG FIX: the original built a local Dictionary of properties
                        // that was never attached to the entity, so rows were written
                        // with no columns. Add straight to data.Properties instead.
                        data.Properties.Add("SellNumber", new EntityProperty(row["SellNumber"].ToString().Trim().PadLeft(8, '0')));
                        data.Properties.Add("ReceiveMoney", new EntityProperty(double.Parse(row["ReceiveMoney"].ToString().ToLower().Trim())));

                        batchOperation.InsertOrMerge(data);
                        recTransferD++;
                        TransferD[Rowkey] = true;
                    }
                    catch
                    {
                        // Best-effort: skip rows with missing or unparseable columns.
                    }
                }
            }

            try
            {
                // Guard on the batch itself: ExecuteBatch throws on an empty batch
                // (the original only checked that the source table had rows).
                if (batchOperation.Count > 0)
                {
                    Program._RECORD++;
                    // The original if/else branches were identical except for the verb.
                    Console.WriteLine("{0} Record {1}-{2}\t\tTotal {3} Records",
                        Program._UPDATE ? "Update" : "Insert", id + 1, id + 100, Program._RECORD * 100);
                    Program._CLOUD_TABLE.ExecuteBatch(batchOperation);
                }
            }
            catch (Exception e)
            {
                Program.WriteErrorLog("Record " + (id + 1) + "-" + (id + 100) + " Error \n" + e.Message + "\n" + e.StackTrace);
            }
        }
        public TableBatchOperationAdapter(
            Func<TableBatchOperation, Task<IList<TableResult>>> executeBatch,
            ITableEntityConverter tableEntityConverter)
        {
            // Guard the required collaborators, then start with an empty batch.
            Require.NotNull(executeBatch, "executeBatch");
            Require.NotNull(tableEntityConverter, "tableEntityConverter");

            m_batch = new TableBatchOperation();
            m_executeBatch = executeBatch;
            m_tableEntityConverter = tableEntityConverter;
        }
示例#24
0
        public void SaveToStorageInBatch(List<Log> list)
        {
            // Converts each Log to its table entity and inserts them in batches of
            // at most 100 (the table-batch limit; the original single batch failed
            // for larger lists). An empty list is a no-op because ExecuteBatch
            // throws on an empty batch. The unused `result` local was removed.
            var batchOperation = new TableBatchOperation();
            foreach (var log in list)
            {
                batchOperation.Insert(GetLogTableEntity(log));

                if (batchOperation.Count == 100)
                {
                    _table.ExecuteBatch(batchOperation);
                    batchOperation = new TableBatchOperation();
                }
            }

            if (batchOperation.Count > 0)
            {
                _table.ExecuteBatch(batchOperation);
            }
        }
        public ActionResult About()
        {
            // Demo action against the "StebraList" table: if the row
            // ("WeAreStebra", "ListItem3") exists, its ImageUrl is replaced with a
            // new value; otherwise the row is created via a single-item batch.
            var storageAccount = CloudStorageAccount.Parse(
               CloudConfigurationManager.GetSetting("StorageConnectionString"));

            var client = storageAccount.CreateCloudTableClient();

            var stebraTable = client.GetTableReference("StebraList");

            // Create the table on first use.
            if (!stebraTable.Exists())
                {
                    stebraTable.Create();
                }

            // Point read by (PartitionKey, RowKey).
            TableOperation retrieveOperation = TableOperation.Retrieve<StebraEntity>("WeAreStebra", "ListItem3");

            // Execute the operation.
            TableResult retrievedResult = stebraTable.Execute(retrieveOperation);

            StebraEntity result = (StebraEntity)retrievedResult.Result;

            if (result != null)
            {
                // Row exists: overwrite its image URL and write it back.
                result.ImageUrl = "http://cdn.buzznet.com/assets/users16/crizz/default/funny-pictures-thriller-kitten-impresses--large-msg-121404159787.jpg";

                //var awesomeStebra = new StebraEntity("WeAreStebra", "ListItem2");
                //awesomeStebra.ImageUrl = "http://rubmint.com/wp-content/plugins/wp-o-matic/cache/6cb1b_funny-pictures-colur-blind-kitteh-finded-yew-a-pumikin.jpg";

                //var coolStebra = new StebraEntity("WeAreStebra", "ListItem2");
                //coolStebra.ImageUrl = "http://rubmint.com/wp-content/plugins/wp-o-matic/cache/6cb1b_funny-pictures-colur-blind-kitteh-finded-yew-a-pumikin.jpg";

                // Despite the name, this is a single InsertOrReplace, not a batch.
                //var batchOperation = new TableBatchOperation();
                TableOperation batchOperation = TableOperation.InsertOrReplace(result);

                //batchOperation.Insert(awesomeStebra);
                //batchOperation.Insert(coolStebra);
                //stebraTable.ExecuteBatch(batchOperation);
                stebraTable.Execute(batchOperation);
            }
            else
            {
                // Row missing: create it through a one-operation batch.
                var awesomeStebra = new StebraEntity("WeAreStebra", "ListItem3");
                awesomeStebra.ImageUrl = "http://rubmint.com/wp-content/plugins/wp-o-matic/cache/6cb1b_funny-pictures-colur-blind-kitteh-finded-yew-a-pumikin.jpg";

                var batchOperation = new TableBatchOperation();
                batchOperation.Insert(awesomeStebra);
                stebraTable.ExecuteBatch(batchOperation);
            }

            return View();
        }
示例#26
0
        public IEnumerable<Usuarios> GetCreataTableStorage()
        {
            // Seed two well-known users into the "usuarios" partition (upserting,
            // so repeated calls are safe), then return the queried user list.
            CloudTable table = getTable();
            table.CreateIfNotExists();

            var seed = new TableBatchOperation();
            seed.InsertOrMerge(new Usuarios("usuarios", "759990") { Nome = "Christiano Donke", Email = "*****@*****.**" });
            seed.InsertOrMerge(new Usuarios("usuarios", "677040") { Nome = "Alexandre Campos", Email = "*****@*****.**" });

            table.ExecuteBatch(seed);

            return this.queryAzureStorage();
        }
 public void Delete()
 {
     // Delete every event row stored under this aggregate's partition (Id)
     // in a single batch (all rows share the partition key).
     var table = CloudStorageAccount.Parse(connectionString)
         .CreateCloudTableClient()
         .GetTableReference(tableName);

     var batch = new TableBatchOperation();
     foreach (var eventRow in table.CreateQuery<EventTableEntity>().Where(e => e.PartitionKey == Id).ToArray())
     {
         batch.Add(TableOperation.Delete(eventRow));
     }
     table.ExecuteBatch(batch);
 }
        public async Task Save(Page[] pages)
        {
            // Inserts each page entity, batching in chunks of 100 (the table-batch
            // limit; the original single batch failed for >100 pages) and skipping
            // the storage call entirely for an empty array, since ExecuteBatchAsync
            // throws on an empty batch.
            var table = await GetTable();

            var batchInsertOperation = new TableBatchOperation();

            foreach (var page in pages)
            {
                batchInsertOperation.Add(TableOperation.Insert(ToPageTableEntity(page)));

                if (batchInsertOperation.Count == 100)
                {
                    await table.ExecuteBatchAsync(batchInsertOperation);
                    batchInsertOperation = new TableBatchOperation();
                }
            }

            if (batchInsertOperation.Count > 0)
            {
                await table.ExecuteBatchAsync(batchInsertOperation);
            }
        }
示例#29
0
        // Archives "Post" documents from a DocumentDB collection into table
        // storage, deleting each 100-document chunk from DocumentDB only after
        // the corresponding table batch fully succeeded (res.Count == operation.Count),
        // so documents are never lost if the table write partially fails.
        private static async Task BackupPostCollection(DocumentCollection dc, DocumentClient client)
        {
            Trace.TraceInformation("Collection '{0}' start.  Time: '{1}'", dc.Id,
                DateTime.Now.ToString(CultureInfo.CurrentCulture));
            try
            {
                // Only documents tagged as posts are backed up.
                var ds =
                    from d in client.CreateDocumentQuery<PostMessage>(dc.DocumentsLink)
                    where d.Type == "Post"
                    select d;

                TableBatchOperation batchOperation = new TableBatchOperation();
                List<dynamic> docList = new List<dynamic>();
                foreach (var d in ds)
                {
                    TablePost c = ModelService.TablePostData(d);
                    batchOperation.Insert(c);
                    docList.Add(d);

                    // Flush at the table-batch limit of 100 operations.
                    if (batchOperation.Count == 100)
                    {
                        var operation = batchOperation;
                        var res = await _retryPolicy.ExecuteAsync(
                            () => _table.ExecuteBatchAsync(operation));
                        batchOperation = new TableBatchOperation();
                        // Delete the source documents only when every insert landed.
                        if (res.Count == operation.Count)
                        {
                            await _iDbService.DocumentDb.BatchDelete(dc, docList);
                            docList = new List<dynamic>();
                            Trace.TraceInformation("inserted");
                        }
                    }
                }
                // Flush the final partial chunk, with the same success check.
                if (batchOperation.Count > 0)
                {
                    var operation = batchOperation;
                    var res = await _retryPolicy.ExecuteAsync(
                        () => _table.ExecuteBatchAsync(operation));
                    if (res.Count == operation.Count)
                    {
                        await _iDbService.DocumentDb.BatchDelete(dc, docList);
                        Trace.TraceInformation("inserted");
                    }
                }
            }
            catch (Exception e)
            {
                // Backup is best-effort per collection; log and continue.
                Trace.TraceError("Error in BackupCollection " + e.Message);
            }
        }
示例#30
0
        /// <summary>
        /// Builds a batch of InsertOrReplace operations for the given search models,
        /// rejecting any entry that lacks a Url or PartitionKey.
        /// </summary>
        static TableBatchOperation SaveBootstrap(IEnumerable<SearchModel> searchModel)
        {
            var batch = new TableBatchOperation();
            foreach (var entry in searchModel)
            {
                var keyMissing = string.IsNullOrWhiteSpace(entry.Url) ||
                                 string.IsNullOrWhiteSpace(entry.PartitionKey);
                if (keyMissing)
                {
                    throw new InvalidOperationException("We require a partition key, and row key");
                }

                batch.InsertOrReplace(entry);
            }

            return batch;
        }
        /// <summary>
        /// Ensures the named table exists and inserts the given rows, flushing
        /// in batches of at most 100 operations (the Azure batch limit).
        /// </summary>
        public static void CreateTable(string connectionString, string tableName, IReadOnlyDictionary <string, object>[] data)
        {
            var table = GetTable(connectionString, tableName);

            table.CreateIfNotExists();

            var pending = new TableBatchOperation();

            foreach (var row in data)
            {
                pending.Insert(new DictionaryTableEntity(Guid.NewGuid().ToString(), row));

                // Flush once the batch is full.
                if (pending.Count >= 100)
                {
                    table.ExecuteBatch(pending);
                    pending.Clear();
                }
            }

            // Flush any leftover rows that did not fill a complete batch.
            if (pending.Count > 0)
            {
                table.ExecuteBatch(pending);
            }
        }
示例#32
0
        /// <summary>
        /// Demonstrates a large batch insert. Rules for batch operations:
        ///  1. Inserts, updates, and deletes may be mixed in a single batch.
        ///  2. A single batch holds at most 100 entities.
        ///  3. Every entity in a batch must share the same partition key.
        ///  4. A query may appear in a batch only as the sole operation.
        ///  5. The total batch payload must be no larger than 4MB.
        /// </summary>
        /// <param name="table">Sample table name</param>
        private static async Task BatchInsertOfCustomerEntitiesAsync(CloudTable table)
        {
            var batch = new TableBatchOperation();

            // Generate 100 sample customers, all under the "Smith" partition,
            // for use by the later query samples.
            for (int n = 0; n < 100; n++)
            {
                var suffix = n.ToString("D4");
                batch.InsertOrMerge(new CustomerEntity("Smith", string.Format("{0}", suffix))
                {
                    Email       = string.Format("{0}@contoso.com", suffix),
                    PhoneNumber = string.Format("425-555-{0}", suffix)
                });
            }

            // Commit all 100 operations in one round trip.
            IList <TableResult> results = await table.ExecuteBatchAsync(batch);

            foreach (var res in results)
            {
                var customerInserted = res.Result as CustomerEntity;
                Console.WriteLine("Inserted entity with\t Etag = {0} and PartitionKey = {1}, RowKey = {2}", customerInserted.ETag, customerInserted.PartitionKey, customerInserted.RowKey);
            }
        }
        /// <summary>
        /// Verifies that a batch whose total payload exceeds the 4MB service limit
        /// is rejected with 413 / RequestBodyTooLarge.
        /// </summary>
        public void TableBatchOver4MBShouldThrow()
        {
            TableBatchOperation batch = new TableBatchOperation();
            string pk = Guid.NewGuid().ToString();

            // 65 entities of ~64KB each (~4.16MB total) pushes the batch past 4MB.
            for (int m = 0; m < 65; m++)
            {
                DynamicReplicatedTableEntity ent = GenerateRandomEnitity(pk);

                // Maximum Entity size is 64KB
                ent.Properties.Add("binary", EntityProperty.GeneratePropertyForByteArray(new byte[64 * 1024]));
                batch.Insert(ent);
            }

            OperationContext opContext = new OperationContext();

            try
            {
                this.repTable.ExecuteBatch(batch, null, opContext);
                Assert.Fail();
            }
            catch (StorageException)
            {
                //
                // ###XXX: NEW SOURCE CODES: The commented out codes were the original codes
                //
                //TestHelper.ValidateResponse(opContext, 1, (int)HttpStatusCode.BadRequest, new string[] { "ContentLengthExceeded" }, "The content length for the requested operation has exceeded the limit (4MB).");
                // The replicated table issues two requests, hence opContext count of 2.
                TestHelper.ValidateResponse(
                    opContext,
                    2,
                    (int)HttpStatusCode.RequestEntityTooLarge,
                    new string[] { "RequestBodyTooLarge" },
                    "The request body is too large and exceeds the maximum permissible limit"
                    );
            }
        }
        /// <summary>
        /// Verifies that a batch containing an entity whose property name exceeds
        /// the 255-character limit is rejected with 400 / PropertyNameTooLong.
        /// (Removed an unused local <c>pk</c> that was never referenced.)
        /// </summary>
        public void TableBatchWithPropertyOver255CharsShouldThrow()
        {
            TableBatchOperation batch = new TableBatchOperation();

            string propName = new string('a', 256);  // propName has 256 chars

            DynamicReplicatedTableEntity ent = new DynamicReplicatedTableEntity("foo", "bar");

            ent.Properties.Add(propName, new EntityProperty("propbar"));
            batch.Insert(ent);

            OperationContext opContext = new OperationContext();

            try
            {
                this.repTable.ExecuteBatch(batch, null, opContext);
                Assert.Fail();
            }
            catch (StorageException)
            {
                TestHelper.ValidateResponse(opContext, 2, (int)HttpStatusCode.BadRequest, new string[] { "PropertyNameTooLong" }, "The property name exceeds the maximum allowed length (255).");
            }
        }
示例#35
0
        /// <summary>
        /// Builds an InsertOrReplace batch from the given entries, stopping before
        /// the estimated payload would reach the 4MB batch limit.
        /// </summary>
        private static TableBatchOperation GetBatchOperation(IEnumerable <CloudEventEntry> entries)
        {
            const int MaxBatchSizeInBytes = 4000000;

            var batch = new TableBatchOperation();
            var estimatedBytes = 0;

            foreach (var entry in entries)
            {
                var entity = entry.CreateTableEntity();

                estimatedBytes += GetApproximateEntitySize(entity);
                if (estimatedBytes >= MaxBatchSizeInBytes)
                {
                    // Adding this entity would risk exceeding the payload limit; stop here.
                    break;
                }

                batch.InsertOrReplace(entity);
            }

            return(batch);
        }
示例#36
0
        /// <summary>
        /// Delete Entities
        /// </summary>
        /// <param name="entities">Entities to delete; materialized once to avoid re-enumerating the source.</param>
        /// <returns>Table Results, or null when there is nothing to delete.</returns>
        public virtual async Task <IEnumerable <TableResult> > Delete(IEnumerable <ITableEntity> entities)
        {
            if (null == entities)
            {
                throw new ArgumentNullException(nameof(entities));
            }

            // Materialize once: the original enumerated the sequence via Any() and
            // again via Batch(), which double-evaluates lazy sources.
            var items = entities as ICollection <ITableEntity> ?? entities.ToList();
            if (items.Count == 0)
            {
                return(null);
            }

            var result = new List <TableResult>();

            // Batch() partitions the entities into service-sized chunks.
            foreach (var batch in this.Batch(items))
            {
                var batchOperation = new TableBatchOperation();
                batch.ToList().ForEach(e => batchOperation.Delete(e));
                var r = await this.reference.ExecuteBatchAsync(batchOperation).ConfigureAwait(false);

                result.AddRange(r);
            }

            return(result);
        }
示例#37
0
        /// <summary>
        /// Builds an InsertOrMerge batch of scheduled-rule entities for an availability
        /// set, all sharing one session id and one randomized execution time within
        /// the scheduler frequency window.
        /// </summary>
        public static TableBatchOperation CreateScheduleEntityForAvailabilitySet(IList <VirtualMachineCrawlerResponse> filteredVmSet,
                                                                                 int schedulerFrequency, List <string> azureFiOperationList, bool domainFlage)
        {
            var batch = new TableBatchOperation();
            var rng = new Random();
            // One shared execution time, randomized within the scheduler window.
            var scheduledAt = DateTime.UtcNow.AddMinutes(rng.Next(1, schedulerFrequency));
            var sessionId = Guid.NewGuid().ToString();

            foreach (var vm in filteredVmSet)
            {
                if (vm == null)
                {
                    continue;
                }

                string fiOperation;
                var actionType = GetActionType(vm.RowKey, azureFiOperationList, vm.State, out fiOperation);

                batch.InsertOrMerge(RuleEngineHelper.ConvertToScheduledRuleEntityForAvailabilitySet(
                    vm, sessionId, actionType, fiOperation, scheduledAt, domainFlage));
            }

            return(batch);
        }
示例#38
0
        /// <summary>
        /// Executes a batch, and if it fails solely because the table does not yet
        /// exist (404 / TableNotFound), creates the table and retries once.
        /// Returns null when the table could not be ensured.
        /// </summary>
        private async Task <IList <TableResult> > ExecuteBatchSafeAsync(TableBatchOperation batch)
        {
            try
            {
                return(await this.ExecuteBatchAsync(batch).ConfigureAwait(false));
            }
            catch (StorageException ex)
            {
                var info = ex.RequestInformation;
                var tableMissing =
                    info != null &&
                    info.HttpStatusCode == 404 && // Not Found
                    info.ExtendedErrorInformation != null &&
                    info.ExtendedErrorInformation.ErrorCode == "TableNotFound";

                // Any other failure is not recoverable here.
                if (!tableMissing)
                {
                    throw;
                }
            }

            // The table was missing: create it, then retry the batch once.
            if (await this.EnsureTableExistsAsync().ConfigureAwait(false))
            {
                return(await this.ExecuteBatchAsync(batch).ConfigureAwait(false));
            }

            return(null);
        }
示例#39
0
        /// <summary>
        /// Deletes the given entities in batches of at most 100 operations
        /// (the Azure Table batch limit).
        /// </summary>
        /// <remarks>
        /// Changed from <c>async void</c> to <c>async Task</c>: async void methods
        /// cannot be awaited and their exceptions are unobservable, so storage
        /// failures were silently crashing or being lost. Callers may now await.
        /// </remarks>
        private static async Task BatchDelete <T>(TraceWriter log, CloudTable table, string partitionKey, List <T> items) where T : ITableEntity
        {
            TableBatchOperation operation = new TableBatchOperation();

            foreach (var item in items)
            {
                operation.Delete(item);

                if (operation.Count >= 100)
                {
                    await table.ExecuteBatchAsync(operation);

                    operation = new TableBatchOperation();
                    log.Info($"Executing deletion batch on 100 items for {partitionKey}");
                }
            }

            // Remove any remaining items, outside of the 100s batches.
            if (operation.Count > 0)
            {
                log.Info($"Executing deletion batch on {operation.Count} items for {partitionKey}");
                await table.ExecuteBatchAsync(operation);
            }
        }
示例#40
0
        /// <summary>
        /// Activity function that persists the orchestrator-supplied list of books
        /// to the "Books" table. Flushes in chunks of 100 because an Azure Table
        /// batch accepts at most 100 operations; an empty list performs no request
        /// (executing an empty batch would throw).
        /// </summary>
        public static async Task SaveData([ActivityTrigger] DurableActivityContext context)
        {
            // retrieves a list of books from the Orchestrator function
            var books = context.GetInput <List <Book> >();

            // create a table storage client
            var client = account.CreateCloudTableClient();
            var table  = client.GetTableReference("Books");

            await table.CreateIfNotExistsAsync();

            TableBatchOperation tableBatchOperations = new TableBatchOperation();

            for (int i = 0; i < books.Count; i++)
            {
                tableBatchOperations.Add(TableOperation.InsertOrMerge(
                                             new BookRepository(books[i].Id)
                {
                    Name = books[i].Name
                }));

                // Flush at the 100-operation batch limit.
                if (tableBatchOperations.Count == 100)
                {
                    await table.ExecuteBatchAsync(tableBatchOperations);
                    tableBatchOperations = new TableBatchOperation();
                }
            }

            if (tableBatchOperations.Count > 0)
            {
                await table.ExecuteBatchAsync(tableBatchOperations);
            }
        }
示例#41
0
        /// <summary>
        /// Demo click handler: batch-upserts 100 sample products into "Urunler".
        /// (async void is acceptable here only because this is a top-level event handler.)
        /// </summary>
        protected async void Button4_Click(object sender, EventArgs e)
        {
            CloudStorageAccount storageAccount = CloudStorageAccount.DevelopmentStorageAccount;
            CloudTableClient    tableClient    = storageAccount.CreateCloudTableClient();
            CloudTable          table          = tableClient.GetTableReference("Urunler");
            TableBatchOperation batchOperation = new TableBatchOperation();

            // BUG FIX: a new Random() was constructed on every iteration; instances
            // created in quick succession share the same time-based seed and produce
            // identical row keys. One instance yields a proper sequence.
            var random = new Random();

            for (int i = 0; i < 100; i++)
            {
                // NOTE(review): each entity gets a distinct PartitionKey, yet an Azure
                // Table batch requires all entities to share one partition key —
                // confirm this batch actually succeeds against the target service.
                batchOperation.InsertOrMerge(new Urun()
                {
                    PartitionKey = "Musteri" + i.ToString(),
                    RowKey       = random.Next(1, 100).ToString(),
                    Adi          = "Deneme",
                    Aciklama     = "Açıklama"
                });
            }
            IList <TableResult> results = await table.ExecuteBatchAsync(batchOperation);

            foreach (var res in results)
            {
                var eklenenUrun = res.Result as Urun;
            }
        }
        /// <summary>
        /// Writes a buffer of rows to the target Azure storage table as a single
        /// batch of inserts, mapping non-key columns to entity properties.
        /// </summary>
        /// <param name="table">Source table metadata used to map columns to entity properties and locate key columns.</param>
        /// <param name="buffer">Rows to write; each object[] is indexed by column ordinal.</param>
        /// <param name="targetTableName">Name of the destination Azure table.</param>
        /// <param name="cancellationToken">Token used to cancel the batch execution.</param>
        /// <remarks>
        /// NOTE(review): the entire buffer goes into one TableBatchOperation. Azure
        /// batches allow at most 100 operations and a single partition key per batch —
        /// confirm callers only pass buffers satisfying both constraints.
        /// </remarks>
        public async Task WriteDataBuffer(Table table, IEnumerable <object[]> buffer, string targetTableName, CancellationToken cancellationToken)
        {
            var connection = GetCloudTableClient();
            var cloudTable = connection.GetTableReference(targetTableName);

            // Create the batch operation.
            var batchOperation = new TableBatchOperation();

            // Ordinals of the key-providing columns (negative when not present).
            var partitionKey = table.GetDeltaColumnOrdinal(TableColumn.EDeltaType.AzurePartitionKey);
            var rowKey       = table.GetDeltaColumnOrdinal(TableColumn.EDeltaType.AzureRowKey);
            var surrogateKey = table.GetDeltaColumnOrdinal(TableColumn.EDeltaType.SurrogateKey);

            foreach (var row in buffer)
            {
                var properties = new Dictionary <string, EntityProperty>();
                for (var i = 0; i < table.Columns.Count; i++)
                {
                    // Key and timestamp columns travel on the entity itself, not as properties.
                    if (table.Columns[i].DeltaType != TableColumn.EDeltaType.AzureRowKey && table.Columns[i].DeltaType != TableColumn.EDeltaType.AzurePartitionKey && table.Columns[i].DeltaType != TableColumn.EDeltaType.TimeStamp)
                    {
                        // Normalize database nulls before property conversion.
                        var value = row[i];
                        if (value == DBNull.Value)
                        {
                            value = null;
                        }
                        properties.Add(table.Columns[i].Name, NewEntityProperty(table.Columns[i].DataType, value));
                    }
                }

                // Partition key falls back to "default"; row key falls back to the
                // zero-padded surrogate key, then to a fresh GUID.
                var partionKeyValue = partitionKey >= 0 ? row[partitionKey] : "default";
                var rowKeyValue     = rowKey >= 0 ? row[rowKey] : surrogateKey >= 0 ? ((long)row[surrogateKey]).ToString("D20") : Guid.NewGuid().ToString();
                var entity          = new DynamicTableEntity(partionKeyValue.ToString(), rowKeyValue.ToString(), "*", properties);

                batchOperation.Insert(entity);
            }
            await cloudTable.ExecuteBatchAsync(batchOperation, null, null, cancellationToken);
        }
        /// <summary>
        /// Upserts a subscription entity for each user/friend pair into the
        /// "userSubscriptions" table.
        /// </summary>
        /// <param name="subscriptionsToAdd">Subscriptions to persist; an empty sequence is a no-op.</param>
        /// <returns>True when the operation completes.</returns>
        public async Task <bool> AddSubscriptions(IEnumerable <UserSubscription> subscriptionsToAdd)
        {
            CloudTableClient tableClient            = storageAccount.CreateCloudTableClient();
            CloudTable       userSubscriptionsTable = tableClient.GetTableReference("userSubscriptions");

            // CreateIfNotExistsAsync is already a no-op for an existing table, so
            // the previous ExistsAsync pre-check was a redundant extra round trip.
            await userSubscriptionsTable.CreateIfNotExistsAsync();

            TableBatchOperation batchOperation = new TableBatchOperation();

            foreach (var subscription in subscriptionsToAdd)
            {
                UserSubscriptionEntity userSubscriptionEntity =
                    new UserSubscriptionEntity(subscription.UserId, subscription.FriendId);
                batchOperation.InsertOrReplace(userSubscriptionEntity);
            }

            // Executing an empty batch throws; skip when there is nothing to add.
            if (batchOperation.Count > 0)
            {
                await userSubscriptionsTable.ExecuteBatchAsync(batchOperation);
            }

            return(true);
        }
示例#44
0
        /// <summary>
        /// Make azure call to write to Table
        /// </summary>
        /// <param name="count">no of calls to be made</param>
        /// <remarks>
        /// NOTE(review): the <paramref name="count"/> parameter is never used — the
        /// method always performs exactly one batch of two inserts. Confirm whether
        /// the intent was to repeat the call <c>count</c> times.
        /// </remarks>
        public static void MakeAzureCallToWriteTableWithSdk(int count)
        {
            // Retrieve storage account from connection string.
            CloudStorageAccount storageAccount =
                CloudStorageAccount.Parse(CloudConfigurationManager.GetSetting("StorageConnectionString"));

            // Create the table client.
            CloudTableClient tableClient = storageAccount.CreateCloudTableClient();

            // Create the table if it doesn't exist.
            CloudTable table = tableClient.GetTableReference("people");

            table.CreateIfNotExists();

            // Create the batch operation.
            TableBatchOperation batchOperation = new TableBatchOperation();

            // Create a customer entity and add it to the table.
            // Ticks suffix keeps row keys unique across repeated runs.
            CustomerEntity customer1 = new CustomerEntity("Smith", "Jeff" + DateTime.UtcNow.Ticks);

            customer1.Email       = "*****@*****.**";
            customer1.PhoneNumber = "425-555-0104";

            // Create another customer entity and add it to the table.
            CustomerEntity customer2 = new CustomerEntity("Smith", "Ben" + DateTime.UtcNow.Ticks);

            customer2.Email       = "*****@*****.**";
            customer2.PhoneNumber = "425-555-0102";

            // Add both customer entities to the batch insert operation.
            batchOperation.Insert(customer1);
            batchOperation.Insert(customer2);

            // Execute the batch operation.
            table.ExecuteBatch(batchOperation);
        }
示例#45
0
        /// <summary>
        /// Executes a batch and translates storage-layer failures into the
        /// application's exception type, mapping well-known HTTP status codes to
        /// specific error codes.
        /// </summary>
        private async Task <List <TableResult> > ExecOrThrowAsync(CloudTable table, TableBatchOperation op)
        {
            try
            {
                var results = await table.ExecuteBatchAsync(op);
                return(results.ToList());
            }
            catch (AzSE ex)
            {
                var status = ex.RequestInformation.HttpStatusCode;

                if (status == (int)HttpStatusCode.Conflict)
                {
                    throw new MeSE(ErrorCode.DuplicateKey, ex);
                }

                if (status == (int)HttpStatusCode.PreconditionFailed)
                {
                    throw new MeSE(ErrorCode.PreconditionFailed, ex);
                }

                if (status == (int)HttpStatusCode.BadRequest)
                {
                    throw new MeSE(ErrorCode.BadRequest, ex);
                }

                // Anything else is wrapped with the raw message.
                throw new MeSE(ex.Message, ex);
            }
        }
示例#46
0
        /// <summary>
        /// Inserts two sample customers into the table with a single batch operation.
        /// </summary>
        private static void doBatchOperation(CloudTable table)
        {
            // Build both sample customers up front with object initializers.
            var customer1 = new CustomerEntity("Smith", "Jeff")
            {
                Email       = "*****@*****.**",
                PhoneNumber = "425-555-0104"
            };

            var customer2 = new CustomerEntity("Smith", "Ben")
            {
                Email       = "*****@*****.**",
                PhoneNumber = "425-555-0102"
            };

            // Queue both inserts into one batch and commit in a single round trip.
            var batchOperation = new TableBatchOperation();
            batchOperation.Insert(customer1);
            batchOperation.Insert(customer2);

            table.ExecuteBatch(batchOperation);
        }
        /// <summary>
        /// Commits one batch of history events for an orchestration instance and
        /// maintains the per-instance sentinel row used to detect split-brain
        /// (concurrent writers for the same instance).
        /// </summary>
        /// <param name="instanceId">Orchestration instance id (partition key of the history rows).</param>
        /// <param name="executionId">Execution id stamped onto the sentinel row.</param>
        /// <param name="historyEventBatch">Batch of history-event operations to commit.</param>
        /// <param name="historyEventNamesBuffer">Comma-terminated event-name list, used only for logging.</param>
        /// <param name="numberOfTotalEvents">Total new events in the overall update, used only for logging.</param>
        /// <param name="eTagValue">Sentinel-row ETag from the previous commit; empty on first commit.</param>
        /// <returns>The sentinel row's ETag after this commit.</returns>
        async Task <string> UploadHistoryBatch(
            string instanceId,
            string executionId,
            TableBatchOperation historyEventBatch,
            StringBuilder historyEventNamesBuffer,
            int numberOfTotalEvents,
            string eTagValue)
        {
            // Adding / updating sentinel entity
            DynamicTableEntity sentinelEntity = new DynamicTableEntity(instanceId, SentinelRowKey)
            {
                Properties =
                {
                    ["ExecutionId"] = new EntityProperty(executionId),
                }
            };

            // With a known ETag, use a conditional Replace so a concurrent writer's
            // commit makes this batch fail with 412 instead of silently clobbering.
            if (!string.IsNullOrEmpty(eTagValue))
            {
                sentinelEntity.ETag = eTagValue;
                historyEventBatch.Replace(sentinelEntity);
            }
            else
            {
                historyEventBatch.InsertOrReplace(sentinelEntity);
            }

            Stopwatch           stopwatch = Stopwatch.StartNew();
            IList <TableResult> tableResultList;

            try
            {
                tableResultList = await this.historyTable.ExecuteBatchAsync(
                    historyEventBatch,
                    this.StorageTableRequestOptions,
                    null);
            }
            catch (StorageException ex)
            {
                // 412 means another writer updated the sentinel since we read its
                // ETag — i.e. a split-brain situation. Log it, then rethrow.
                if (ex.RequestInformation.HttpStatusCode == (int)HttpStatusCode.PreconditionFailed)
                {
                    AnalyticsEventSource.Log.SplitBrainDetected(
                        this.storageAccountName,
                        this.taskHubName,
                        instanceId,
                        executionId,
                        historyEventBatch.Count,
                        numberOfTotalEvents,
                        historyEventNamesBuffer.ToString(0, historyEventNamesBuffer.Length - 1), // remove trailing comma
                        stopwatch.ElapsedMilliseconds,
                        eTagValue);
                }

                throw;
            }

            this.stats.StorageRequests.Increment();
            this.stats.TableEntitiesWritten.Increment(historyEventBatch.Count);

            // Recover the sentinel row's new ETag from the batch results; scan from
            // the end since the sentinel was the last operation appended.
            if (tableResultList != null)
            {
                for (int i = tableResultList.Count - 1; i >= 0; i--)
                {
                    if (((DynamicTableEntity)tableResultList[i].Result).RowKey == SentinelRowKey)
                    {
                        eTagValue = tableResultList[i].Etag;
                        break;
                    }
                }
            }

            AnalyticsEventSource.Log.AppendedInstanceHistory(
                this.storageAccountName,
                this.taskHubName,
                instanceId,
                executionId,
                historyEventBatch.Count,
                numberOfTotalEvents,
                historyEventNamesBuffer.ToString(0, historyEventNamesBuffer.Length - 1), // remove trailing comma
                stopwatch.ElapsedMilliseconds,
                this.GetETagValue(instanceId));

            return(eTagValue);
        }
示例#48
0
        /// <summary>
        /// Recreates the customer demo table and fills it with one million random
        /// customer records, batching inserts per company partition and executing
        /// the batches with a throttled task runner.
        /// </summary>
        /// <param name="log">Destination for progress output.</param>
        private static void GenerateCustomerRecords(TextWriter log)
        {
            if (!ConfirmOperation("YOUR STORAGE TABLE WILL BE RECREATED! ALL EXISTING DATA WILL BE LOST! Are you sure?", log))
            {
                return;
            }

            var cloudTableClient = CloudStorageAccount.CreateCloudTableClient();
            var cloudTable       = cloudTableClient.GetTableReference(DemoSettings.Storage.CustomerTableName);

            if (cloudTable.DeleteIfExists())
            {
                log.WriteLine("DELETING PREEXISTING TABLE '{0}'", cloudTable.Name);
            }
            // A just-deleted table cannot be recreated immediately; retry until the
            // service releases the name.
            while (true)
            {
                try
                {
                    cloudTable.CreateIfNotExists();
                    break;
                }
                catch
                {
                    // BUG FIX: the Task returned by Task.Delay was previously
                    // discarded, so the retry loop spun without waiting at all.
                    // Block synchronously — this method is synchronous by design.
                    Task.Delay(TimeSpan.FromSeconds(5)).Wait();
                }
                log.WriteLine("RETRY TABLE CREATION");
            }

            int       count        = 1;
            const int totalRecords = 1000000;

            string[] firstNames = File.ReadAllLines("CSV_Database_of_First_Names.csv");
            string[] lastNames  = File.ReadAllLines("CSV_Database_of_Last_Names.csv");
            var      rand       = new Random(DateTime.UtcNow.Millisecond);
            // One pending batch per company so that every batch shares a partition key.
            var      batches    = new Dictionary <string, TableBatchOperation>();

            var tasks = new List <Task>();

            while (count <= totalRecords)
            {
                count++;
                var company = "Company " + rand.Next(1, 11);
                lock (BatchLock)
                {
                    if (!batches.ContainsKey(company))
                    {
                        batches.Add(company, new TableBatchOperation());
                    }
                }

                TableBatchOperation tableBatchOperation;

                lock (BatchLock)
                {
                    var customer = new Customer(company, count.ToString(CultureInfo.InvariantCulture))
                    {
                        Value        = (rand.NextDouble() - 0.5) * 99999.0,
                        ContractDate = DateTime.Now,
                        Name         = firstNames[rand.Next(0, firstNames.Length)] + " " + lastNames[rand.Next(0, lastNames.Length)]
                    };

                    tableBatchOperation = batches[company];

                    tableBatchOperation.Insert(customer);

                    // Keep accumulating until this company's batch is full.
                    if (tableBatchOperation.Count < Batchsize)
                    {
                        continue;
                    }

                    // Swap in a fresh batch; the full one is captured by the task below.
                    batches[company] = new TableBatchOperation();
                }

                tasks.Add(new Task(() => cloudTable.ExecuteBatch(tableBatchOperation)));
            }

            // Queue tasks for the partially-filled batches left at the end.
            var remainingTasks = batches
                                 .Where(keyValuePair => keyValuePair.Value.Count > 0)
                                 .Select(keyValuePair => new Task(() => cloudTable.ExecuteBatch(keyValuePair.Value)));

            tasks.AddRange(remainingTasks);

            // No tasks are running before this line. The TaskRunner will throttle to a specific # tasks
            const int tasksInParallel = 10;
            var       taskRunner      = new TaskRunner(tasks, tasksInParallel);

            taskRunner.TaskCompleted += (o, e) => log.WriteLine("Tasks running: {0}. Average time: {1}. Tasks completed {2} . Total Time {3}.",
                                                                e.TasksInParallel,
                                                                CalculateAverage(e.TaskTimeTotal, e.TasksCompleted).ToString("g"),
                                                                e.TasksCompleted,
                                                                e.TaskTimeTotal.ToString("g"));
            // Run all tasks
            taskRunner.WaitAll();
        }
        /// <inheritdoc />
        /// <remarks>
        /// Commits new history events to the history table in batches of up to 99
        /// operations (one slot is reserved for the sentinel row added by
        /// UploadHistoryBatch), then updates the per-instance status row. History
        /// must be fully persisted before the instance update — callers rely on
        /// that ordering.
        /// </remarks>
        public override async Task UpdateStateAsync(OrchestrationRuntimeState runtimeState, string instanceId, string executionId)
        {
            IList <HistoryEvent> newEvents = runtimeState.NewEvents;
            IList <HistoryEvent> allEvents = runtimeState.Events;

            var newEventListBuffer = new StringBuilder(4000);
            var historyEventBatch  = new TableBatchOperation();

            EventType?orchestratorEventType = null;

            // ETag of the instance's sentinel row; used for optimistic concurrency.
            string eTagValue = this.GetETagValue(instanceId);

            // Status row for the instance (empty row key = the instance row itself).
            DynamicTableEntity orchestrationInstanceUpdate = new DynamicTableEntity(instanceId, "")
            {
                Properties =
                {
                    ["CustomStatus"]    = new EntityProperty(runtimeState.Status),
                    ["ExecutionId"]     = new EntityProperty(executionId),
                    ["LastUpdatedTime"] = new EntityProperty(newEvents.Last().Timestamp),
                }
            };

            for (int i = 0; i < newEvents.Count; i++)
            {
                HistoryEvent       historyEvent = newEvents[i];
                DynamicTableEntity entity       = this.tableEntityConverter.ConvertToTableEntity(historyEvent);

                await this.CompressLargeMessageAsync(entity);

                newEventListBuffer.Append(historyEvent.EventType.ToString()).Append(',');

                // The row key is the sequence number, which represents the chronological ordinal of the event.
                long sequenceNumber = i + (allEvents.Count - newEvents.Count);
                entity.RowKey       = sequenceNumber.ToString("X16");
                entity.PartitionKey = instanceId;
                entity.Properties["ExecutionId"] = new EntityProperty(executionId);

                // Replacement can happen if the orchestration episode gets replayed due to a commit failure in one of the steps below.
                historyEventBatch.InsertOrReplace(entity);

                // Table storage only supports inserts of up to 100 entities at a time.
                if (historyEventBatch.Count == 99)
                {
                    eTagValue = await this.UploadHistoryBatch(instanceId, executionId, historyEventBatch, newEventListBuffer, newEvents.Count, eTagValue);

                    // Reset local state for the next batch
                    newEventListBuffer.Clear();
                    historyEventBatch.Clear();
                }

                // Monitor for orchestration instance events
                switch (historyEvent.EventType)
                {
                case EventType.ExecutionStarted:
                    orchestratorEventType = historyEvent.EventType;
                    ExecutionStartedEvent executionStartedEvent = (ExecutionStartedEvent)historyEvent;
                    orchestrationInstanceUpdate.Properties["Name"]          = new EntityProperty(executionStartedEvent.Name);
                    orchestrationInstanceUpdate.Properties["Version"]       = new EntityProperty(executionStartedEvent.Version);
                    orchestrationInstanceUpdate.Properties["CreatedTime"]   = new EntityProperty(executionStartedEvent.Timestamp);
                    orchestrationInstanceUpdate.Properties["RuntimeStatus"] = new EntityProperty(OrchestrationStatus.Running.ToString());
                    this.SetTablePropertyForMessage(entity, orchestrationInstanceUpdate, InputProperty, InputProperty, executionStartedEvent.Input);
                    break;

                case EventType.ExecutionCompleted:
                    orchestratorEventType = historyEvent.EventType;
                    ExecutionCompletedEvent executionCompleted = (ExecutionCompletedEvent)historyEvent;
                    this.SetTablePropertyForMessage(entity, orchestrationInstanceUpdate, ResultProperty, OutputProperty, executionCompleted.Result);
                    orchestrationInstanceUpdate.Properties["RuntimeStatus"] = new EntityProperty(executionCompleted.OrchestrationStatus.ToString());
                    break;

                case EventType.ExecutionTerminated:
                    orchestratorEventType = historyEvent.EventType;
                    ExecutionTerminatedEvent executionTerminatedEvent = (ExecutionTerminatedEvent)historyEvent;
                    this.SetTablePropertyForMessage(entity, orchestrationInstanceUpdate, InputProperty, OutputProperty, executionTerminatedEvent.Input);
                    orchestrationInstanceUpdate.Properties["RuntimeStatus"] = new EntityProperty(OrchestrationStatus.Terminated.ToString());
                    break;

                case EventType.ContinueAsNew:
                    orchestratorEventType = historyEvent.EventType;
                    ExecutionCompletedEvent executionCompletedEvent = (ExecutionCompletedEvent)historyEvent;
                    this.SetTablePropertyForMessage(entity, orchestrationInstanceUpdate, ResultProperty, OutputProperty, executionCompletedEvent.Result);
                    orchestrationInstanceUpdate.Properties["RuntimeStatus"] = new EntityProperty(OrchestrationStatus.ContinuedAsNew.ToString());
                    break;
                }
            }

            // First persistence step is to commit history to the history table. Messages must come after.
            if (historyEventBatch.Count > 0)
            {
                eTagValue = await this.UploadHistoryBatch(instanceId, executionId, historyEventBatch, newEventListBuffer, newEvents.Count, eTagValue);
            }

            // Terminal orchestrations no longer need a cached ETag; live ones keep
            // the latest value for the next commit's concurrency check.
            if (orchestratorEventType == EventType.ExecutionCompleted ||
                orchestratorEventType == EventType.ExecutionFailed ||
                orchestratorEventType == EventType.ExecutionTerminated)
            {
                this.eTagValues.TryRemove(instanceId, out _);
            }
            else
            {
                this.eTagValues[instanceId] = eTagValue;
            }

            Stopwatch orchestrationInstanceUpdateStopwatch = Stopwatch.StartNew();

            await this.instancesTable.ExecuteAsync(TableOperation.InsertOrMerge(orchestrationInstanceUpdate));

            this.stats.StorageRequests.Increment();
            this.stats.TableEntitiesWritten.Increment();

            AnalyticsEventSource.Log.InstanceStatusUpdate(
                this.storageAccountName,
                this.taskHubName,
                instanceId,
                executionId,
                orchestratorEventType?.ToString() ?? string.Empty,
                orchestrationInstanceUpdateStopwatch.ElapsedMilliseconds);
        }
        /// <summary>
        /// Inserts a set of new data entries into the table as a single batch transaction.
        /// Fails if any of the entries already exists.
        /// </summary>
        /// <param name="collection">Data entries to be inserted into the table.</param>
        /// <returns>Completion promise for this storage operation.</returns>
        /// <exception cref="ArgumentNullException">Thrown when <paramref name="collection"/> is null.</exception>
        /// <exception cref="ArgumentOutOfRangeException">Thrown when the batch exceeds the maximum allowed row count.</exception>
        public async Task BulkInsertTableEntries(IReadOnlyCollection <T> collection)
        {
            const string operation = "BulkInsertTableEntries";

            if (collection == null)
            {
                throw new ArgumentNullException(nameof(collection));
            }

            // Azure Table batch transactions are limited in size; reject oversized requests up front.
            if (collection.Count > AzureTableDefaultPolicies.MAX_BULK_UPDATE_ROWS)
            {
                throw new ArgumentOutOfRangeException(nameof(collection), collection.Count,
                                                      "Too many rows for bulk update - max " + AzureTableDefaultPolicies.MAX_BULK_UPDATE_ROWS);
            }

            // Nothing to do; an empty batch would fail on the service side.
            if (collection.Count == 0)
            {
                return;
            }

            var startTime = DateTime.UtcNow;

            if (Logger.IsVerbose2)
            {
                Logger.Verbose2("Bulk inserting {0} entries to {1} table", collection.Count, TableName);
            }

            try
            {
                // WAS:
                // svc.AttachTo(TableName, entry);
                // svc.UpdateObject(entry);
                // SaveChangesOptions.None | SaveChangesOptions.Batch,
                // SaveChangesOptions.None == Insert-or-merge operation, SaveChangesOptions.Batch == Batch transaction
                // http://msdn.microsoft.com/en-us/library/hh452241.aspx

                var entityBatch = new TableBatchOperation();
                foreach (T entry in collection)
                {
                    entityBatch.Insert(entry);
                }

                try
                {
                    // Execute the whole batch as a single entity-group transaction.
                    // http://msdn.microsoft.com/en-us/library/hh452241.aspx
                    await Task<IList<TableResult>>.Factory.FromAsync(
                        tableReference.BeginExecuteBatch,
                        tableReference.EndExecuteBatch,
                        entityBatch,
                        null);
                }
                catch (Exception exc)
                {
                    // NOTE(review): the failure is logged but swallowed, so callers observe success
                    // even when the batch failed — confirm this best-effort behavior is intended.
                    Logger.Warn(ErrorCode.AzureTable_37,
                                $"Intermediate error bulk inserting {collection.Count} entries in the table {TableName}", exc);
                }
            }
            finally
            {
                // Always record/alert on slow storage access, even on failure.
                CheckAlertSlowAccess(startTime, operation);
            }
        }
示例#51
0
        /// <summary>
        /// Builds the HTTP request and payload stream for a table batch ("$batch") request,
        /// writing each operation in the batch as an OData MIME part.
        /// </summary>
        /// <returns>The populated request and the stream holding the batch payload.</returns>
        internal static Tuple <HttpWebRequest, Stream> BuildRequestForTableBatchOperation(Uri uri, UriQueryBuilder builder, IBufferManager bufferManager, int?timeout, string tableName, TableBatchOperation batch, bool useVersionHeader, OperationContext ctx, TableRequestOptions options, string accountName)
        {
            HttpWebRequest     msg           = BuildRequestCore(NavigationHelper.AppendPathToSingleUri(uri, "$batch"), builder, "POST", timeout, useVersionHeader, ctx);
            TablePayloadFormat payloadFormat = options.PayloadFormat.Value;

            Logger.LogInformational(ctx, SR.PayloadFormat, payloadFormat);

            // create the writer, indent for readability of the examples.
            ODataMessageWriterSettings writerSettings = new ODataMessageWriterSettings()
            {
                CheckCharacters = false,                              // sets this flag on the XmlWriter for ATOM
                Version         = TableConstants.ODataProtocolVersion // set the Odata version to use when writing the entry
            };

            HttpWebRequestAdapterMessage adapterMsg = new HttpWebRequestAdapterMessage(msg, bufferManager);

            // Start Batch
            ODataMessageWriter odataWriter = new ODataMessageWriter(adapterMsg, writerSettings);
            ODataBatchWriter   batchWriter = odataWriter.CreateODataBatchWriter();

            batchWriter.WriteStartBatch();

            // A batch containing exactly one Retrieve is treated as a query rather than a write.
            bool isQuery = batch.Count == 1 && batch[0].OperationType == TableOperationType.Retrieve;

            // Query operations should not be inside changeset in payload
            if (!isQuery)
            {
                // Start Operation
                batchWriter.WriteStartChangeset();
                batchWriter.Flush();
            }

            foreach (TableOperation operation in batch)
            {
                string httpMethod = operation.HttpMethod;

                // MERGE is the non-standard verb the Table service uses for merge semantics;
                // it is not supported with encryption, hence the assertion.
                if (operation.OperationType == TableOperationType.Merge || operation.OperationType == TableOperationType.InsertOrMerge)
                {
                    options.AssertNoEncryptionPolicyOrStrictMode();
                    httpMethod = "MERGE";
                }

                ODataBatchOperationRequestMessage mimePartMsg = batchWriter.CreateOperationRequestMessage(httpMethod, operation.GenerateRequestURI(uri, tableName));
                SetAcceptAndContentTypeForODataBatchMessage(mimePartMsg, payloadFormat);

                // etag — concurrency precondition for operations that modify an existing entity.
                if (operation.OperationType == TableOperationType.Delete ||
                    operation.OperationType == TableOperationType.Replace ||
                    operation.OperationType == TableOperationType.Merge)
                {
                    mimePartMsg.SetHeader("If-Match", operation.Entity.ETag);
                }

                // Prefer header — controls whether the service echoes the inserted entity back.
                if (operation.OperationType == TableOperationType.Insert)
                {
                    mimePartMsg.SetHeader("Prefer", operation.EchoContent ? "return-content" : "return-no-content");
                }

                // Delete and Retrieve carry no entity body; everything else serializes the entity.
                if (operation.OperationType != TableOperationType.Delete && operation.OperationType != TableOperationType.Retrieve)
                {
                    using (ODataMessageWriter batchEntryWriter = new ODataMessageWriter(mimePartMsg, writerSettings, new TableStorageModel(accountName)))
                    {
                        // Write entity
                        ODataWriter entryWriter = batchEntryWriter.CreateODataEntryWriter();
                        WriteOdataEntity(operation.Entity, operation.OperationType, ctx, entryWriter, options);
                    }
                }
            }

            if (!isQuery)
            {
                // End Operation
                batchWriter.WriteEndChangeset();
            }

            // End Batch
            batchWriter.WriteEndBatch();
            batchWriter.Flush();

            return(new Tuple <HttpWebRequest, Stream>(adapterMsg.GetPopulatedMessage(), adapterMsg.GetStream()));
        }
        /// <summary>
        /// Parses the multipart batch response and produces one <see cref="TableResult"/> per
        /// operation in the batch, throwing a StorageException when an operation failed.
        /// </summary>
        /// <param name="result">List the per-operation results are appended to and returned.</param>
        /// <param name="batch">The batch that produced this response; operations match parts by index.</param>
        /// <param name="cmd">REST command carrying the response stream and the error parser.</param>
        /// <param name="resp">Raw HTTP response for the whole batch.</param>
        /// <param name="ctx">Operation context for logging.</param>
        /// <param name="options">Request options (payload format etc.).</param>
        /// <param name="accountName">Account name used to resolve the OData model.</param>
        /// <returns>The populated <paramref name="result"/> list.</returns>
        internal static IList <TableResult> TableBatchOperationPostProcess(IList <TableResult> result, TableBatchOperation batch, RESTCommand <IList <TableResult> > cmd, HttpWebResponse resp, OperationContext ctx, TableRequestOptions options, string accountName)
        {
            ODataMessageReaderSettings readerSettings = new ODataMessageReaderSettings();

            readerSettings.MessageQuotas = new ODataMessageQuotas()
            {
                MaxPartsPerBatch = TableConstants.TableServiceMaxResults, MaxReceivedMessageSize = TableConstants.TableServiceMaxPayload
            };

            using (ODataMessageReader responseReader = new ODataMessageReader(new HttpResponseAdapterMessage(resp, cmd.ResponseStream), readerSettings))
            {
                // create a reader
                ODataBatchReader reader = responseReader.CreateODataBatchReader();

                // Initial => changesetstart
                if (reader.State == ODataBatchReaderState.Initial)
                {
                    reader.Read();
                }

                if (reader.State == ODataBatchReaderState.ChangesetStart)
                {
                    // ChangeSetStart => Operation
                    reader.Read();
                }

                int  index          = 0;
                bool failError      = false;
                bool failUnexpected = false;

                while (reader.State == ODataBatchReaderState.Operation)
                {
                    TableOperation currentOperation = batch[index];
                    TableResult    currentResult    = new TableResult()
                    {
                        Result = currentOperation.Entity
                    };
                    result.Add(currentResult);

                    ODataBatchOperationResponseMessage mimePartResponseMessage = reader.CreateOperationResponseMessage();
                    string contentType = mimePartResponseMessage.GetHeader(Constants.ContentTypeElement);

                    currentResult.HttpStatusCode = mimePartResponseMessage.StatusCode;

                    // Validate Status Code.
                    if (currentOperation.OperationType == TableOperationType.Insert)
                    {
                        failError = mimePartResponseMessage.StatusCode == (int)HttpStatusCode.Conflict;
                        if (currentOperation.EchoContent)
                        {
                            failUnexpected = mimePartResponseMessage.StatusCode != (int)HttpStatusCode.Created;
                        }
                        else
                        {
                            failUnexpected = mimePartResponseMessage.StatusCode != (int)HttpStatusCode.NoContent;
                        }
                    }
                    else if (currentOperation.OperationType == TableOperationType.Retrieve)
                    {
                        if (mimePartResponseMessage.StatusCode == (int)HttpStatusCode.NotFound)
                        {
                            // A missing entity on Retrieve is not an error; move to the next part.
                            index++;

                            // Operation => next
                            reader.Read();
                            continue;
                        }

                        failUnexpected = mimePartResponseMessage.StatusCode != (int)HttpStatusCode.OK;
                    }
                    else
                    {
                        failError      = mimePartResponseMessage.StatusCode == (int)HttpStatusCode.NotFound;
                        failUnexpected = mimePartResponseMessage.StatusCode != (int)HttpStatusCode.NoContent;
                    }

                    if (failError)
                    {
                        // If the parse error is null, then don't get the extended error information and the StorageException will contain SR.ExtendedErrorUnavailable message.
                        if (cmd.ParseError != null)
                        {
                            cmd.CurrentResult.ExtendedErrorInformation = cmd.ParseError(mimePartResponseMessage.GetStream(), resp, contentType);
                        }

                        cmd.CurrentResult.HttpStatusCode = mimePartResponseMessage.StatusCode;

                        // ExtendedErrorInformation stays null when ParseError is absent; guard before
                        // dereferencing it to avoid a NullReferenceException while building the message.
                        if (cmd.CurrentResult.ExtendedErrorInformation != null &&
                            !string.IsNullOrEmpty(cmd.CurrentResult.ExtendedErrorInformation.ErrorMessage))
                        {
                            string msg     = cmd.CurrentResult.ExtendedErrorInformation.ErrorMessage;
                            int    newline = msg.IndexOf("\n", StringComparison.Ordinal);

                            // Guard against messages with no newline (Substring would throw on -1).
                            cmd.CurrentResult.HttpStatusMessage = newline >= 0 ? msg.Substring(0, newline) : msg;
                        }
                        else
                        {
                            cmd.CurrentResult.HttpStatusMessage = mimePartResponseMessage.StatusCode.ToString(CultureInfo.InvariantCulture);
                        }

                        // Hard failures (e.g. Conflict on Insert, NotFound on update) are not retryable.
                        throw new StorageException(
                                  cmd.CurrentResult,
                                  cmd.CurrentResult.ExtendedErrorInformation != null ? cmd.CurrentResult.ExtendedErrorInformation.ErrorMessage : SR.ExtendedErrorUnavailable,
                                  null)
                              {
                                  IsRetryable = false
                              };
                    }

                    if (failUnexpected)
                    {
                        // If the parse error is null, then don't get the extended error information and the StorageException will contain SR.ExtendedErrorUnavailable message.
                        if (cmd.ParseError != null)
                        {
                            cmd.CurrentResult.ExtendedErrorInformation = cmd.ParseError(mimePartResponseMessage.GetStream(), resp, contentType);
                        }

                        cmd.CurrentResult.HttpStatusCode = mimePartResponseMessage.StatusCode;

                        // Same null guard as above: ExtendedErrorInformation may legitimately be null here.
                        if (cmd.CurrentResult.ExtendedErrorInformation != null &&
                            !string.IsNullOrEmpty(cmd.CurrentResult.ExtendedErrorInformation.ErrorMessage))
                        {
                            string msg     = cmd.CurrentResult.ExtendedErrorInformation.ErrorMessage;
                            int    newline = msg.IndexOf("\n", StringComparison.Ordinal);

                            cmd.CurrentResult.HttpStatusMessage = newline >= 0 ? msg.Substring(0, newline) : msg;
                        }
                        else
                        {
                            cmd.CurrentResult.HttpStatusMessage = mimePartResponseMessage.StatusCode.ToString(CultureInfo.InvariantCulture);
                        }

                        string indexString = Convert.ToString(index, CultureInfo.InvariantCulture);

                        // Attempt to extract index of failing entity from extended error info
                        if (cmd.CurrentResult.ExtendedErrorInformation != null &&
                            !string.IsNullOrEmpty(cmd.CurrentResult.ExtendedErrorInformation.ErrorMessage))
                        {
                            string tempIndex = TableRequest.ExtractEntityIndexFromExtendedErrorInformation(cmd.CurrentResult);
                            if (!string.IsNullOrEmpty(tempIndex))
                            {
                                indexString = tempIndex;
                            }
                        }

                        // Unexpected status codes are treated as transient and retryable.
                        throw new StorageException(cmd.CurrentResult, string.Format(CultureInfo.CurrentCulture, SR.BatchErrorInOperation, indexString), null)
                              {
                                  IsRetryable = true
                              };
                    }

                    // Update etag
                    if (!string.IsNullOrEmpty(mimePartResponseMessage.GetHeader(Constants.HeaderConstants.EtagHeader)))
                    {
                        currentResult.Etag = mimePartResponseMessage.GetHeader(Constants.HeaderConstants.EtagHeader);

                        if (currentOperation.Entity != null)
                        {
                            currentOperation.Entity.ETag = currentResult.Etag;
                        }
                    }

                    // Parse Entity if needed
                    if (currentOperation.OperationType == TableOperationType.Retrieve || (currentOperation.OperationType == TableOperationType.Insert && currentOperation.EchoContent))
                    {
                        if (mimePartResponseMessage.GetHeader(Constants.ContentTypeElement).Contains(Constants.JsonNoMetadataAcceptHeaderValue))
                        {
                            ReadEntityUsingJsonParser(currentResult, currentOperation, mimePartResponseMessage.GetStream(), ctx, options);
                        }
                        else
                        {
                            ReadOdataEntity(currentResult, currentOperation, mimePartResponseMessage, ctx, readerSettings, accountName, options);
                        }
                    }
                    else if (currentOperation.OperationType == TableOperationType.Insert)
                    {
                        currentOperation.Entity.Timestamp = ParseETagForTimestamp(currentResult.Etag);
                    }

                    index++;

                    // Operation =>
                    reader.Read();
                }
            }

            return(result);
        }
示例#53
0
 /// <summary>
 /// Appends an insert-or-replace operation for the given entity to the batch.
 /// </summary>
 /// <param name="batchOperation">The batch the operation is added to.</param>
 /// <param name="entity">The entity to insert or replace.</param>
 public void Execute(TableBatchOperation batchOperation, TableEntity entity)
 {
     // Explicit form of the batch's InsertOrReplace convenience overload.
     batchOperation.Add(TableOperation.InsertOrReplace(entity));
 }
示例#54
0
 /// <summary>
 /// Executes the batch by delegating to the wrapped event-proxy table.
 /// </summary>
 public override Task <IList <TableResult> > ExecuteBatchAsync(TableBatchOperation batch, TableRequestOptions requestOptions = null, OperationContext operationContext = null)
 {
     // Pure pass-through; all arguments are forwarded unchanged.
     Task <IList <TableResult> > pending = tableCallEventProxy.ExecuteBatchAsync(batch, requestOptions, operationContext);
     return pending;
 }
示例#55
0
        // Seeds the old/new/reference in-memory tables with the initial data set and starts
        // the service and migrator machines for the migration model.
        // NOTE(review): async void — exceptions escaping this method are unobservable by callers;
        // presumably the P# machine framework requires this signature. Confirm before changing.
        async void Initialize()
        {
            // TODO: Figure out how to verify the transition from
            // "old table only" to "in migration" to "new table only".
            // (I don't think this is the biggest risk, but I'm still
            // interested in verifying it.)
            // I assume once we do that, the possibility of insertions while
            // we're in "old table only" will model the ability to start
            // from a nonempty table.

            // Configuration service plus the tables under migration and the
            // reference table used to verify results.
            configService = new InMemoryConfigurationService <MTableConfiguration>(
                MasterMigratingTable.INITIAL_CONFIGURATION);
            oldTable       = new InMemoryTable();
            newTable       = new InMemoryTable();
            referenceTable = new InMemoryTableWithHistory();

#if false
            // Second partition from the example in:
            // https://microsoft.sharepoint.com/teams/toolsforeng/Shared%20Documents/ContentRepository/LiveMigration/Migration_slides.pptx
            MTableEntity eMeta = new MTableEntity {
                PartitionKey   = MigrationModel.SINGLE_PARTITION_KEY,
                RowKey         = MigratingTable.ROW_KEY_PARTITION_META,
                partitionState = MTablePartitionState.SWITCHED,
            };
            MTableEntity e0       = TestUtils.CreateTestMTableEntity("0", "orange");
            MTableEntity e1old    = TestUtils.CreateTestMTableEntity("1", "red");
            MTableEntity e2new    = TestUtils.CreateTestMTableEntity("2", "green");
            MTableEntity e3old    = TestUtils.CreateTestMTableEntity("3", "blue");
            MTableEntity e3new    = TestUtils.CreateTestMTableEntity("3", "azure");
            MTableEntity e4old    = TestUtils.CreateTestMTableEntity("4", "yellow");
            MTableEntity e4new    = TestUtils.CreateTestMTableEntity("4", null, true);
            var          oldBatch = new TableBatchOperation();
            oldBatch.InsertOrReplace(eMeta);
            oldBatch.InsertOrReplace(e0);
            oldBatch.InsertOrReplace(e1old);
            oldBatch.InsertOrReplace(e3old);
            oldBatch.InsertOrReplace(e4old);
            IList <TableResult> oldTableResult = await oldTable.ExecuteBatchAsync(oldBatch);
            await ExecuteExportedMirrorBatchAsync(oldBatch, oldTableResult);

            var newBatch = new TableBatchOperation();
            newBatch.InsertOrReplace(e0);
            newBatch.InsertOrReplace(e2new);
            newBatch.InsertOrReplace(e3new);
            newBatch.InsertOrReplace(e4new);
            IList <TableResult> newTableResult = await newTable.ExecuteBatchAsync(newBatch);

            // Allow rows to overwrite rather than composing the virtual ETags manually.
            // InsertOrReplace doesn't use the ETag, so we don't care that the ETag was mutated by the original batch.
            await ExecuteExportedMirrorBatchAsync(newBatch, newTableResult);
#endif

            // Start with the old table now.
            var batch = new TableBatchOperation();
            batch.InsertOrReplace(TestUtils.CreateTestEntity("0", "orange"));
            batch.InsertOrReplace(TestUtils.CreateTestEntity("1", "red"));
            batch.InsertOrReplace(TestUtils.CreateTestEntity("3", "blue"));
            batch.InsertOrReplace(TestUtils.CreateTestEntity("4", "yellow"));
            IList <TableResult> oldTableResult = await oldTable.ExecuteBatchAsync(batch);

            // InsertOrReplace doesn't use the ETag, so we don't care that the ETag was mutated by the original batch.
            await referenceTable.ExecuteMirrorBatchAsync(batch, oldTableResult);

            //CreateMonitor(typeof(RunningServiceMachinesMonitor));

            // Spin up the service machines and the migrator machine that drive the model.
            for (int i = 0; i < MigrationModel.NUM_SERVICE_MACHINES; i++)
            {
                InitializeAppMachine(CreateMachine(typeof(ServiceMachine)));
            }

            InitializeAppMachine(CreateMachine(typeof(MigratorMachine)));

            // Signal (to self) that table setup is complete.
            Send(Id, new TablesMachineInitializedEvent());
        }
示例#56
0
 /// <summary>
 /// Executes the batch as a mirror batch with no prior response to replay.
 /// </summary>
 public override Task <IList <TableResult> > ExecuteBatchAsync(TableBatchOperation batch,
                                                               TableRequestOptions requestOptions = null, OperationContext operationContext = null)
 {
     // A plain batch is simply a mirror batch with a null original response.
     var mirrored = ExecuteMirrorBatchAsync(batch, null, requestOptions, operationContext);
     return mirrored;
 }
示例#57
0
 /// <summary>
 /// Executes <paramref name="originalBatch"/> against this table, mirroring a batch that
 /// was already executed elsewhere.
 /// </summary>
 /// <param name="originalBatch">The batch previously executed against the original table.</param>
 /// <param name="originalResponse">Per-operation results from the original execution; may be null.
 /// NOTE(review): presumably used by implementations to replay outcomes/ETags — confirm in the
 /// concrete implementation.</param>
 /// <param name="requestOptions">Optional request options.</param>
 /// <param name="operationContext">Optional operation context.</param>
 /// <returns>Per-operation results of the mirrored execution.</returns>
 public abstract Task <IList <TableResult> > ExecuteMirrorBatchAsync(
     TableBatchOperation originalBatch, IList <TableResult> originalResponse,
     TableRequestOptions requestOptions = null, OperationContext operationContext = null);
示例#58
0
        /// <summary>
        /// Issues a fixed number of nondeterministically chosen atomic table calls — either an
        /// atomic partition query or a 1–2 operation batch write — and verifies each call
        /// against the reference table via RunCallAsync.
        /// </summary>
        /// <returns>Completion promise for the whole call sequence.</returns>
        async Task DoRandomAtomicCalls()
        {
            for (int callNum = 0; callNum < MigrationModel.NUM_CALLS_PER_MACHINE; callNum++)
            {
                TableCall       originalCall;
                MirrorTableCall referenceCall;
                // Snapshot of the reference table, used to pick valid ETags below.
                SortedDictionary <PrimaryKey, DynamicTableEntity> dump = await peekProxy.DumpReferenceTableAsync();

                if (PSharpRuntime.Nondeterministic())
                {
                    // Query
                    // XXX: Test the filtering?
                    var query = new TableQuery <DynamicTableEntity>();
                    query.FilterString = TableQuery.GenerateFilterCondition(
                        TableConstants.PartitionKey, QueryComparisons.Equal, MigrationModel.SINGLE_PARTITION_KEY);
                    // async/await pair needed to upcast the return value to object.
                    originalCall = async table => await table.ExecuteQueryAtomicAsync(query);

                    referenceCall = async referenceTable => await referenceTable.ExecuteQueryAtomicAsync(query);

                    Console.WriteLine("{0} starting query", machineId);
                }
                else
                {
                    // Batch write
                    int batchSize     = PSharpRuntime.Nondeterministic() ? 2 : 1;
                    var batch         = new TableBatchOperation();
                    var rowKeyChoices = new List <string> {
                        "0", "1", "2", "3", "4", "5"
                    };

                    for (int opNum = 0; opNum < batchSize; opNum++)
                    {
                        int    opTypeNum = PSharpNondeterminism.Choice(7);
                        int    rowKeyI   = PSharpNondeterminism.Choice(rowKeyChoices.Count);
                        string rowKey    = rowKeyChoices[rowKeyI];
                        rowKeyChoices.RemoveAt(rowKeyI);  // Avoid duplicate in same batch
                        var    primaryKey = new PrimaryKey(MigrationModel.SINGLE_PARTITION_KEY, rowKey);
                        string eTag       = null;

                        // Replace/Merge/Delete (1..3) need an ETag: any, wrong, or the current one
                        // (the last choice is only available when the row exists in the dump).
                        if (opTypeNum >= 1 && opTypeNum <= 3)
                        {
                            DynamicTableEntity existingEntity;
                            int etagTypeNum = PSharpNondeterminism.Choice(
                                dump.TryGetValue(primaryKey, out existingEntity) ? 3 : 2);
                            switch (etagTypeNum)
                            {
                            case 0: eTag = ChainTable2Constants.ETAG_ANY; break;

                            case 1: eTag = "wrong"; break;

                            case 2: eTag = existingEntity.ETag; break;
                            }
                        }
                        DynamicTableEntity entity = new DynamicTableEntity
                        {
                            PartitionKey = MigrationModel.SINGLE_PARTITION_KEY,
                            RowKey       = rowKey,
                            ETag         = eTag,
                            Properties   = new Dictionary <string, EntityProperty> {
                                // Give us something to see on merge.  Might help with tracing too!
                                { string.Format("{0}_c{1}_o{2}", machineId.ToString(), callNum, opNum),
                                  new EntityProperty(true) }
                            }
                        };
                        switch (opTypeNum)
                        {
                        case 0: batch.Insert(entity); break;

                        case 1: batch.Replace(entity); break;

                        case 2: batch.Merge(entity); break;

                        case 3: batch.Delete(entity); break;

                        case 4: batch.InsertOrReplace(entity); break;

                        case 5: batch.InsertOrMerge(entity); break;

                        case 6:
                            entity.ETag = ChainTable2Constants.ETAG_DELETE_IF_EXISTS;
                            batch.Delete(entity); break;
                        }
                    }

                    // Copy before execution: the original call mutates ETags in the batch entities.
                    TableBatchOperation batchCopy = ChainTableUtils.CopyBatch <DynamicTableEntity>(batch);
                    originalCall = async table => await table.ExecuteBatchAsync(batch);

                    referenceCall = async referenceTable => await referenceTable.ExecuteMirrorBatchAsync(batchCopy, successfulBatchResult);

                    Console.WriteLine("{0} starting batch {1}", machineId, batch);
                }

                await RunCallAsync(originalCall, referenceCall);

                // BUGFIX: the original call passed no argument for the {0} placeholder,
                // printing the literal "{0}"; supply machineId like the other log lines.
                Console.WriteLine("{0} table call verified", machineId);
            }
        }
示例#59
0
 /// <summary>
 /// Executes each operation of the batch one at a time and collects the results.
 /// NOTE(review): operations are applied independently via Execute(), so unlike a real
 /// Azure Table batch this is not atomic — confirm that is acceptable for callers.
 /// </summary>
 public IList <TableResult> ExecuteBatch(TableBatchOperation batch, TableRequestOptions requestOptions = null,
                                         OperationContext operationContext = null)
 {
     var results = new List <TableResult>();
     foreach (TableOperation op in batch)
     {
         results.Add(Execute(op));
     }
     return(results);
 }
示例#60
0
 /// <summary>
 /// Begins an asynchronous batch execution using default request options and operation context.
 /// </summary>
 public ICancellableAsyncResult BeginExecuteBatch(TableBatchOperation batch, AsyncCallback callback, object state)
 {
     // Forward to the full overload; null selects the default options and context.
     ICancellableAsyncResult asyncResult = BeginExecuteBatch(batch, null, null, callback, state);
     return asyncResult;
 }