private static void PopulateTableData(CloudTable cloudTable)
{
    // If the table does not exist, create it and populate it with some data.
    if (!cloudTable.Exists())
    {
        cloudTable.CreateIfNotExists();

        var tableBatchOperation = new TableBatchOperation();
        for (int i = 0; i < 100; i++)
        {
            tableBatchOperation.Add(
                TableOperation.Insert(new Person(i.ToString(), string.Format("Person {0}", i))));
        }

        cloudTable.ExecuteBatch(tableBatchOperation);
    }
}
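The Person type is not shown in the snippet. Since all 100 inserts go into a single batch, and a batch must target one partition, the constructor's first argument is presumably the row key with a fixed partition key. A minimal sketch under that assumption (the partition key value and the Name property are illustrative, not from the source):

// Hypothetical sketch of the Person entity assumed above.
// The fixed partition key and the Name property are assumptions.
public class Person : TableEntity
{
    public Person() { }   // parameterless constructor required for deserialization

    public Person(string rowKey, string name)
    {
        PartitionKey = "people";  // assumed: all rows share one partition so the single batch is valid
        RowKey = rowKey;
        Name = name;
    }

    public string Name { get; set; }
}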
public void TableQueryWithRetrySync()
{
    CloudTableClient tableClient = GenerateCloudTableClient();

    TableBatchOperation batch = new TableBatchOperation();
    for (int m = 0; m < 1500; m++)
    {
        // Insert entity
        DynamicTableEntity insertEntity = new DynamicTableEntity("insert test", m.ToString());
        batch.Insert(insertEntity);

        // A batch may hold at most 100 operations, so flush every 100 inserts.
        if ((m + 1) % 100 == 0)
        {
            currentTable.ExecuteBatch(batch);
            batch = new TableBatchOperation();
        }
    }

    TableQuery query = new TableQuery().Where(
        TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, "insert test"));

    TestHelper.ExecuteMethodWithRetry(
        4, // 2 segments, 2 failures
        new[]
        {
            // Inject a downstream network delay (~1000 ms/KB) on responses from the server.
            PerformanceBehaviors.InsertDownstreamNetworkDelay(
                10000,
                XStoreSelectors.TableTraffic().IfHostNameContains(tableClient.Credentials.AccountName).Alternating(true),
                new BehaviorOptions(4)),

            // After 100 ms, return a throttle message.
            DelayedActionBehaviors.ExecuteAfter(
                Actions.ThrottleTableRequest,
                100,
                XStoreSelectors.TableTraffic().IfHostNameContains(tableClient.Credentials.AccountName).Alternating(true),
                new BehaviorOptions(4))
        },
        (options, opContext) => currentTable.ExecuteQuery(query, (TableRequestOptions)options, opContext).ToList());
}
public void TableIngressEgressBatch()
{
    CloudTableClient tableClient = GenerateCloudTableClient();

    TableBatchOperation batch = new TableBatchOperation();
    for (int m = 0; m < 100; m++)
    {
        // Insert entity
        DynamicTableEntity insertEntity = new DynamicTableEntity("insert test", m.ToString());
        insertEntity.Properties.Add("prop" + m.ToString(), new EntityProperty(new byte[30 * 1024]));
        batch.InsertOrMerge(insertEntity);
    }

    // APM
    TestHelper.ValidateIngressEgress(Selectors.IfUrlContains("$batch"), () =>
    {
        OperationContext opContext = new OperationContext();
        currentTable.EndExecuteBatch(currentTable.BeginExecuteBatch(
            batch,
            new TableRequestOptions() { RetryPolicy = new RetryPolicies.NoRetry() },
            opContext,
            null,
            null));
        return opContext.LastResult;
    });

    // SYNC
    TestHelper.ValidateIngressEgress(Selectors.IfUrlContains("$batch"), () =>
    {
        OperationContext opContext = new OperationContext();
        currentTable.ExecuteBatch(
            batch,
            new TableRequestOptions() { RetryPolicy = new RetryPolicies.NoRetry() },
            opContext);
        return opContext.LastResult;
    });
}
public static void MyClassInitialize(TestContext testContext)
{
    tableClient = GenerateCloudTableClient();
    currentTable = tableClient.GetTableReference(GenerateRandomTableName());
    currentTable.CreateIfNotExists();

    for (int i = 0; i < 15; i++)
    {
        TableBatchOperation batch = new TableBatchOperation();
        for (int j = 0; j < 100; j++)
        {
            BaseEntity ent = GenerateRandomEntity("tables_batch_" + i.ToString());
            ent.RowKey = string.Format("{0:0000}", j);
            batch.Insert(ent);
        }

        currentTable.ExecuteBatch(batch);
    }
}
public static void MyClassInitialize(TestContext testContext)
{
    CloudTableClient tableClient = GenerateCloudTableClient();
    currentTable = tableClient.GetTableReference(GenerateRandomTableName());
    currentTable.CreateIfNotExists();

    for (int i = 0; i < 15; i++)
    {
        TableBatchOperation batch = new TableBatchOperation();
        for (int j = 0; j < 100; j++)
        {
            var ent = GenerateRandomEnitity("tables_batch_" + i.ToString());
            ent.RowKey = string.Format("{0:0000}", j);
            batch.Insert(ent);
        }

        currentTable.ExecuteBatch(batch);
    }
}
public void TableTestTableQueryCancellation()
{
    CloudTableClient tableClient = GenerateCloudTableClient();

    TableBatchOperation batch = new TableBatchOperation();
    for (int m = 0; m < 100; m++)
    {
        // Insert entity
        DynamicTableEntity insertEntity = new DynamicTableEntity("insert test", m.ToString());
        insertEntity.Properties.Add("prop" + m.ToString(), new EntityProperty(new byte[30 * 1024]));
        batch.Insert(insertEntity);
    }

    currentTable.ExecuteBatch(batch);

    TableQuery query = new TableQuery().Where(
        TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, "insert test"));

    TestHelper.ExecuteAPMMethodWithCancellation(
        4000,
        new[]
        {
            DelayBehaviors.DelayAllRequestsIf(
                4000 * 3,
                XStoreSelectors.TableTraffic().IfHostNameContains(tableClient.Credentials.AccountName))
        },
        (options, opContext, callback, state) => currentTable.BeginExecuteQuerySegmented(query, null, (TableRequestOptions)options, opContext, callback, state),
        (res) => currentTable.EndExecuteQuerySegmented(res));
}
public void DeleteEntities(CloudTable table, string partition = null)
{
    if (!table.Exists())
    {
        return;
    }

    TableQuery query = new TableQuery();
    if (partition != null)
    {
        query.FilterString = string.Format("PartitionKey eq '{0}'", partition);
    }

    // A batch may only target a single partition and hold at most 100 operations,
    // so group the entities by partition key and flush in chunks of 100.
    foreach (var partitionGroup in table.ExecuteQuery(query).GroupBy(e => e.PartitionKey))
    {
        var batch = new TableBatchOperation();
        foreach (var entity in partitionGroup)
        {
            batch.Delete(entity);
            if (batch.Count == 100)
            {
                table.ExecuteBatch(batch);
                batch = new TableBatchOperation();
            }
        }

        if (batch.Count > 0)
        {
            table.ExecuteBatch(batch);
        }
    }
}
public static void MyClassInitialize(TestContext testContext)
{
    CloudTableClient tableClient = GenerateCloudTableClient();
    currentTable = tableClient.GetTableReference(GenerateRandomTableName());
    currentTable.CreateIfNotExists();

    // Bulk query entities
    for (int i = 0; i < 15; i++)
    {
        TableBatchOperation batch = new TableBatchOperation();
        for (int j = 0; j < 100; j++)
        {
            var ent = GenerateRandomEnitity("tables_batch_" + i.ToString());
            ent.RowKey = string.Format("{0:0000}", j);
            batch.Insert(ent);
        }

        currentTable.ExecuteBatch(batch);
    }

    complexEntityTable = tableClient.GetTableReference(GenerateRandomTableName());
    complexEntityTable.Create();

    // Setup
    TableBatchOperation complexBatch = new TableBatchOperation();
    string pk = Guid.NewGuid().ToString();
    for (int m = 0; m < 100; m++)
    {
        ComplexEntity complexEntity = new ComplexEntity(pk, string.Format("{0:0000}", m));
        complexEntity.String = string.Format("{0:0000}", m);
        complexEntity.Binary = new byte[] { 0x01, 0x02, (byte)m };
        complexEntity.BinaryPrimitive = new byte[] { 0x01, 0x02, (byte)m };
        complexEntity.Bool = m % 2 == 0;
        complexEntity.BoolPrimitive = m % 2 == 0;
        complexEntity.Double = m + ((double)m / 100);
        complexEntity.DoublePrimitive = m + ((double)m / 100);
        complexEntity.Int32 = m;
        complexEntity.Int32N = m;
        complexEntity.IntegerPrimitive = m;
        complexEntity.IntegerPrimitiveN = m;
        complexEntity.Int64 = (long)int.MaxValue + m;
        complexEntity.LongPrimitive = (long)int.MaxValue + m;
        complexEntity.LongPrimitiveN = (long)int.MaxValue + m;
        complexEntity.Guid = Guid.NewGuid();
        complexBatch.Insert(complexEntity);

        if (m == 50)
        {
            middleRef = complexEntity;
        }

        // Add a delay to make the timestamps unique.
        Thread.Sleep(100);
    }

    complexEntityTable.ExecuteBatch(complexBatch);
}
private void DoEscapeTest(string data, bool useBatch, bool includeKey)
{
    DynamicTableEntity ent = new DynamicTableEntity(includeKey ? "temp" + data : "temp", Guid.NewGuid().ToString());
    ent.Properties.Add("foo", new EntityProperty(data));

    // Insert
    if (useBatch)
    {
        TableBatchOperation batch = new TableBatchOperation();
        batch.Insert(ent);
        currentTable.ExecuteBatch(batch);
    }
    else
    {
        currentTable.Execute(TableOperation.Insert(ent));
    }

    // Retrieve
    TableResult res = null;
    if (useBatch)
    {
        TableBatchOperation batch = new TableBatchOperation();
        batch.Retrieve(ent.PartitionKey, ent.RowKey);
        res = currentTable.ExecuteBatch(batch)[0];
    }
    else
    {
        res = currentTable.Execute(TableOperation.Retrieve(ent.PartitionKey, ent.RowKey));
    }

    // Check equality
    DynamicTableEntity retrievedEntity = res.Result as DynamicTableEntity;
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(ent.PartitionKey, retrievedEntity.PartitionKey);
    Assert.AreEqual(ent.RowKey, retrievedEntity.RowKey);
    Assert.AreEqual(ent.ETag, retrievedEntity.ETag);
    Assert.AreEqual(ent.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(ent.Properties["foo"], retrievedEntity.Properties["foo"]);

    // Query using a filter on the data, doubling any single quotes to escape them for OData.
    TableQuery query = new TableQuery();
    query.Where(string.Format(
        "(PartitionKey eq '{0}') and (RowKey eq '{1}') and (foo eq '{2}')",
        ent.PartitionKey,
        ent.RowKey,
        data.Replace("'", "''")));

    retrievedEntity = currentTable.ExecuteQuery(query).Single();
    Assert.IsNotNull(retrievedEntity);
    Assert.AreEqual(ent.PartitionKey, retrievedEntity.PartitionKey);
    Assert.AreEqual(ent.RowKey, retrievedEntity.RowKey);
    Assert.AreEqual(ent.ETag, retrievedEntity.ETag);
    Assert.AreEqual(ent.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(ent.Properties["foo"], retrievedEntity.Properties["foo"]);

    // Merge
    ent.Properties.Add("foo2", new EntityProperty("bar2"));
    if (useBatch)
    {
        TableBatchOperation batch = new TableBatchOperation();
        batch.Merge(ent);
        currentTable.ExecuteBatch(batch);
    }
    else
    {
        currentTable.Execute(TableOperation.Merge(ent));
    }

    // Retrieve
    if (useBatch)
    {
        TableBatchOperation batch = new TableBatchOperation();
        batch.Retrieve(ent.PartitionKey, ent.RowKey);
        res = currentTable.ExecuteBatch(batch)[0];
    }
    else
    {
        res = currentTable.Execute(TableOperation.Retrieve(ent.PartitionKey, ent.RowKey));
    }

    retrievedEntity = res.Result as DynamicTableEntity;
    Assert.AreEqual(ent.PartitionKey, retrievedEntity.PartitionKey);
    Assert.AreEqual(ent.RowKey, retrievedEntity.RowKey);
    Assert.AreEqual(ent.ETag, retrievedEntity.ETag);
    Assert.AreEqual(ent.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(ent.Properties["foo"], retrievedEntity.Properties["foo"]);

    // Replace
    ent.Properties.Remove("foo2");
    ent.Properties.Add("foo3", new EntityProperty("bar3"));
    if (useBatch)
    {
        TableBatchOperation batch = new TableBatchOperation();
        batch.Replace(ent);
        currentTable.ExecuteBatch(batch);
    }
    else
    {
        currentTable.Execute(TableOperation.Replace(ent));
    }

    // Retrieve
    if (useBatch)
    {
        TableBatchOperation batch = new TableBatchOperation();
        batch.Retrieve(ent.PartitionKey, ent.RowKey);
        res = currentTable.ExecuteBatch(batch)[0];
    }
    else
    {
        res = currentTable.Execute(TableOperation.Retrieve(ent.PartitionKey, ent.RowKey));
    }

    retrievedEntity = res.Result as DynamicTableEntity;
    Assert.AreEqual(ent.PartitionKey, retrievedEntity.PartitionKey);
    Assert.AreEqual(ent.RowKey, retrievedEntity.RowKey);
    Assert.AreEqual(ent.ETag, retrievedEntity.ETag);
    Assert.AreEqual(ent.Properties.Count, retrievedEntity.Properties.Count);
    Assert.AreEqual(ent.Properties["foo"], retrievedEntity.Properties["foo"]);
}
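The actual test data for DoEscapeTest is not shown in the snippet. A few hypothetical call sites illustrating the kinds of values the escaping logic is meant to exercise (the concrete strings are assumptions):

// Illustrative calls only; the real test inputs are not part of the snippet.
DoEscapeTest("simple", useBatch: false, includeKey: true);
DoEscapeTest("with 'single quotes'", useBatch: true, includeKey: false);   // exercises the '' escaping in the filter
DoEscapeTest("percent %25 and spaces", useBatch: true, includeKey: false);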
private static void DeleteStorageTableRows(CloudTable table, Dictionary<string, TableBatchOperation> batches)
{
    foreach (var batch in batches.Values)
    {
        table.ExecuteBatch(batch);
    }
}
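The caller is expected to hand in one batch per partition key, since a batch cannot span partitions. A minimal sketch of how such a dictionary might be built (the helper name and entity source are illustrative; staying under the 100-operation cap per batch is left to the caller here):

// Hypothetical helper: group delete operations by partition key so each
// TableBatchOperation targets a single partition, as the service requires.
private static Dictionary<string, TableBatchOperation> BuildDeleteBatches(IEnumerable<DynamicTableEntity> entities)
{
    var batches = new Dictionary<string, TableBatchOperation>();
    foreach (var entity in entities)
    {
        TableBatchOperation batch;
        if (!batches.TryGetValue(entity.PartitionKey, out batch))
        {
            batches[entity.PartitionKey] = batch = new TableBatchOperation();
        }
        batch.Delete(entity);  // assumes fewer than 100 deletes per partition
    }
    return batches;
}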
private void CalculateMADataToAzure(CloudTable table, string azureTableStockCode, int MA)
{
    DateTime startingDate = DateTime.FromFileTimeUtc(0);

    TableOperation retrieveStockEntityStatus = TableOperation.Retrieve<StockEntityStatus>("status-" + azureTableStockCode, "status");
    var stockEntityStatus = (StockEntityStatus)table.Execute(retrieveStockEntityStatus).Result;
    if (stockEntityStatus != null)
    {
        startingDate = stockEntityStatus.GetLatestMAStartDate(MA);
        Console.WriteLine("Latest starting date for MA{0} is on {1}", MA, startingDate.ToString("yyyy-MM-dd"));
    }

    string pkFilter = TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, azureTableStockCode);
    string rkLowerFilter = TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.GreaterThanOrEqual, startingDate.ToString("yyyy-MM-dd"));
    string combinedFilter = TableQuery.CombineFilters(pkFilter, TableOperators.And, rkLowerFilter);
    TableQuery<StockEntity> query = new TableQuery<StockEntity>().Where(combinedFilter);

    var sortedStockEntities = table.ExecuteQuery<StockEntity>(query).OrderBy(entity => entity.Date).ToList();
    if (sortedStockEntities.LongCount() >= MA)
    {
        long totalCountToUpload = sortedStockEntities.LongCount();
        long currentCountUploaded = 0;

        // Sliding window holding the last MA closing prices.
        Queue<double> maData = new Queue<double>();
        TableBatchOperation tableBatchOperation = new TableBatchOperation();
        foreach (var stockEntity in sortedStockEntities)
        {
            maData.Enqueue(stockEntity.Close);
            if (maData.Count == MA)
            {
                double sum = 0;
                foreach (var data in maData)
                {
                    sum += data;
                }

                stockEntity.SetMA(MA, sum / MA);
                tableBatchOperation.Add(TableOperation.InsertOrMerge(stockEntity));
                maData.Dequeue();
            }

            if (tableBatchOperation.Count == 100)
            {
                table.ExecuteBatch(tableBatchOperation);
                currentCountUploaded += 100;
                Console.WriteLine("{0}/{1} entities uploaded...", currentCountUploaded, totalCountToUpload);
                tableBatchOperation.Clear();
            }
        }

        if (tableBatchOperation.Count > 0)
        {
            table.ExecuteBatch(tableBatchOperation);
            currentCountUploaded += tableBatchOperation.Count;
            Console.WriteLine("{0}/{1} entities uploaded...", currentCountUploaded, totalCountToUpload);
        }

        sortedStockEntities.Reverse();

        // Create the status entity if it did not exist yet.
        if (stockEntityStatus == null)
        {
            stockEntityStatus = new StockEntityStatus(azureTableStockCode);
        }

        stockEntityStatus.SetLatestMAStartDate(MA, sortedStockEntities[MA - 2].Date);
        table.Execute(TableOperation.InsertOrMerge(stockEntityStatus));
    }
}
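The StockEntityStatus type is not shown. A hedged sketch of a shape that would satisfy the calls above; persisting one DateTime property per moving-average window (and the window sizes 5 and 20) is purely an assumption for illustration:

// Hypothetical sketch of the status entity assumed above.
public class StockEntityStatus : TableEntity
{
    public StockEntityStatus() { }

    public StockEntityStatus(string stockCode)
    {
        PartitionKey = "status-" + stockCode;  // matches the retrieve key used above
        RowKey = "status";
    }

    // Assumed: one persisted start date per supported MA window.
    public DateTime MA5Start { get; set; }
    public DateTime MA20Start { get; set; }

    public DateTime GetLatestMAStartDate(int ma)
    {
        return ma == 5 ? MA5Start : MA20Start;
    }

    public void SetLatestMAStartDate(int ma, DateTime date)
    {
        if (ma == 5) MA5Start = date; else MA20Start = date;
    }
}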
// Delete data from the Azure Table.
// POST api/datadelete
public string Post([FromBody]DataDelete query)
{
    // This API is only called when "Delete" is clicked in the management application.
    table = common.AzureAccess(); // Access the Azure Table.

    // Look up the target module and its owner in the relational database.
    var loginuser = RDB.db.Users.Where(p => p.idName.Equals(User.Identity.Name)).Single();
    var module = loginuser.Modules.Where(p => p.Name.Equals(query.modulename)).Single();
    int id = module.id;

    CloudBlobContainer container = common.BlobAccess(); // Access Azure Blob storage.

    // Fetch the rows in the requested date range (the "Take" partition).
    TableQuery<DataEntity> query1 = new TableQuery<DataEntity>().Where(
        TableQuery.CombineFilters(
            TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, "Take," + id),
            TableOperators.And,
            TableQuery.CombineFilters(
                TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.GreaterThanOrEqual, common.GetTimeIndex(query.datestart)),
                TableOperators.And,
                TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.LessThanOrEqual, common.GetTimeIndex(query.dateend)))));

    List<TableBatchOperation> deleteOperationList = new List<TableBatchOperation>();
    TableBatchOperation deleteOperation = new TableBatchOperation();

    try
    {
        module.Type = "1"; // Type is "1" while deletion is in progress.
        RDB.db.SaveChanges();
    }
    catch { }

    int CountNum = 0;

    // Delete in batches of 100 (the "Take" partition); blob-backed rows delete their blob one at a time.
    foreach (var entity in table.ExecuteQuery(query1))
    {
        deleteOperation.Delete(entity);
        if (deleteOperation.Count == 100)
        {
            deleteOperationList.Add(deleteOperation);
            deleteOperation = new TableBatchOperation();
        }

        if (!(entity.DataVal == null))
        {
            if (entity.DataVal.Equals("BlobData"))
            {
                CloudBlockBlob blockBlob = container.GetBlockBlobReference(id.ToString() + "," + entity.RowKey);
                blockBlob.Delete();
            }
        }

        CountNum++;
    }

    if (deleteOperation.Count > 0)
    {
        deleteOperationList.Add(deleteOperation);
        deleteOperation = new TableBatchOperation();
    }

    Parallel.ForEach(deleteOperationList, Operation => { table.ExecuteBatch(Operation); });

    // Get the row count after deletion and update NumData in the relational database.
    TableQuery<DataEntity> Countquery = new TableQuery<DataEntity>().Where(
        TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, "Take," + module.id));
    try
    {
        module.NumData = table.ExecuteQuery(Countquery).Count();
        RDB.db.SaveChanges();
    }
    catch { }

    // Fetch the rows in the requested date range (the "Value" partition).
    TableQuery<DataEntity> query2 = new TableQuery<DataEntity>().Where(
        TableQuery.CombineFilters(
            TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, "Value," + id),
            TableOperators.And,
            TableQuery.CombineFilters(
                TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.GreaterThanOrEqual, common.GetTimeIndex(query.datestart)),
                TableOperators.And,
                // Note: string concatenation — appends "1" to the time index to widen the upper bound.
                TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.LessThanOrEqual, common.GetTimeIndex(query.dateend) + 1))));

    deleteOperationList.Clear();

    // Delete in batches of 100 (the "Value" partition).
    foreach (var entity in table.ExecuteQuery(query2))
    {
        deleteOperation.Delete(entity);
        if (deleteOperation.Count == 100)
        {
            deleteOperationList.Add(deleteOperation);
            deleteOperation = new TableBatchOperation();
        }
    }

    if (deleteOperation.Count > 0)
    {
        deleteOperationList.Add(deleteOperation);
        deleteOperation = new TableBatchOperation();
    }

    Parallel.ForEach(deleteOperationList, Operation => { table.ExecuteBatch(Operation); });

    try
    {
        module.Type = "0";
        RDB.db.SaveChanges();
    }
    catch { }

    return "Success!!";
}
private static void DeleteAllEntitiesInBatches(CloudTable table, Expression<Func<DynamicTableEntity, bool>> filters)
{
    Action<IEnumerable<DynamicTableEntity>> processor = entities =>
    {
        var batches = new Dictionary<string, TableBatchOperation>();

        foreach (var entity in entities)
        {
            TableBatchOperation batch = null;
            if (batches.TryGetValue(entity.PartitionKey, out batch) == false)
            {
                batches[entity.PartitionKey] = batch = new TableBatchOperation();
            }

            batch.Add(TableOperation.Delete(entity));

            // Flush a partition's batch once it reaches the 100-operation limit.
            if (batch.Count == 100)
            {
                table.ExecuteBatch(batch);
                batches[entity.PartitionKey] = new TableBatchOperation();
            }
        }

        foreach (var batch in batches.Values)
        {
            if (batch.Count > 0)
            {
                table.ExecuteBatch(batch);
            }
        }
    };

    ProcessEntities(table, processor, filters);
}
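A hypothetical invocation, assuming ProcessEntities (not shown) pages through the table and feeds matching entities to the processor; the partition name and cutoff are illustrative:

// Illustrative only: delete everything in a given partition older than a cutoff.
DeleteAllEntitiesInBatches(
    table,
    e => e.PartitionKey == "sensor-42" && e.Timestamp < DateTimeOffset.UtcNow.AddDays(-30));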
/// <summary>
/// Insert a batch of ElevationDataSetTableEntity into the storage table.
/// </summary>
/// <param name="table"></param>
/// <param name="entities"></param>
private static void InsertBatchOfElevationDataSetTableEntity(CloudTable table, List<ElevationDataSetTableEntity> entities)
{
    TableBatchOperation batchOperation = new TableBatchOperation();
    foreach (ElevationDataSetTableEntity tableEntity in entities)
    {
        batchOperation.InsertOrReplace(tableEntity);
    }

    try
    {
        table.ExecuteBatch(batchOperation);
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex.Message);
        throw;
    }
}
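This method does no chunking itself, so the caller must limit each list to at most 100 entities sharing one partition key. A hedged sketch of such a caller (the helper name and chunking strategy are assumptions):

// Hypothetical caller: split a large, single-partition list into 100-entity
// chunks so each ExecuteBatch call stays within the service limit.
private static void InsertAllElevationEntities(CloudTable table, List<ElevationDataSetTableEntity> allEntities)
{
    for (int offset = 0; offset < allEntities.Count; offset += 100)
    {
        var chunk = allEntities.GetRange(offset, Math.Min(100, allEntities.Count - offset));
        InsertBatchOfElevationDataSetTableEntity(table, chunk);
    }
}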
private ScheduledTask[] __fetchScheduledItems(CloudTable scheduleTable, string channel, int retryCount)
{
    /* ------------------------------------
     * Run a range query to fetch tasks whose scheduled time has elapsed.
     * We retrieve 50 items because each row is followed by two table operations,
     * for a total of 100 operations (the maximum a TableBatchOperation can hold).
     * ------------------------------------- */
    const int count = 50;
    TableQuery<DynamicTableEntity> rangeQuery = new TableQuery<DynamicTableEntity>().Where(
        TableQuery.CombineFilters(
            TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, channel),
            TableOperators.And,
            TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.LessThan, DateTime.Now.ToUniversalTime().ToString("yyyyMMddHHmmssffff"))))
        .Take(count);

    var results = scheduleTable.ExecuteQuery(rangeQuery).Take(count).ToArray();
    if (results.Length == 0)
        return new ScheduledTask[0];

    /* ------------------------------------
     * Every ScheduleEntry retrieved must be deleted right away, for concurrency reasons.
     * Each is also 'postponed' by the specified time span so that it fires again in case the task never completes.
     * ------------------------------------- */
    TableBatchOperation tb = new TableBatchOperation();
    List<ScheduledTask> _items = new List<ScheduledTask>();
    foreach (var x in results)
    {
        int delimiter = x.RowKey.IndexOf('-');
        string[] rowSpl = new string[] { x.RowKey.Substring(0, delimiter), x.RowKey.Substring(delimiter + 1) };
        string tempPostponed = DateTime.Now.ToUniversalTime().AddSeconds(POSTPONE_DELAY_FOR_UNCOMMITED_SECONDS).ToString("yyyyMMddHHmmssffff") + "-" + rowSpl[1];

        DynamicTableEntity ghost = new DynamicTableEntity(x.PartitionKey, tempPostponed);
        ghost.Properties = x.Properties;

        int tryCount = 0;
        if (ghost.Properties.ContainsKey("RetryCount"))
        {
            tryCount = ghost.Properties["RetryCount"].Int32Value.Value;
        }
        ghost.Properties["RetryCount"] = new EntityProperty(tryCount + 1);

        // Delete and postpone.
        tb.Add(TableOperation.Delete(x));
        tb.Add(TableOperation.Insert(ghost));

        _items.Add(new ScheduledTask
        {
            ScheduledTime = DateTime.ParseExact(rowSpl[0], "yyyyMMddHHmmssffff", System.Globalization.CultureInfo.InvariantCulture),
            Channel = channel,
            FailedTimes = tryCount,
            Data = x["Data"].StringValue,
            Id = rowSpl[1]
        });
    }

    /* ----------------------------------------------------------------------------------------------------------------------------------------
     * Now that the batch operation containing the deletes and 'postpones' is built, we execute it.
     *
     * This batch operation is the 'trick' that handles concurrency: because Azure Table Storage is centralized as a single authority,
     * if two batches are made at the same time on the same rows, one of them will fail, hence it shouldn't be possible to dequeue twice.
     * ------------------------------------------------------------------------------------------------------------------------------------------ */
    TableResult[] tableResults = null;
    try
    {
        // Keep only 201 (Created) responses, i.e. the results for the inserted ghost entities.
        tableResults = scheduleTable.ExecuteBatch(tb).Where(x => x.HttpStatusCode == 201).ToArray();
        for (int i = 0; i < _items.Count; i++)
        {
            _items[i].temporaryTask = (DynamicTableEntity)tableResults[i].Result;
        }
    }
    catch (Exception)
    {
        /* ----------------------------------------------------------------------------------------------------------------------------------------
         * If an exception occurs while executing, it is most likely because another fetch was made at the same time (and succeeded),
         * so we retry the FetchScheduledItems operation to get the next items.
         *
         * If the exception keeps occurring after several retries, there is most likely a problem with the Azure Storage account.
         * ------------------------------------------------------------------------------------------------------------------------------------------ */
        if (retryCount >= 5)
            throw;

        return __fetchScheduledItems(scheduleTable, channel, ++retryCount);
    }

    return _items.ToArray();
}
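A hedged example of how this dequeue might be driven; the channel name and the processing loop are illustrative, and the completion logic (removing the postponed ghost row) is not part of the snippet:

// Illustrative entry point: start with retryCount = 0 and process whatever is due.
ScheduledTask[] due = __fetchScheduledItems(scheduleTable, "email-notifications", 0);
foreach (ScheduledTask task in due)
{
    // Run the task; the postponed 'ghost' row re-fires it later unless it is
    // explicitly completed elsewhere.
    Console.WriteLine("Dequeued task {0} scheduled for {1}", task.Id, task.ScheduledTime);
}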
public void TableRegionalQueryOnSupportedTypes()
{
    CultureInfo currentCulture = Thread.CurrentThread.CurrentCulture;
    // tr-TR is a deliberately hostile culture for serialization (',' decimal separator,
    // dotless-i casing), so it exposes culture-sensitive filter-generation bugs.
    Thread.CurrentThread.CurrentCulture = new CultureInfo("tr-TR");

    CloudTableClient client = GenerateCloudTableClient();
    CloudTable table = client.GetTableReference(GenerateRandomTableName());

    try
    {
        table.Create();

        // Setup
        TableBatchOperation batch = new TableBatchOperation();
        string pk = Guid.NewGuid().ToString();
        DynamicTableEntity middleRef = null;

        for (int m = 0; m < 100; m++)
        {
            ComplexEntity complexEntity = new ComplexEntity();
            complexEntity.String = string.Format("{0:0000}", m);
            complexEntity.Binary = new byte[] { 0x01, 0x02, (byte)m };
            complexEntity.BinaryPrimitive = new byte[] { 0x01, 0x02, (byte)m };
            complexEntity.Bool = m % 2 == 0;
            complexEntity.BoolPrimitive = m % 2 == 0;
            complexEntity.Double = m + ((double)m / 100);
            complexEntity.DoublePrimitive = m + ((double)m / 100);
            complexEntity.Int32 = m;
            complexEntity.IntegerPrimitive = m;
            complexEntity.Int64 = (long)int.MaxValue + m;
            complexEntity.LongPrimitive = (long)int.MaxValue + m;
            complexEntity.Guid = Guid.NewGuid();

            DynamicTableEntity dynEnt = new DynamicTableEntity(pk, string.Format("{0:0000}", m));
            dynEnt.Properties = complexEntity.WriteEntity(null);
            batch.Insert(dynEnt);

            if (m == 50)
            {
                middleRef = dynEnt;
            }

            // Add a delay to make the timestamps unique.
            Thread.Sleep(100);
        }

        table.ExecuteBatch(batch);

        // 1. Filter on String
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterCondition("String", QueryComparisons.GreaterThanOrEqual, "0050"), 50);

        // 2. Filter on Guid
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForGuid("Guid", QueryComparisons.Equal, middleRef["Guid"].GuidValue.Value), 1);

        // 3. Filter on Long
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForLong("Int64", QueryComparisons.GreaterThanOrEqual, middleRef["LongPrimitive"].Int64Value.Value), 50);
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForLong("LongPrimitive", QueryComparisons.GreaterThanOrEqual, middleRef["LongPrimitive"].Int64Value.Value), 50);

        // 4. Filter on Double
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForDouble("Double", QueryComparisons.GreaterThanOrEqual, middleRef["Double"].DoubleValue.Value), 50);
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForDouble("DoublePrimitive", QueryComparisons.GreaterThanOrEqual, middleRef["DoublePrimitive"].DoubleValue.Value), 50);

        // 5. Filter on Integer
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForInt("Int32", QueryComparisons.GreaterThanOrEqual, middleRef["Int32"].Int32Value.Value), 50);
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForInt("IntegerPrimitive", QueryComparisons.GreaterThanOrEqual, middleRef["IntegerPrimitive"].Int32Value.Value), 50);

        // 6. Filter on Date
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForDate("DateTimeOffset", QueryComparisons.GreaterThanOrEqual, middleRef["DateTimeOffset"].DateTimeOffsetValue.Value), 50);

        // 7. Filter on Boolean
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForBool("Bool", QueryComparisons.Equal, middleRef["Bool"].BooleanValue.Value), 50);
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForBool("BoolPrimitive", QueryComparisons.Equal, middleRef["BoolPrimitive"].BooleanValue.Value), 50);

        // 8. Filter on Binary
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForBinary("Binary", QueryComparisons.Equal, middleRef["Binary"].BinaryValue), 1);
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForBinary("BinaryPrimitive", QueryComparisons.Equal, middleRef["BinaryPrimitive"].BinaryValue), 1);

        // 9. Filter on Binary GTE
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForBinary("Binary", QueryComparisons.GreaterThanOrEqual, middleRef["Binary"].BinaryValue), 50);
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForBinary("BinaryPrimitive", QueryComparisons.GreaterThanOrEqual, middleRef["BinaryPrimitive"].BinaryValue), 50);

        // 10. Complex filter on Binary GTE
        ExecuteQueryAndAssertResults(table, TableQuery.CombineFilters(
            TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, middleRef.PartitionKey),
            TableOperators.And,
            TableQuery.GenerateFilterConditionForBinary("Binary", QueryComparisons.GreaterThanOrEqual, middleRef["Binary"].BinaryValue)), 50);
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForBinary("BinaryPrimitive", QueryComparisons.GreaterThanOrEqual, middleRef["BinaryPrimitive"].BinaryValue), 50);
    }
    finally
    {
        Thread.CurrentThread.CurrentCulture = currentCulture;
        table.DeleteIfExists();
    }
}
public void TableGenericQueryOnSupportedTypes()
{
    CloudTableClient client = GenerateCloudTableClient();
    CloudTable table = client.GetTableReference(GenerateRandomTableName());
    table.Create();

    try
    {
        // Setup
        TableBatchOperation batch = new TableBatchOperation();
        string pk = Guid.NewGuid().ToString();
        ComplexEntity middleRef = null;

        for (int m = 0; m < 100; m++)
        {
            ComplexEntity complexEntity = new ComplexEntity(pk, string.Format("{0:0000}", m));
            complexEntity.String = string.Format("{0:0000}", m);
            complexEntity.Binary = new byte[] { 0x01, 0x02, (byte)m };
            complexEntity.BinaryPrimitive = new byte[] { 0x01, 0x02, (byte)m };
            complexEntity.Bool = m % 2 == 0;
            complexEntity.BoolPrimitive = m % 2 == 0;
            complexEntity.Double = m + ((double)m / 100);
            complexEntity.DoublePrimitive = m + ((double)m / 100);
            complexEntity.Int32 = m;
            complexEntity.IntegerPrimitive = m;
            complexEntity.Int64 = m;
            complexEntity.LongPrimitive = m;
            complexEntity.Guid = Guid.NewGuid();
            batch.Insert(complexEntity);

            if (m == 50)
            {
                middleRef = complexEntity;
            }

            // Add a delay to make the timestamps unique.
            Thread.Sleep(100);
        }

        table.ExecuteBatch(batch);

        // 1. Filter on String
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterCondition("String", QueryComparisons.GreaterThanOrEqual, "0050"), 50);

        // 2. Filter on Guid
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForGuid("Guid", QueryComparisons.Equal, middleRef.Guid), 1);

        // 3. Filter on Long
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForLong("Int64", QueryComparisons.GreaterThanOrEqual, middleRef.LongPrimitive), 50);
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForLong("LongPrimitive", QueryComparisons.GreaterThanOrEqual, middleRef.LongPrimitive), 50);

        // 4. Filter on Double
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForDouble("Double", QueryComparisons.GreaterThanOrEqual, middleRef.Double), 50);
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForDouble("DoublePrimitive", QueryComparisons.GreaterThanOrEqual, middleRef.DoublePrimitive), 50);

        // 5. Filter on Integer
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForInt("Int32", QueryComparisons.GreaterThanOrEqual, middleRef.Int32), 50);
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForInt("IntegerPrimitive", QueryComparisons.GreaterThanOrEqual, middleRef.IntegerPrimitive), 50);

        // 6. Filter on Date
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForDate("DateTimeOffset", QueryComparisons.GreaterThanOrEqual, middleRef.DateTimeOffset), 50);

        // 7. Filter on Boolean
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForBool("Bool", QueryComparisons.Equal, middleRef.Bool), 50);
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForBool("BoolPrimitive", QueryComparisons.Equal, middleRef.BoolPrimitive), 50);

        // 8. Filter on Binary
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForBinary("Binary", QueryComparisons.Equal, middleRef.Binary), 1);
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForBinary("BinaryPrimitive", QueryComparisons.Equal, middleRef.BinaryPrimitive), 1);

        // 9. Filter on Binary GTE
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForBinary("Binary", QueryComparisons.GreaterThanOrEqual, middleRef.Binary), 50);
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForBinary("BinaryPrimitive", QueryComparisons.GreaterThanOrEqual, middleRef.BinaryPrimitive), 50);

        // 10. Complex filter on Binary GTE
        ExecuteQueryAndAssertResults(table, TableQuery.CombineFilters(
            TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, middleRef.PartitionKey),
            TableOperators.And,
            TableQuery.GenerateFilterConditionForBinary("Binary", QueryComparisons.GreaterThanOrEqual, middleRef.Binary)), 50);
        ExecuteQueryAndAssertResults(table, TableQuery.GenerateFilterConditionForBinary("BinaryPrimitive", QueryComparisons.GreaterThanOrEqual, middleRef.BinaryPrimitive), 50);
    }
    finally
    {
        table.DeleteIfExists();
    }
}
// (For testing) POST api/dataadd
public string Post([FromBody]string value)
{
    JavaScriptSerializer serializer = new JavaScriptSerializer();
    List<DataAdd> dataaddList = serializer.Deserialize<List<DataAdd>>(value);

    String ConnectionString = "UseDevelopmentStorage=true;DevelopmentStorageProxyUri=http://127.0.0.1:10002";
    CloudStorageAccount storageAccount = CloudStorageAccount.Parse(ConnectionString);
    CloudTableClient tableClient = storageAccount.CreateCloudTableClient();
    String TableName = "Sample16";
    table = tableClient.GetTableReference(TableName);

    TableBatchOperation batchOperationTake = new TableBatchOperation();
    TableBatchOperation batchOperationValue = new TableBatchOperation();
    string date = String.Empty;

    Marimo marimo = new Marimo();
    //string[] codelist = { "i_i,0", "i_j,0", "while,count,6", "while,count,4", "send,i_j", "send,i_i", "i_i,i_i,+,1", "endw,3", "i_i,0", "i_j,i_j,+,1", "endw,2" };
    //string[] codelist = { "i_data2,get,nowdata,2", "if,i_data2,>,100", "send,Worning!!", "endi,1" };

    bool Flag_MarimoCode = true;
    try
    {
        string codelists = RDB.db.Modules.Where(p => p.Name.Equals("HogerX01")).Single().Code.Replace(Environment.NewLine, "|");
        string[] codelist = codelists.Split('|');
        marimo.codelist = codelist;
    }
    catch
    {
        Flag_MarimoCode = false;
    }

    foreach (DataAdd dataadd in dataaddList)
    {
        marimo.dataadd = dataadd;
        if (Flag_MarimoCode)
        {
            marimo.RunMarimo();
        }

        foreach (var data in marimo.dataadd.dat)
        {
            Debug.WriteLine("Data: " + data);
        }

        date = marimo.dataadd.dt;
        string time = common.GetTimeIndex(date);

        TestEntity customer2 = new TestEntity("Value", time);
        customer2.DataVal = marimo.dataadd.dat;
        batchOperationValue.Insert(customer2);

        if (batchOperationValue.Count == 100)
        {
            table.ExecuteBatch(batchOperationValue);
            batchOperationValue = new TableBatchOperation();
        }
    }

    // Note: batchOperationTake is never populated in this test endpoint, so this guard never fires.
    if (batchOperationTake.Count > 0)
    {
        table.ExecuteBatch(batchOperationTake);
    }

    if (batchOperationValue.Count > 0)
    {
        table.ExecuteBatch(batchOperationValue);
    }

    return "Success!";
}
// Store data uploaded to the Azure Table.
// POST api/dataadd/{userId}/{moduleName}
public string Post(string fir, string sec, [FromBody]List<DataAdd> dataaddList)
{
    //string json = new JavaScriptSerializer().Serialize(dataaddList);
    //return json;
    if (dataaddList == null)
    {
        return "The JSON format is invalid.";
    }
    else if (dataaddList[0].dat == null)
    {
        return "The data is null.";
    }
    else if (dataaddList.Count > 1)
    {
        foreach (DataAdd dataadd in dataaddList)
        {
            if (dataadd.dt == null)
            {
                return "When sending two or more rows of data, an update timestamp is required.";
            }
        }
    }

    // Look up the target module and its owner in the relational database.
    var user = RDB.db.Users.Where(p => p.idName.Equals(fir)).Single();
    var module = user.Modules.Where(p => p.Name.Equals(sec)).Single();
    var units = module.Units;

    // Password check (including hashing).
    try
    {
        if (!(module.wPassWord == null))
        {
            // When adding data, use the password hashed with the hash function.
            string HashPW = common.GetHashPassword(dataaddList[0].dt, sec, module.wPassWord);
            try
            {
                if (!(dataaddList[0].pw.Equals(HashPW)))
                {
                    return "PassWord error";
                }
            }
            catch
            {
                return "You need to password";
            }
        }
    }
    catch
    {
        return "PassWord Check Error";
    }

    try
    {
        table = common.AzureAccess(); // Access the Azure Table.
    }
    catch
    {
        return "Azure Access Error";
    }

    // Use batches to store the data in bulk.
    List<TableBatchOperation> batchOperationTakeList = new List<TableBatchOperation>();
    List<TableBatchOperation> batchOperationValueList = new List<TableBatchOperation>();
    TableBatchOperation batchOperationTake = new TableBatchOperation();
    TableBatchOperation batchOperationValue = new TableBatchOperation();

    string date = "";
    int num1 = 0;
    Marimo marimo = new Marimo();

    // Newlines in the submitted code are treated as "|" inside the Marimo code compiler.
    // If the replacement throws, no Marimo code was entered, so there is nothing to run.
    bool Flag_MarimoCode = true;
    try
    {
        string codelists = module.Code.Replace(Environment.NewLine, "|");
        string[] codelist = codelists.Split('|');
        marimo.codelist = codelist;
    }
    catch
    {
        Flag_MarimoCode = false;
    }

    // While data is being stored, set the Type property of the Module table to "2".
    module.Type = "2";
    RDB.db.SaveChanges();

    try
    {
        foreach (DataAdd dataadd in dataaddList)
        {
            // Run the Marimo code for each incoming row of data.
            marimo.dataadd = dataadd;
            if (Flag_MarimoCode)
            {
                marimo.RunMarimo();
            }

            // If the uploaded data has more columns than the currently configured
            // number of data types, add data types to the relational database.
            if (marimo.dataadd.dat.Count > units.Count)
            {
                int count = marimo.dataadd.dat.Count - units.Count;
                for (int i = 0; i < count; i++)
                {
                    Unit unit = new Unit();
                    unit.Unit1 = "";
                    unit.TypeDataId = 10;
                    unit.Modules.Add(module);
                    RDB.db.Units.Add(unit);
                }
                RDB.db.SaveChanges();
                units = module.Units;
            }

            if (marimo.dataadd.dt == null)
            {
                DateTime now = DateTime.Now;
                date = now.ToString("yyyyMMdd-HHmmss-ffff");
            }
            else
            {
                date = marimo.dataadd.dt;
            }
            string time = common.GetTimeIndex(date);

            // Accumulate batches of 100 into the lists and store the whole lists later.
            DataEntity customer1 = new DataEntity("Take," + module.id, time);
            batchOperationTake.Insert(customer1);
            if (batchOperationTake.Count == 100)
            {
                batchOperationTakeList.Add(batchOperationTake);
                batchOperationTake = new TableBatchOperation();
            }

            int num2 = 0;
            foreach (var unit in units)
            {
                if (marimo.dataadd.dat.Count == num2)
                {
                    break;
                }

                DataEntity customer2 = new DataEntity("Value," + module.id, time + "," + unit.id);
                customer2.DataVal = marimo.dataadd.dat[num2];
                batchOperationValue.Insert(customer2);
                if (batchOperationValue.Count == 100)
                {
                    batchOperationValueList.Add(batchOperationValue);
                    batchOperationValue = new TableBatchOperation();
                }
                num2++;
            }

            num1++;
        }

        if (batchOperationTake.Count > 0)
        {
            batchOperationTakeList.Add(batchOperationTake);
        }
        if (batchOperationValue.Count > 0)
        {
            batchOperationValueList.Add(batchOperationValue);
        }

        // Store the lists of 100-item batches into the KVS in parallel.
        Parallel.ForEach(batchOperationTakeList, Operation => { table.ExecuteBatch(Operation); });
        Parallel.ForEach(batchOperationValueList, Operation => { table.ExecuteBatch(Operation); });

        if (!date.Equals(""))
        {
            module.Latest = date;
        }

        TableQuery<DataEntity> query = new TableQuery<DataEntity>().Where(
            TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, "Take," + module.id));
        module.NumData = table.ExecuteQuery(query).Count();
    }
    catch
    {
        module.Type = "0";
        RDB.db.SaveChanges();
        return "Error!";
    }

    // When not storing data, the Type property of the Module table is "0".
    module.Type = "0";
    RDB.db.SaveChanges();
    return "Success!";
}