private async Task UploadBufferAsync()
{
    if (m_Buffer.Count > 1 && m_Buffer[0].UseBatches)
    {
        // Batch upload
        var table = MapTable(m_Buffer[0]);
        var uploads = new TableBatchOperation();
        var links = new TableBatchOperation();
        var id = m_Buffer[0].Controller;

        foreach (var data in m_Buffer)
        {
            var entity = data.ToEntity(data.ID ?? m_RowKeyBase + "-" + m_Seq++);
            uploads.Insert(entity);
            if (data.ID != null)
            {
                links.Insert(new Link(table.Name, data.ID, entity.PartitionKey, entity.RowKey));
            }
        }

        OnDebug?.Invoke($"Batch uploading {uploads.Count} records to Azure table storage {table.Name} for controller [{id}]...");

        try
        {
            if (links.Count > 0)
            {
                await m_LinksTable.ExecuteBatchAsync(links);
            }
            var r = await table.ExecuteBatchAsync(uploads);

            // Check for errors: an entry succeeded only if the service returned
            // 201 (Created) or 204 (No Content) for its position in the batch.
            var errors = m_Buffer
                .Where((entry, x) => r.Count <= x || (r[x].HttpStatusCode != 201 && r[x].HttpStatusCode != 204))
                .ToList();
            var successes = m_Buffer
                .Where((entry, x) => r.Count > x && (r[x].HttpStatusCode == 201 || r[x].HttpStatusCode == 204))
                .ToList();

            OnUploadSuccess?.Invoke(201, successes,
                $"{m_Buffer.Count - errors.Count} record(s) out of {m_Buffer.Count} for controller [{id}] successfully uploaded to Azure table storage {table.Name}.");

            m_Buffer.Clear();
            if (errors.Count > 0)
            {
                // Keep failed entries in the buffer so they are retried on the next flush.
                m_Buffer.AddRange(errors);
                OnUploadError?.Invoke(0, errors,
                    $"{errors.Count} record(s) for controller [{id}] failed to upload to Azure table storage {table.Name}.");
            }
        }
        catch (StorageException ex)
        {
            var status = ex.RequestInformation.HttpStatusCode;
            var errmsg = ex.RequestInformation.ExtendedErrorInformation?.ErrorMessage
                         ?? ex.RequestInformation.HttpStatusMessage
                         ?? ex.Message;
            switch (status)
            {
                case 0:
                    OnError?.Invoke(ex, $"Azure table storage batch upload to {table.Name} for controller [{id}] failed.");
                    break;
                case 401:
                case 403:
                    OnUploadError?.Invoke(status, m_Buffer, $"Azure table storage batch upload to {table.Name} for controller [{id}] forbidden: {errmsg}");
                    break;
                default:
                    OnUploadError?.Invoke(status, m_Buffer, $"Azure table storage batch upload to {table.Name} for controller [{id}] failed: {errmsg}");
                    break;
            }
        }
        catch (Exception ex)
        {
            OnError?.Invoke(ex, $"Azure table storage batch upload to {table.Name} for controller [{id}] failed.");
        }
    }
    else if (m_Buffer.Count > 0)
    {
        // Single upload
        var data = m_Buffer[0];
        var id = data.Controller;
        var table = MapTable(data);
        var entity = data.ToEntity(data.ID ?? m_RowKeyBase + "-" + m_Seq++);
        var insert = TableOperation.Insert(entity);
        var link = (data.ID != null)
            ? TableOperation.Insert(new Link(table.Name, data.ID, entity.PartitionKey, entity.RowKey))
            : null;

        OnDebug?.Invoke($"Uploading record to Azure table storage {table.Name} for controller [{id}]...");

        try
        {
            if (link != null)
            {
                await m_LinksTable.ExecuteAsync(link);
            }
            var r = await table.ExecuteAsync(insert);

            OnUploadSuccess?.Invoke(r.HttpStatusCode, new[] { data },
                $"Azure table storage upload to {table.Name} for controller [{id}] succeeded, result = {r.HttpStatusCode}.");

            if (m_Buffer.Count <= 1)
            {
                m_Buffer.Clear();
            }
            else
            {
                m_Buffer.RemoveAt(0);
            }
        }
        catch (StorageException ex)
        {
            var status = ex.RequestInformation.HttpStatusCode;
            var errmsg = ex.RequestInformation.ExtendedErrorInformation?.ErrorMessage
                         ?? ex.RequestInformation.HttpStatusMessage
                         ?? ex.Message;
            switch (status)
            {
                case 0:
                    OnError?.Invoke(ex, $"Azure table storage upload to {table.Name} for controller [{id}] failed.");
                    break;
                case 401:
                case 403:
                    OnUploadError?.Invoke(status, new[] { data }, $"Azure table storage upload to {table.Name} for controller [{id}] forbidden: {errmsg}");
                    break;
                default:
                    OnUploadError?.Invoke(status, new[] { data }, $"Azure table storage upload to {table.Name} for controller [{id}] failed: {errmsg}");
                    break;
            }
        }
        catch (Exception ex)
        {
            OnError?.Invoke(ex, $"Azure table storage upload to {table.Name} for controller [{id}] failed.");
        }
    }
}
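// Note on the batch path above: it assumes the whole buffer fits in one batch, but a
// Table storage batch allows at most 100 operations and a single partition key.
// A minimal sketch of chunking entities into valid batches; ChunkByPartition is a
// hypothetical helper, not part of the class above (assumes the usual
// Microsoft.WindowsAzure.Storage.Table and System.Linq usings).
static IEnumerable<TableBatchOperation> ChunkByPartition(IEnumerable<ITableEntity> entities)
{
    foreach (var group in entities.GroupBy(e => e.PartitionKey))
    {
        var batch = new TableBatchOperation();
        foreach (var entity in group)
        {
            batch.Insert(entity);
            if (batch.Count == 100) // flush a full batch
            {
                yield return batch;
                batch = new TableBatchOperation();
            }
        }
        if (batch.Count > 0)
        {
            yield return batch;
        }
    }
}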
//Phone Validation
public void MigratePhoneValidationtoDest(DateTime LastRunTime)
{
    TableQuery<SOS.OPsTools.Entities.PhoneValidationBase> UQuery = null;
    if (base.LoadTableSilent(ConstantOps.PhoneValidationSrcTableName))
    {
        UQuery = new TableQuery<SOS.OPsTools.Entities.PhoneValidationBase>()
            .Where(TableQuery.GenerateFilterConditionForDate("Timestamp", QueryComparisons.GreaterThanOrEqual, LastRunTime));
        var qryReturn = base.EntityTable.ExecuteQuery(UQuery).ToList();
        SOS.OPsTools.Entities.PhoneValidationDest destRecord = new PhoneValidationDest();
        List<string> RetryMigratePhoneValidation = new List<string>();
        if (storageAccessDestination.LoadTableSilent(ConstantOps.PhoneValidatorDestTableName))
        {
            // Batch processing, one batch per partition key
            TableBatchOperation insertSessions = null;
            var distinctPartitionKeys = qryReturn.Select(s => s.PartitionKey).Distinct();
            MapPhoneValidation();
            foreach (var p in distinctPartitionKeys)
            {
                try
                {
                    insertSessions = new TableBatchOperation();
                    List<Entities.PhoneValidationBase> sessionsByProfileID = qryReturn.Where(ses => ses.PartitionKey == p).ToList();
                    // TableBatchOperation is not thread-safe, so the batch is built
                    // sequentially rather than from a Parallel.ForEach body.
                    foreach (var session in sessionsByProfileID)
                    {
                        destRecord = ConvertSessionToDest(session); // Use AutoMapper
                        insertSessions.Add(TableOperation.Insert(destRecord));
                    }
                    storageAccessDestination.EntityTable.ExecuteBatch(insertSessions);
                }
                catch (Exception ex)
                {
                    opsLogger.WriteLog("Phone Validation Error Message: " + ex.Message + Environment.NewLine + "PartitionKey: " + p);
                    if (!string.IsNullOrEmpty(p))
                    {
                        RetryMigratePhoneValidation.Add(p);
                    }
                }
            }
            // Sequential fallback for partitions whose batch failed
            if (RetryMigratePhoneValidation.Count > 0)
            {
                foreach (var p in RetryMigratePhoneValidation)
                {
                    List<PhoneValidationBase> sessionsByProfileID = qryReturn.Where(ses => ses.PartitionKey == p).ToList();
                    foreach (var session in sessionsByProfileID)
                    {
                        try
                        {
                            destRecord = ConvertSessionToDest(session); // Use AutoMapper
                            storageAccessDestination.EntityTable.Execute(TableOperation.InsertOrReplace(destRecord));
                        }
                        catch (Exception ex)
                        {
                            opsLogger.WriteLog("Phone Validation Error Message: " + ex.Message + Environment.NewLine + "PartitionKey: " + p);
                        }
                    }
                }
            }
        }
        else
        {
            opsLogger.WriteLog("Phone Validation not loaded, Please check settings");
        }
    }
}
//HistoryGeoLocation
public void MigrateHistoryGeoLocationtoDest(DateTime LastRunTime)
{
    TableQuery<SOS.OPsTools.Entities.HistoryGeoLocationBase> UQuery = null;
    if (base.LoadTableSilent(ConstantOps.HistoryGeoLocationSrcTableName))
    {
        UQuery = new TableQuery<SOS.OPsTools.Entities.HistoryGeoLocationBase>()
            .Where(TableQuery.GenerateFilterConditionForDate("Timestamp", QueryComparisons.GreaterThanOrEqual, LastRunTime));
        var qryReturn = base.EntityTable.ExecuteQuery(UQuery).ToList();
        Dictionary<Guid, long> MapProfiles = _Repository.GetAllMapProfiles().Result;
        SOS.OPsTools.Entities.HistoryGeoLocationDest destRecord = new HistoryGeoLocationDest();
        List<string> RetryMigrateProfiles = new List<string>();
        if (storageAccessDestination.LoadTableSilent(ConstantOps.HistoryGeoLocationDestTableName))
        {
            // Batch processing, one batch per profile
            TableBatchOperation insertSessions = null;
            var distinctProfileIDs = qryReturn.Select(s => s.ProfileID).Distinct();
            MapHistoryGeoLocation();
            foreach (var p in distinctProfileIDs)
            {
                try
                {
                    string newProfileID = MapProfiles[Guid.Parse(p)].ToString();
                    insertSessions = new TableBatchOperation();
                    List<Entities.HistoryGeoLocationBase> sessionsByProfileID = qryReturn.Where(ses => ses.ProfileID == p).ToList();
                    // TableBatchOperation is not thread-safe, so build the batch sequentially.
                    foreach (var session in sessionsByProfileID)
                    {
                        if (session.Identifier.Length > 2)
                        {
                            destRecord = ConvertSessionToDest(session); // Use AutoMapper
                            destRecord.ProfileID = newProfileID;
                            destRecord.SessionID = session.Identifier.Substring(2);
                            destRecord.IsSOS = Convert.ToBoolean(Convert.ToInt32(session.Identifier.Substring(0, 1)));
                            destRecord.Accuracy = 0;
                            insertSessions.Add(TableOperation.Insert(destRecord));
                        }
                    }
                    storageAccessDestination.EntityTable.ExecuteBatch(insertSessions);
                }
                catch (Exception ex)
                {
                    opsLogger.WriteLog("HistoryGeoLocation Error Message: " + ex.Message + Environment.NewLine + "ProfileID: " + p);
                    if (!string.IsNullOrEmpty(p))
                    {
                        RetryMigrateProfiles.Add(p);
                    }
                }
            }
            // Sequential fallback for profiles whose batch failed
            if (RetryMigrateProfiles.Count > 0)
            {
                foreach (var p in RetryMigrateProfiles)
                {
                    List<HistoryGeoLocationBase> sessionsByProfileID = qryReturn.Where(ses => ses.ProfileID == p).ToList();
                    foreach (var session in sessionsByProfileID)
                    {
                        try
                        {
                            if (session.Identifier.Length > 2)
                            {
                                destRecord = ConvertSessionToDest(session); // Use AutoMapper
                                destRecord.ProfileID = MapProfiles[Guid.Parse(destRecord.ProfileID)].ToString();
                                destRecord.SessionID = session.Identifier.Substring(2);
                                destRecord.IsSOS = Convert.ToBoolean(Convert.ToInt32(session.Identifier.Substring(0, 1)));
                                destRecord.Accuracy = 0;
                                storageAccessDestination.EntityTable.Execute(TableOperation.InsertOrReplace(destRecord));
                            }
                        }
                        catch (Exception ex)
                        {
                            opsLogger.WriteLog("HistoryGeoLocation Error Message: " + ex.Message + Environment.NewLine + "ProfileID: " + p);
                        }
                    }
                }
            }
        }
        else
        {
            opsLogger.WriteLog("HistoryGeoLocation Table not loaded, Please check settings");
        }
    }
}
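// The migration methods above (and the GroupAdmins variant later in this listing)
// originally filled a shared TableBatchOperation from inside Parallel.ForEach, which
// races; they also send one unbounded batch per partition, while the service caps a
// batch at 100 operations. A minimal sketch of a chunked executor under those
// constraints; ExecuteBatchedInserts is hypothetical, not part of the original classes.
static void ExecuteBatchedInserts<T>(CloudTable table, IReadOnlyList<T> entities) where T : ITableEntity
{
    // Assumes all entities share one partition key, as in the per-partition loops above.
    for (int i = 0; i < entities.Count; i += 100)
    {
        var batch = new TableBatchOperation();
        for (int j = i; j < Math.Min(i + 100, entities.Count); j++)
        {
            batch.Insert(entities[j]);
        }
        table.ExecuteBatch(batch); // throws if the batch spans partition keys
    }
}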
/*
 * https://docs.microsoft.com/en-us/azure/storage/storage-dotnet-how-to-use-tables
 */

/// <summary>Demonstrates basic operations on Azure tables</summary>
/// <param name="ConfigurationConnectionId">The name of the connection definition</param>
public static void ExampleOfAzureTables(string ConfigurationConnectionId = "example")
{
    #region Create a table
    /*
     * Entities map to C# objects by using a custom class derived from TableEntity.
     * To add an entity to a table, create a class that defines the properties of your entity.
     * The following code defines an entity class that uses the customer's first name as the
     * row key and last name as the partition key.
     * Together, an entity's partition and row key uniquely identify the entity in the table.
     * Entities with the same partition key can be queried faster than those with different
     * partition keys, but using diverse partition keys allows for greater scalability
     * of parallel operations.
     * Any property that should be stored in the Table service must be a public property
     * of a supported type that exposes both setting and retrieving values.
     * Also, your entity type must expose a parameter-less constructor.
     */

    // Parse the connection string and return a reference to the storage account.
    CloudStorageAccount storageAccount = CloudStorageAccount.Parse(CloudConfigurationManager.GetSetting(ConfigurationConnectionId));

    // Create the table client.
    CloudTableClient tableClient = storageAccount.CreateCloudTableClient();

    // Retrieve a reference to the table.
    CloudTable table = tableClient.GetTableReference("people");

    // Create the table if it doesn't exist.
    table.CreateIfNotExists();
    #endregion

    #region Add a single entity to a table
    /*
     * Table operations that involve entities are performed via the CloudTable object
     * that you created earlier in the "Create a table" section. The operation to be
     * performed is represented by a TableOperation object. The following code example
     * shows the creation of a CustomerEntity object. To prepare the operation, a
     * TableOperation object is created to insert the customer entity into the table.
     * Finally, the operation is executed by calling CloudTable.Execute.
     */

    // Create a new customer entity.
    CustomerEntity customer0 = new CustomerEntity("Harp", "Walter");
    customer0.Email = "*****@*****.**";
    customer0.PhoneNumber = "425-555-0101";

    // Create the TableOperation object that inserts the customer entity.
    TableOperation insertOperation = TableOperation.Insert(customer0);

    // Execute the insert operation.
    table.Execute(insertOperation);
    #endregion

    #region Insert a batch of entities to a table
    /*
     * You can insert a batch of entities into a table in one write operation. Some other notes on batch operations:
     *
     * You can perform updates, deletes, and inserts in the same single batch operation.
     * A single batch operation can include up to 100 entities.
     * All entities in a single batch operation must have the same partition key.
     * While it is possible to perform a query as a batch operation, it must be the only operation in the batch.
     *
     * The following code example creates two entity objects and adds each to TableBatchOperation by using the Insert method.
     * Then, CloudTable.ExecuteBatch is called to execute the operation.
     */

    // Create the batch operation.
    TableBatchOperation batchOperation = new TableBatchOperation();

    // Create a customer entity and add it to the table.
    CustomerEntity customer1 = new CustomerEntity("Smith", "Jeff");
    customer1.Email = "*****@*****.**";
    customer1.PhoneNumber = "425-555-0104";

    // Create another customer entity and add it to the table.
    CustomerEntity customer2 = new CustomerEntity("Smith", "Ben");
    customer2.Email = "*****@*****.**";
    customer2.PhoneNumber = "425-555-0102";

    // Add both customer entities to the batch insert operation.
    batchOperation.Insert(customer1);
    batchOperation.Insert(customer2);

    // Execute the batch operation.
    table.ExecuteBatch(batchOperation);
    #endregion

    #region Retrieve all entities in a partition
    /*
     * To query a table for all entities in a partition, use a TableQuery object.
     * The following code example specifies a filter for entities where 'Smith' is the partition key.
     * This example prints the fields of each entity in the query results to the console.
     */

    // Construct the query operation for all customer entities where PartitionKey="Smith".
    TableQuery<CustomerEntity> query = new TableQuery<CustomerEntity>()
        .Where(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, "Smith"));

    // Print the fields for each customer.
    foreach (CustomerEntity entity in table.ExecuteQuery(query))
    {
        Console.WriteLine("{0}, {1}\t{2}\t{3}", entity.PartitionKey, entity.RowKey, entity.Email, entity.PhoneNumber);
    }
    #endregion

    #region Retrieve a range of entities in a partition
    /*
     * If you don't want to query all the entities in a partition, you can specify a
     * range by combining the partition key filter with a row key filter.
     * The following code example uses two filters to get all entities in partition
     * 'Smith' where the row key (first name) starts with a letter earlier than
     * 'E' in the alphabet and then prints the query results.
     */

    // Create the table query.
    TableQuery<CustomerEntity> rangeQuery = new TableQuery<CustomerEntity>().Where(
        TableQuery.CombineFilters(
            TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, "Smith"),
            TableOperators.And,
            TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.LessThan, "E")));

    // Loop through the results, displaying information about the entity.
    foreach (CustomerEntity entity in table.ExecuteQuery(rangeQuery))
    {
        Console.WriteLine("{0}, {1}\t{2}\t{3}", entity.PartitionKey, entity.RowKey, entity.Email, entity.PhoneNumber);
    }
    #endregion

    #region Retrieve a single entity
    /*
     * You can write a query to retrieve a single, specific entity.
     * The following code uses TableOperation to specify the customer 'Ben Smith'.
     * This method returns just one entity rather than a collection, and the
     * returned value in TableResult.Result is a CustomerEntity object.
     * Specifying both partition and row keys in a query is the fastest way to
     * retrieve a single entity from the Table service.
     */

    // Create a retrieve operation that takes a customer entity.
    TableOperation retrieveOperation = TableOperation.Retrieve<CustomerEntity>("Smith", "Ben");

    // Execute the retrieve operation.
    TableResult retrievedResult = table.Execute(retrieveOperation);

    // Print the phone number of the result.
    if (retrievedResult.Result != null)
    {
        Console.WriteLine(((CustomerEntity)retrievedResult.Result).PhoneNumber);
    }
    else
    {
        Console.WriteLine("The phone number could not be retrieved.");
    }
    #endregion

    #region Replace an entity
    /*
     * To update an entity, retrieve it from the Table service, modify the entity object,
     * and then save the changes back to the Table service.
     * The following code changes an existing customer's phone number.
     * Instead of calling Insert, this code uses Replace.
     * This causes the entity to be fully replaced on the server, unless the entity on the
     * server has changed since it was retrieved, in which case the operation will fail.
     * This failure is to prevent your application from inadvertently overwriting a change
     * made between the retrieval and update by another component of your application.
     * The proper handling of this failure is to retrieve the entity again, make your changes
     * (if still valid), and then perform another Replace operation.
     * The next section will show you how to override this behavior.
     */

    // Create a retrieve operation that takes a customer entity.
    TableOperation retrieveOperation2 = TableOperation.Retrieve<CustomerEntity>("Smith", "Ben");

    // Execute the operation.
    TableResult retrievedResult2 = table.Execute(retrieveOperation2);

    // Assign the result to a CustomerEntity object.
    CustomerEntity updateEntity = (CustomerEntity)retrievedResult2.Result;
    if (updateEntity != null)
    {
        // Change the phone number.
        updateEntity.PhoneNumber = "425-555-0105";

        // Create the Replace TableOperation.
        TableOperation updateOperation = TableOperation.Replace(updateEntity);

        // Execute the operation.
        table.Execute(updateOperation);
        Console.WriteLine("Entity updated.");
    }
    else
    {
        Console.WriteLine("Entity could not be retrieved.");
    }
    #endregion

    #region Insert-or-replace an entity
    /*
     * Replace operations will fail if the entity has been changed since it was
     * retrieved from the server.
     * Furthermore, you must retrieve the entity from the server first in order
     * for the Replace operation to be successful.
     * Sometimes, however, you don't know if the entity exists on the server and
     * the current values stored in it are irrelevant.
     * Your update should overwrite them all.
     * To accomplish this, you would use an InsertOrReplace operation.
     * This operation inserts the entity if it doesn't exist, or replaces it if
     * it does, regardless of when the last update was made.
     * In the following code example, the customer entity for Ben Smith is still
     * retrieved, but it is then saved back to the server via InsertOrReplace.
     * Any updates made to the entity between the retrieval and update operations
     * will be overwritten.
     */

    // Create a retrieve operation that takes a customer entity.
    TableOperation retrieveOperation3 = TableOperation.Retrieve<CustomerEntity>("Smith", "Ben");

    // Execute the operation.
    TableResult retrievedResult3 = table.Execute(retrieveOperation3);

    // Assign the result to a CustomerEntity object.
    CustomerEntity updateEntity3 = (CustomerEntity)retrievedResult3.Result;
    if (updateEntity3 != null)
    {
        // Change the phone number.
        updateEntity3.PhoneNumber = "425-555-1234";

        // Create the InsertOrReplace TableOperation.
        TableOperation insertOrReplaceOperation = TableOperation.InsertOrReplace(updateEntity3);

        // Execute the operation.
        table.Execute(insertOrReplaceOperation);
        Console.WriteLine("Entity was updated.");
    }
    else
    {
        Console.WriteLine("Entity could not be retrieved.");
    }
    #endregion

    #region Query a subset of entity properties
    /*
     * A table query can retrieve just a few properties from an entity instead of
     * all the entity properties.
     * This technique, called projection, reduces bandwidth and can improve query
     * performance, especially for large entities.
     * The query in the following code returns only the email addresses of entities
     * in the table.
     * This is done by using a query of DynamicTableEntity and also EntityResolver.
     * You can learn more about projection on the Introducing Upsert and Query
     * Projection blog post.
     * Note that projection is not supported on the local storage emulator, so this
     * code runs only when you're using an account on the Table service.
     */

    // Define the query, and select only the Email property.
    TableQuery<DynamicTableEntity> projectionQuery = new TableQuery<DynamicTableEntity>().Select(new string[] { "Email" });

    // Define an entity resolver to work with the entity after retrieval.
    EntityResolver<string> resolver = (pk, rk, ts, props, etag) => props.ContainsKey("Email") ? props["Email"].StringValue : null;

    foreach (string projectedEmail in table.ExecuteQuery(projectionQuery, resolver, null, null))
    {
        Console.WriteLine(projectedEmail);
    }
    #endregion

    #region Delete an entity
    /*
     * You can easily delete an entity after you have retrieved it, by using
     * the same pattern shown for updating an entity.
     * The following code retrieves and deletes a customer entity.
     */

    // Create a retrieve operation that expects a customer entity.
    TableOperation retrieveOperation4 = TableOperation.Retrieve<CustomerEntity>("Smith", "Ben");

    // Execute the operation.
    TableResult retrievedResult4 = table.Execute(retrieveOperation4);

    // Assign the result to a CustomerEntity.
    CustomerEntity deleteEntity4 = (CustomerEntity)retrievedResult4.Result;

    // Create the Delete TableOperation.
    if (deleteEntity4 != null)
    {
        TableOperation deleteOperation = TableOperation.Delete(deleteEntity4);

        // Execute the operation.
        table.Execute(deleteOperation);
        Console.WriteLine("Entity deleted.");
    }
    else
    {
        Console.WriteLine("Could not retrieve the entity.");
    }
    #endregion

    #region Retrieve entities in pages asynchronously
    /*
     * If you are reading a large number of entities, and you want to process/display
     * entities as they are retrieved rather than waiting for them all to return,
     * you can retrieve entities by using a segmented query.
     * This example shows how to return results in pages so that execution is not
     * blocked while you're waiting for a large set of results to return.
     * For more details on using the Async-Await pattern in .NET,
     * see Asynchronous programming with Async and Await (C# and Visual Basic).
     */

    // Initialize a default TableQuery to retrieve all the entities in the table.
    TableQuery<CustomerEntity> tableQuery = new TableQuery<CustomerEntity>();

    // Initialize the continuation token to null to start from the beginning of the table.
    TableContinuationToken continuationToken = null;

    do
    {
        // Retrieve a segment (up to 1,000 entities). The awaited call is commented
        // out because this sample method is synchronous.
        TableQuerySegment<CustomerEntity> tableQueryResult =
            //await table.ExecuteQuerySegmentedAsync(tableQuery, continuationToken);
            table.ExecuteQuerySegmented(tableQuery, continuationToken);

        // Assign the new continuation token to tell the service where to
        // continue on the next iteration (or null if it has reached the end).
        continuationToken = tableQueryResult.ContinuationToken;

        // Print the number of rows retrieved.
        Console.WriteLine("Rows retrieved {0}", tableQueryResult.Results.Count);

        // Loop until a null continuation token is received, indicating the end of the table.
    } while (continuationToken != null);
    #endregion

    #region Delete a table
    /*
     * Finally, the following code example deletes a table from a storage account.
     * A table that has been deleted will be unavailable to be re-created for a
     * period of time following the deletion.
     */

    // Print the table name and URI.
    Console.WriteLine("Removing table {0}, {1}", table.Name, table.Uri);

    // Delete the table if it exists.
    table.DeleteIfExists();
    #endregion
}
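// The paging region above falls back to the synchronous ExecuteQuerySegmented because
// ExampleOfAzureTables itself is synchronous. A minimal sketch of the awaited variant,
// assuming the same CustomerEntity type; DumpAllCustomersAsync is an illustrative name.
static async Task DumpAllCustomersAsync(CloudTable table)
{
    var tableQuery = new TableQuery<CustomerEntity>();
    TableContinuationToken continuationToken = null;
    do
    {
        // Retrieve one segment (up to 1,000 entities) without blocking the caller.
        TableQuerySegment<CustomerEntity> segment =
            await table.ExecuteQuerySegmentedAsync(tableQuery, continuationToken);
        continuationToken = segment.ContinuationToken;
        Console.WriteLine("Rows retrieved {0}", segment.Results.Count);
    } while (continuationToken != null);
}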
public async Task CreateNewStudent(StudentEntity student)
{
    var insertop = TableOperation.Insert(student);
    await studentTable.ExecuteAsync(insertop);
}
// "AdaugareStudent" is Romanian for "AddStudent"; the identifier is kept as-is.
public async Task AdaugareStudent(StudentEntity student)
{
    var insertOperation = TableOperation.Insert(student);
    await _studentsTable.ExecuteAsync(insertOperation);
}
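// TableOperation.Insert throws a StorageException with HTTP 409 (Conflict) when an
// entity with the same partition and row key already exists. A hedged usage sketch
// around helpers like the two above; the repository instance and the StudentEntity
// constructor arguments are illustrative, not from the original code.
try
{
    await repository.CreateNewStudent(new StudentEntity("CS", "jdoe") { Name = "Jane Doe" });
}
catch (StorageException ex) when (ex.RequestInformation.HttpStatusCode == 409)
{
    Console.WriteLine("A student with that partition/row key already exists.");
}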
public static async Task<HttpResponseMessage> Run(
    [HttpTrigger(AuthorizationLevel.Anonymous, "get", "post", Route = null)] HttpRequestMessage req,
    [Table("urls", "1", "KEY")] NextId keyTable,
    [Table("urls")] CloudTable tableOut,
    TraceWriter log)
{
    log.Info($"C# manually triggered function called with req: {req}");
    if (req == null)
    {
        return req.CreateResponse(HttpStatusCode.NotFound);
    }

    Request input = await req.Content.ReadAsAsync<Request>();
    if (input == null)
    {
        return req.CreateResponse(HttpStatusCode.NotFound);
    }

    var result = new List<Result>();
    var url = input.Input;
    bool tagMediums = input.TagMediums ?? true;
    bool tagSource = (input.TagSource ?? true) || tagMediums;
    log.Info($"URL: {url} Tag Source? {tagSource} Tag Mediums? {tagMediums}");

    if (String.IsNullOrWhiteSpace(url))
    {
        throw new Exception("Need a URL to shorten!");
    }

    // Seed the key-counter row if it doesn't exist yet.
    if (keyTable == null)
    {
        keyTable = new NextId { PartitionKey = "1", RowKey = "KEY", Id = 1024 };
        var keyAdd = TableOperation.Insert(keyTable);
        await tableOut.ExecuteAsync(keyAdd);
    }

    log.Info($"Current key: {keyTable.Id}");

    if (tagSource)
    {
        url = $"{url}?utm_source={UTM_SOURCE}";
    }

    if (tagMediums)
    {
        foreach (var medium in UTM_MEDIUMS)
        {
            var mediumUrl = $"{url}&utm_medium={medium}";
            var shortUrl = Encode(keyTable.Id++);
            log.Info($"Short URL for {mediumUrl} is {shortUrl}");
            var newUrl = new ShortUrl
            {
                PartitionKey = $"{shortUrl.First()}",
                RowKey = $"{shortUrl}",
                Medium = medium,
                Url = mediumUrl
            };
            var multiAdd = TableOperation.Insert(newUrl);
            await tableOut.ExecuteAsync(multiAdd);
            result.Add(new Result
            {
                ShortUrl = $"{SHORTENER_URL}{newUrl.RowKey}",
                LongUrl = WebUtility.UrlDecode(newUrl.Url)
            });
        }
    }
    else
    {
        var shortUrl = Encode(keyTable.Id++);
        log.Info($"Short URL for {url} is {shortUrl}");
        var newUrl = new ShortUrl
        {
            PartitionKey = $"{shortUrl.First()}",
            RowKey = $"{shortUrl}",
            Url = url
        };
        var singleAdd = TableOperation.Insert(newUrl);
        await tableOut.ExecuteAsync(singleAdd);
        result.Add(new Result
        {
            ShortUrl = $"{SHORTENER_URL}{newUrl.RowKey}",
            LongUrl = WebUtility.UrlDecode(newUrl.Url)
        });
    }

    // Persist the advanced counter.
    var operation = TableOperation.Replace(keyTable);
    await tableOut.ExecuteAsync(operation);
    log.Info($"Done.");

    // until https://github.com/Azure/azure-webjobs-sdk/issues/1492 is resolved
    // return req.CreateResponse(HttpStatusCode.OK, result);
    return new HttpResponseMessage(HttpStatusCode.OK)
    {
        Content = new StringContent(Newtonsoft.Json.JsonConvert.SerializeObject(result))
    };
}
async Task Record(Partition partition)
{
    var header = new DynamicTableEntity(directory.PartitionKey, partition.ToString());
    await directory.Table.ExecuteAsync(TableOperation.Insert(header));
}
public static async Task Run(
    //[TimerTrigger("*/5 * * * * *")] // For debug only, every 5 seconds
    [TimerTrigger("0 0 */1 * * *")]   // For production, every 1 hour
    TimerInfo myTimer,
    TraceWriter log)
{
    // Every hour: 0 0 */1 * * *
    // See https://codehollow.com/2017/02/azure-functions-time-trigger-cron-cheat-sheet/
    log.Info($"CoinValueSaver executed at: {DateTime.Now}");

    // Create account, client and table
    var account = CloudStorageAccount.Parse(ConnectionString);
    var tableClient = account.CreateCloudTableClient();
    var table = tableClient.GetTableReference(TableName);
    await table.CreateIfNotExistsAsync();

    // Get coin value (JSON)
    var client = new HttpClient();
    var json = await client.GetStringAsync(Url);
    var priceBtc = 0.0;
    var priceEth = 0.0;

    try
    {
        var array = JArray.Parse(json);
        var priceString = array.Children<JObject>()
            .FirstOrDefault(c => c.Property("symbol").Value.ToString().ToLower() == CoinTrend.SymbolBtc)?
            .Property("price_usd").Value.ToString();
        if (priceString != null)
        {
            double.TryParse(priceString, out priceBtc);
        }

        priceString = array.Children<JObject>()
            .FirstOrDefault(c => c.Property("symbol").Value.ToString().ToLower() == CoinTrend.SymbolEth)?
            .Property("price_usd").Value.ToString();
        if (priceString != null)
        {
            double.TryParse(priceString, out priceEth);
        }
    }
    catch
    {
        // Do nothing here for demo purposes
    }

    if (priceBtc < 0.1 || priceEth < 0.1)
    {
        log.Info("Something went wrong");
        return; // Do some logging here
    }

    var coinBtc = new CoinEntity
    {
        Symbol = CoinTrend.SymbolBtc,
        TimeOfReading = DateTime.Now,
        RowKey = "rowBtc" + DateTime.Now.Ticks,
        PartitionKey = "partition",
        PriceUsd = priceBtc
    };

    // Insert new value in table (ExecuteAsync, since this method is async)
    await table.ExecuteAsync(TableOperation.Insert(coinBtc));

    var coinEth = new CoinEntity
    {
        Symbol = CoinTrend.SymbolEth,
        TimeOfReading = DateTime.Now,
        RowKey = "rowEth" + DateTime.Now.Ticks,
        PartitionKey = "partition",
        PriceUsd = priceEth
    };

    // Insert new value in table
    await table.ExecuteAsync(TableOperation.Insert(coinEth));

    // Send notification to devices
    const string uriAndroid = "https://api.mobile.azure.com/v0.1/apps/lbugnion/CoinValue.Android/push/notifications";
    const string uriUwp = "https://api.mobile.azure.com/v0.1/apps/lbugnion/CoinValue.UWP/push/notifications";
    const string ApiToken = "XXX"; // redacted; keep real API tokens out of source code
    var notification = $"{{\"notification_content\":{{\"name\":\"CoinValue\",\"title\":\"New values saved\",\"body\": \"Btc: {priceBtc} U$ | Eth: {priceEth} U$\"}}}}";

    var request1 = new HttpRequestMessage()
    {
        RequestUri = new Uri(uriAndroid),
        Method = HttpMethod.Post,
        Content = new StringContent(notification, Encoding.UTF8, "application/json")
    };
    request1.Headers.Add("X-API-Token", ApiToken);
    var response = await client.SendAsync(request1);
    if (response.StatusCode != System.Net.HttpStatusCode.Accepted)
    {
        log.Error("Error posting the push notification to Android");
    }

    var request2 = new HttpRequestMessage()
    {
        RequestUri = new Uri(uriUwp),
        Method = HttpMethod.Post,
        Content = new StringContent(notification, Encoding.UTF8, "application/json")
    };
    request2.Headers.Add("X-API-Token", ApiToken);
    response = await client.SendAsync(request2);
    if (response.StatusCode != System.Net.HttpStatusCode.Accepted)
    {
        log.Error("Error posting the push notification to UWP");
    }
}
public static void Run([TimerTrigger("0 30 * * * *", RunOnStartup = true)] TimerInfo myTimer, TraceWriter log)
{
    log.Info($"C# Timer trigger function executed at: {DateTime.Now}");
    int numberOfStates = 12;

    // Retrieve storage account from connection string.
    var storageAccount = CloudStorageAccount.Parse(CloudConfigurationManager.GetSetting("AzureWebJobsStorage"));
    CloudTableClient tableClient = storageAccount.CreateCloudTableClient();
    CloudTable table = tableClient.GetTableReference("snotelmergetracker");
    table.CreateIfNotExists();

#if DEBUG
    int numberOfHoursToCheck = 3 * 7 * 24; // was 1; three weeks
#else
    int numberOfHoursToCheck = 7 * 24;     // one week
#endif

    // Look back numberOfHoursToCheck hours and fill in any missing values
    // (AddHours, not AddDays: the variable counts hours, matching the loop below).
    TableQuery<FileProcessedTracker> dateQuery = new TableQuery<FileProcessedTracker>().Where(
        TableQuery.CombineFilters(
            TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, partitionName),
            TableOperators.And,
            TableQuery.GenerateFilterConditionForDate("ForecastDate", QueryComparisons.GreaterThan,
                DateTime.UtcNow.AddHours(-1 * numberOfHoursToCheck))
        )
    );
    var results = table.ExecuteQuery(dateQuery);

    // Refactoring the below code to a shared method can cause a .NET issue related
    // to binding redirects; leave this here for now. See AzureUtilities.cs for more info.
    log.Info($"Attempting to sign in to AD for datalake upload");
    var adlsAccountName = CloudConfigurationManager.GetSetting("ADLSAccountName");

    // Auth secrets
    var domain = CloudConfigurationManager.GetSetting("Domain");
    var webApp_clientId = CloudConfigurationManager.GetSetting("WebAppClientId");
    var clientSecret = CloudConfigurationManager.GetSetting("ClientSecret");
    var clientCredential = new ClientCredential(webApp_clientId, clientSecret);
    var creds = ApplicationTokenProvider.LoginSilentAsync(domain, clientCredential).Result;
    var fullAdlsAccountName = CloudConfigurationManager.GetSetting("ADLSFullAccountName");

    // Create client objects and set the subscription ID
    var adlsFileSystemClient = new DataLakeStoreFileSystemManagementClient(creds);
    var adlsClient = AdlsClient.CreateClient(fullAdlsAccountName, creds);

    var checkDate = DateTime.UtcNow.AddHours(-1 * numberOfHoursToCheck);
    while (checkDate < DateTime.UtcNow)
    {
        // Has it already been marked as complete in the table?
        string nameToCheck = SnotelUtilities.CreateSnotelFileDate(checkDate) + ".snotel.csv";
        if (results.Where(r => r.RowKey == nameToCheck).Count() == 0)
        {
            log.Info($"{nameToCheck} doesn't exist in completed table, need to see if all files exist to concat");
            var lexStartAndEnd = SnotelUtilities.CreateSnotelFileDate(checkDate);
            var hourlyFilesOnAdls = adlsClient.EnumerateDirectory(csvDirectory)
                .Where(f => f.Name.StartsWith(lexStartAndEnd))
                .Select(f => f.Name)
                .ToList();
            if (hourlyFilesOnAdls.Count == numberOfStates)
            {
                if (ConcatFiles(adlsClient, nameToCheck, hourlyFilesOnAdls))
                {
                    // Mark file as finished in table
                    FileProcessedTracker tracker = new FileProcessedTracker
                    {
                        ForecastDate = checkDate,
                        PartitionKey = partitionName,
                        RowKey = nameToCheck,
                        Url = "unknown"
                    };
                    table.Execute(TableOperation.Insert(tracker));
                }
                else
                {
                    log.Error($"Missing data for {checkDate} need to manually backfill, can't concat");
                }
            }
            else
            {
                log.Info($"all state files don't exist for {checkDate}, waiting until next run");
            }
        }
        else
        {
            log.Info($"{nameToCheck} marked as already concated");
        }
        checkDate = checkDate.AddHours(1);
    }
}
public static async Task<HttpResponseMessage> Run(
    [HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = null)] HttpRequestMessage req,
    ILogger log)
{
    log.LogTrace("C# HTTP trigger function processed a request.");
    dynamic body = await req.Content.ReadAsStringAsync();
    var data = JsonConvert.DeserializeObject<AssessmentProfile>(body as string);

    // Parse query parameters
    Guid? CompanyId = data?.CompanyId;
    string CompanyName = data?.CompanyName;
    Guid? AssessmentId = data?.AssessmentId;
    string GroupAdminToken = data?.GroupAdminToken;
    string AssessmentToken = data?.AssessmentToken;
    // Contact Contact = data?.Contact;

    // Account key redacted; keep storage credentials out of source code.
    string connectionstring2 = "DefaultEndpointsProtocol=https;AccountName=spassessservices20190227;AccountKey=XXX;EndpointSuffix=core.windows.net";

    // Both values are required below (CompanyId as partition key, CompanyName as row key),
    // so reject the request if either is missing.
    if (CompanyName == null || CompanyId == null)
    {
        return req.CreateResponse(HttpStatusCode.BadRequest, "Please pass a CompanyId and CompanyName in the request body");
    }

    // Check whether the profile already exists, and insert it if not.
    CloudStorageAccount storageAccount = CloudStorageAccount.Parse(connectionstring2);
    CloudTableClient tableClient = storageAccount.CreateCloudTableClient();
    CloudTable cloudTable = tableClient.GetTableReference("AssessmentProfile");
    TableOperation tableOperation = TableOperation.Retrieve<AssessmentProfile>(CompanyId.Value.ToString(), CompanyName);
    TableResult tableResult = await cloudTable.ExecuteAsync(tableOperation);
    AssessmentProfile person = tableResult.Result as AssessmentProfile;

    if (person == null)
    {
        try
        {
            if (AssessmentId == null)
            {
                AssessmentId = GenerateAssessmentId();
            }
            var assessment = new AssessmentProfile()
            {
                PartitionKey = CompanyId.Value.ToString(),
                RowKey = CompanyName,
                CompanyId = CompanyId,
                AssessmentId = AssessmentId,
                CompanyName = CompanyName,
                GroupAdminToken = GroupAdminToken,
                //Contact = Contact
            };
            TableOperation insertOperation = TableOperation.Insert(assessment);
            TableResult insertResult = await cloudTable.ExecuteAsync(insertOperation);
        }
        catch (StorageException se)
        {
            log.LogTrace(se.Message);
        }
        catch (Exception ex)
        {
            log.LogTrace(ex.Message);
        }
    }
    return req.CreateResponse(HttpStatusCode.Created, "successful");
}
public async Task CreateAsync(GlossaryItem message) => await Table.ExecuteAsync(TableOperation.Insert(message));
public static async Task<string> Run(
    [HttpTrigger(AuthorizationLevel.Anonymous, "get", "post", Route = null)] HttpRequest req,
    ILogger log)
{
    string code = req.Query["code"];
    string username = req.Query["user"];
    string grant_type = "authorization_code";
    string url = @"https://login.microsoftonline.com/{tenant}/oauth2/v2.0/token";
    HttpClient client = new HttpClient();

    CloudStorageAccount storageAccount = CloudStorageAccount.Parse("XXXX");
    CloudTableClient tableClient = storageAccount.CreateCloudTableClient();
    CloudTable table = tableClient.GetTableReference("appUsers");
    // Make sure the table exists before reading from it (originally this was
    // called only just before the insert, after the first read).
    await table.CreateIfNotExistsAsync();

    // Delete any previously stored tokens for this user.
    TableOperation retrieveOperation = TableOperation.Retrieve<User>(username, username);
    TableResult retrievedResult = await table.ExecuteAsync(retrieveOperation);
    User deleteEntity = (User)retrievedResult.Result;
    if (deleteEntity != null)
    {
        TableOperation deleteOperation = TableOperation.Delete(deleteEntity);
        await table.ExecuteAsync(deleteOperation);
    }

    var values = new Dictionary<string, string>
    {
        { "grant_type", grant_type },
        { "client_id", "a02f6ab7-1acd-4bfc-97d5-992acc1301b1" },
        { "scope", "https://graph.microsoft.com/User.Read offline_access" },
        { "redirect_uri", "https://graphsharepoint.azurewebsites.net/api/GetToken" },
        { "client_secret", "XXX" },
        { "code", code }
    };
    var content = new FormUrlEncodedContent(values);
    var response = await client.PostAsync(url, content);
    dynamic responseString = await response.Content.ReadAsAsync<object>();

    User user = new User
    {
        PartitionKey = username,
        RowKey = username,
        name = username,
        accessToken = responseString.access_token,
        refreshToken = responseString.refresh_token
    };
    TableOperation insertOperation = TableOperation.Insert(user);
    await table.ExecuteAsync(insertOperation);

    return responseString.access_token;
}
static async Task InsertCustomerAsync(CloudTable table, CustomerEntity customer)
{
    var insert = TableOperation.Insert(customer);
    await table.ExecuteAsync(insert);
}
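// A quick caller sketch for the helper above, assuming the CustomerEntity class from
// the earlier samples (last name as partition key, first name as row key) and a
// CloudStorageAccount named storageAccount already in scope.
var table = storageAccount.CreateCloudTableClient().GetTableReference("people");
await table.CreateIfNotExistsAsync();
await InsertCustomerAsync(table, new CustomerEntity("Smith", "Ana")
{
    Email = "ana@example.com",
    PhoneNumber = "425-555-0199"
});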
public static async Task<HttpResponseMessage> Run(
    [HttpTrigger(AuthorizationLevel.Anonymous, "get", "post", Route = null)] HttpRequest req,
    ILogger log,
    ExecutionContext context)
{
    log.LogInformation("C# HTTP trigger function processed a request.");

    string connectionsJson = File.ReadAllText(Path.Combine(context.FunctionAppDirectory, "Connections.json"));
    JObject ConnectionsObject = JObject.Parse(connectionsJson);
    string connectionString = ConnectionsObject["AZURE_STORAGE_URL"].ToString();
    CloudStorageAccount storageAccount = CloudStorageAccount.Parse(connectionString);
    CloudTableClient tableClient = storageAccount.CreateCloudTableClient();
    CloudTable table = tableClient.GetTableReference("virtualroomconfig");
    await table.CreateIfNotExistsAsync();

    var room = req.Headers["room"];
    if (string.IsNullOrEmpty(room))
    {
        room = req.Query["room"];
    }
    if (string.IsNullOrEmpty(room))
    {
        return new HttpResponseMessage(HttpStatusCode.BadRequest)
        {
            Content = new StringContent("Please pass a room name on the query string or in the header")
        };
    }

    var partitionKey = "Demo";
    var rowKey = room;

    try
    {
        // Get the room from the table
        var getRoom = TableOperation.Retrieve<VirtualRoomConfig>(partitionKey, rowKey);
        var query = await table.ExecuteAsync(getRoom);
        var currRoomConfig = (VirtualRoomConfig)query.Result;

        // If the room does not exist, create a record using the default config
        if (currRoomConfig == null)
        {
            var defaultRoom = new VirtualRoomConfig(partitionKey, rowKey);
            var createRoom = TableOperation.Insert(defaultRoom);
            await table.ExecuteAsync(createRoom);
            currRoomConfig = (VirtualRoomConfig)(await table.ExecuteAsync(getRoom)).Result;
        }

        var operation = req.Query["operation"].ToString().ToLower();
        var updated = false;

        if (!string.IsNullOrEmpty(operation))
        {
            if (operation.Equals("reset"))
            {
                currRoomConfig.LoadDefaultConfig();
                updated = true;
            }
            else if (operation.Equals("turn"))
            {
                var item = req.Query["item"].ToString().ToLower();
                var instance = req.Query["instance"].ToString().ToLower();
                var value = req.Query["value"].ToString().ToLower();
                bool? valueBool = (value.Equals("on") || value.Equals("open"))
                    ? true
                    : ((value.Equals("off") || value.Equals("close")) ? (bool?)false : null);
                if (valueBool == null)
                {
                    updated = false;
                }
                else if (item.Equals("lights"))
                {
                    if (instance.Equals("all"))
                    {
                        if (currRoomConfig.Lights_bathroom == (bool)valueBool && currRoomConfig.Lights_room == (bool)valueBool)
                        {
                            currRoomConfig.Message = $"All lights already {value}";
                        }
                        else
                        {
                            currRoomConfig.Lights_bathroom = (bool)valueBool;
                            currRoomConfig.Lights_room = (bool)valueBool;
                            currRoomConfig.Message = $"Ok, turning all the lights {value}";
                        }
                        updated = true;
                    }
                    else if (instance.Equals("room"))
                    {
                        if (currRoomConfig.Lights_room == (bool)valueBool)
                        {
                            currRoomConfig.Message = $"Room light already {value}";
                        }
                        else
                        {
                            currRoomConfig.Lights_room = (bool)valueBool;
                            currRoomConfig.Message = $"Ok, turning {value} the room light";
                        }
                        updated = true;
                    }
                    else if (instance.Equals("bathroom"))
                    {
                        if (currRoomConfig.Lights_bathroom == (bool)valueBool)
                        {
                            currRoomConfig.Message = $"Bathroom light already {value}";
                        }
                        else
                        {
                            currRoomConfig.Lights_bathroom = (bool)valueBool;
                            currRoomConfig.Message = $"Ok, turning {value} the bathroom light";
                        }
                        updated = true;
                    }
                }
                else if (item.Equals("tv"))
                {
                    if (currRoomConfig.Television == (bool)valueBool)
                    {
                        currRoomConfig.Message = $"TV already {value}";
                    }
                    else
                    {
                        currRoomConfig.Television = (bool)valueBool;
                        currRoomConfig.Message = $"Ok, turning the TV {value}";
                    }
                    updated = true;
                }
                else if (item.Equals("blinds"))
                {
                    if (currRoomConfig.Blinds == (bool)valueBool)
                    {
                        currRoomConfig.Message = (bool)valueBool ? "Blinds already opened" : "Blinds already closed";
                    }
                    else
                    {
                        currRoomConfig.Blinds = (bool)valueBool;
                        currRoomConfig.Message = (bool)valueBool ? "All right, opening the blinds" : "All right, closing the blinds";
                    }
                    updated = true;
                }
                else if (item.Equals("ac"))
                {
                    if (currRoomConfig.AC == (bool)valueBool)
                    {
                        currRoomConfig.Message = $"AC already {value}";
                    }
                    else
                    {
                        currRoomConfig.AC = (bool)valueBool;
                        currRoomConfig.Message = $"Ok, turning the AC {value}";
                    }
                    updated = true;
                }
            }
            else if (operation.Equals("settemperature"))
            {
                currRoomConfig.Temperature = int.Parse(req.Query["value"]);
                currRoomConfig.Message = "set temperature to " + req.Query["value"];
                updated = true;
            }
            else if (operation.Equals("increasetemperature"))
            {
                currRoomConfig.Temperature += int.Parse(req.Query["value"]);
                currRoomConfig.Message = "raised temperature by " + req.Query["value"] + " degrees";
                updated = true;
            }
            else if (operation.Equals("decreasetemperature"))
            {
                currRoomConfig.Temperature -= int.Parse(req.Query["value"]);
                currRoomConfig.Message = "decreased temperature by " + req.Query["value"] + " degrees";
                updated = true;
            }
        }

        if (updated)
        {
            var updateRoom = TableOperation.Replace(currRoomConfig);
            await table.ExecuteAsync(updateRoom);
            log.LogInformation("successfully updated the record");
        }

        return new HttpResponseMessage(HttpStatusCode.OK)
        {
            Content = new StringContent(JsonConvert.SerializeObject(currRoomConfig, Formatting.Indented), Encoding.UTF8, "application/json")
        };
    }
    catch (Exception e)
    {
        log.LogError(e.Message);
        return new HttpResponseMessage(HttpStatusCode.BadRequest)
        {
            Content = new StringContent("Failed to process request")
        };
    }
}
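// A hedged caller sketch for the room-config function above; the host URL and route
// are placeholders for wherever the function is deployed.
var http = new HttpClient();
var roomResponse = await http.GetAsync(
    "https://<your-function-app>.azurewebsites.net/api/VirtualRoom" +
    "?room=demo1&operation=turn&item=lights&instance=all&value=on");
Console.WriteLine(await roomResponse.Content.ReadAsStringAsync()); // JSON room state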
void InsertTestEntity(TestEntity entity)
{
    entity.PartitionKey = partition.PartitionKey;
    table.Execute(TableOperation.Insert(entity));
}
public async Task<bool> CreateAsync(string itemId, T item, bool orReplace = true)
{
    try
    {
        var callbackStr = JsonConvert.SerializeObject(item, serializerSettings);
        var entity = new DynamicTableEntity(Escape(Scope), Escape(itemId))
        {
            Properties =
            {
                new KeyValuePair<string, EntityProperty>("data", new EntityProperty(callbackStr))
            }
        };
        await Table.ExecuteAsync(orReplace ? TableOperation.InsertOrReplace(entity) : TableOperation.Insert(entity)).ConfigureAwait(false);
    }
    catch (StorageException ex)
    {
        // 409 Conflict means the entity already exists; report false instead of
        // throwing when the caller asked for a plain Insert.
        if (ex.RequestInformation != null && ex.RequestInformation.HttpStatusCode == 409 && !orReplace)
        {
            return false;
        }
        throw;
    }
    return true;
}
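// Usage might look like the following; the store instance and the Widget type are
// illustrative names, not part of the original class.
var created = await store.CreateAsync("widget-1", new Widget { Name = "A" }, orReplace: false);
var again = await store.CreateAsync("widget-1", new Widget { Name = "B" }, orReplace: false);
Console.WriteLine($"{created} / {again}"); // True / False: the second insert hits the existing row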
public async Task<string> Store(string expectedETag, string metadata, List<PendingTransactionState<TState>> statesToPrepare, long? commitUpTo, long? abortAfter)
{
    if (this.key.ETag != expectedETag)
    {
        throw new ArgumentException(nameof(expectedETag), "Etag does not match");
    }

    // Assemble all storage operations into a single batch.
    // These operations must commit in sequence, but not necessarily atomically,
    // so we can split this up if needed.
    var batchOperation = new BatchOperation(logger, key, table);

    // First, clean up aborted records
    if (abortAfter.HasValue && states.Count != 0)
    {
        while (states.Count > 0 && states[states.Count - 1].Key > abortAfter)
        {
            var removedKey = states[states.Count - 1].Key;
            var entity = states[states.Count - 1].Value;
            await batchOperation.Add(TableOperation.Delete(entity)).ConfigureAwait(false);
            states.RemoveAt(states.Count - 1);
            if (logger.IsEnabled(LogLevel.Trace))
            {
                // Log the key of the entity that was just removed (indexing into
                // states here after RemoveAt would point at the wrong element).
                logger.LogTrace($"{partition}.{removedKey:x16} Delete {entity.TransactionId}");
            }
        }
    }

    // Second, persist non-obsolete prepare records
    var obsoleteBefore = commitUpTo.HasValue ? commitUpTo.Value : key.CommittedSequenceId;
    if (statesToPrepare != null)
    {
        foreach (var s in statesToPrepare)
        {
            if (s.SequenceId >= obsoleteBefore)
            {
                if (FindState(s.SequenceId, out var pos))
                {
                    // Overwrite with new pending state
                    var existing = states[pos].Value;
                    existing.TransactionId = s.TransactionId;
                    existing.TransactionTimestamp = s.TimeStamp;
                    existing.TransactionManager = s.TransactionManager;
                    existing.SetState(s.State, this.jsonSettings);
                    await batchOperation.Add(TableOperation.Replace(existing)).ConfigureAwait(false);
                    states.RemoveAt(pos);
                    if (logger.IsEnabled(LogLevel.Trace))
                    {
                        logger.LogTrace($"{partition}.{existing.SequenceId:x16} Update {existing.TransactionId}");
                    }
                }
                else
                {
                    var entity = StateEntity.Create(this.jsonSettings, this.partition, s);
                    await batchOperation.Add(TableOperation.Insert(entity)).ConfigureAwait(false);
                    states.Insert(pos, new KeyValuePair<long, StateEntity>(s.SequenceId, entity));
                    if (logger.IsEnabled(LogLevel.Trace))
                    {
                        logger.LogTrace($"{partition}.{s.SequenceId:x16} Insert {entity.TransactionId}");
                    }
                }
            }
        }
    }

    // Third, persist metadata and commit position
    key.Metadata = metadata;
    if (commitUpTo.HasValue && commitUpTo.Value > key.CommittedSequenceId)
    {
        key.CommittedSequenceId = commitUpTo.Value;
    }
    if (string.IsNullOrEmpty(this.key.ETag))
    {
        await batchOperation.Add(TableOperation.Insert(this.key)).ConfigureAwait(false);
        if (logger.IsEnabled(LogLevel.Trace))
        {
            logger.LogTrace($"{partition}.k Insert");
        }
    }
    else
    {
        await batchOperation.Add(TableOperation.Replace(this.key)).ConfigureAwait(false);
        if (logger.IsEnabled(LogLevel.Trace))
        {
            logger.LogTrace($"{partition}.k Update");
        }
    }

    // Fourth, remove obsolete records
    if (states.Count > 0 && states[0].Key < obsoleteBefore)
    {
        FindState(obsoleteBefore, out var pos);
        for (int i = 0; i < pos; i++)
        {
            await batchOperation.Add(TableOperation.Delete(states[i].Value)).ConfigureAwait(false);
            if (logger.IsEnabled(LogLevel.Trace))
            {
                logger.LogTrace($"{partition}.{states[i].Key:x16} Delete {states[i].Value.TransactionId}");
            }
        }
        states.RemoveRange(0, pos);
    }

    await batchOperation.Flush().ConfigureAwait(false);

    if (logger.IsEnabled(LogLevel.Debug))
    {
        logger.LogDebug($"{partition} Stored v{this.key.CommittedSequenceId} eTag={key.ETag}");
    }
    return key.ETag;
}
public static async Task Run(
    [EventGridTrigger()] EventGridEvent eventPayload,
    [Table("images")] CloudTable imageTable,
    TraceWriter log)
{
    log.Info($"Received event: {eventPayload}");

    var imageData = eventPayload.Data.ToObject<ImageData>();
    if (imageData != null)
    {
        log.Info($"Request to process URL: {imageData.Url}.");
        if (Uri.TryCreate(imageData.Url, UriKind.Absolute, out Uri uri))
        {
            try
            {
                var client = new HttpClient();
                var result = await client.GetAsync(imageData.Url);
                var mime = result.Content.Headers.ContentType.MediaType;
                if (!mime.StartsWith("image/"))
                {
                    log.Warning($"Not an image: {mime}");
                    return;
                }

                var id = Guid.NewGuid().ToString();

                log.Info("Get storage account.");
                var storageAccount = CloudStorageAccount.Parse(ConfigurationManager.AppSettings["AzureWebJobsStorage"].ToString());

                log.Info("Create client.");
                var blobClient = storageAccount.CreateCloudBlobClient();

                log.Info("Get container reference.");
                var container = blobClient.GetContainerReference("images");

                log.Info("Get block blob reference.");
                var blob = container.GetBlockBlobReference(id);

                log.Info("Upload image stream.");
                await blob.UploadFromStreamAsync(await result.Content.ReadAsStreamAsync());
                log.Info($"Added blob with id {id}");

                var newEntry = new ImageEntry
                {
                    PartitionKey = ImageEntry.GetPartitionKey(uri),
                    RowKey = ImageEntry.GetRowKey(uri),
                    BlobId = id,
                    Url = uri.ToString(),
                    MimeType = mime,
                    Caption = string.Empty
                };

                log.Info($"Adding table: {JsonConvert.SerializeObject(newEntry)}.");
                var operation = TableOperation.Insert(newEntry);

                log.Info("Insert table.");
                await imageTable.ExecuteAsync(operation);
                log.Info($"Added table mapping.");
            }
            catch (Exception ex)
            {
                log.Error("Unexpected exception", ex);
                throw;
            }
        }
        else
        {
            log.Warning($"Bad url: {imageData.Url}");
        }
    }
}
public async Task CreateActivityLog(ActivityLog activityLog)
{
    TableOperation insertOperation = TableOperation.Insert(activityLog);
    await this._ActivityLogTable.ExecuteAsync(insertOperation);
}
public async Task<int> GetNextId(string table)
{
    await SequenceGeneratorSemaphore.semaphoreSlim.WaitAsync();
    try
    {
        var counterTable = await Utils.GetTable(CounterTableName);
        if (!TableIds.Any(w => w.Table == table))
        {
            var getCounter = TableOperation.Retrieve<Counter>("default", table);
            var result = await counterTable.ExecuteAsync(getCounter);

            // Load and update the existing counter record
            if (result.Result != null)
            {
                var counter = (Counter)result.Result;
                counter.CurrentCounter = counter.CurrentCounter + CounterAdvance;
                TableIds.Add(new SequenceRecord()
                {
                    Table = table,
                    NextId = counter.CurrentCounter,
                    SaveId = counter.CurrentCounter + CounterAdvance
                });
                TableOperation updateOperation = TableOperation.Replace(counter);
                await counterTable.ExecuteAsync(updateOperation);
            }
            // Create and save a new counter record
            else
            {
                var counter = new Counter(table);
                TableOperation insertOperation = TableOperation.Insert(counter);
                await counterTable.ExecuteAsync(insertOperation);
                TableIds.Add(new SequenceRecord()
                {
                    Table = table,
                    NextId = counter.CurrentCounter,
                    SaveId = counter.CurrentCounter + CounterAdvance
                });
            }
        }

        var sr = TableIds.First(w => w.Table == table);
        sr.NextId++;

        // Persist a new reserved range once the in-memory ids are exhausted
        if (sr.NextId >= sr.SaveId)
        {
            var getCounter = TableOperation.Retrieve<Counter>("default", table);
            var result = await counterTable.ExecuteAsync(getCounter);
            var counter = (Counter)result.Result;
            counter.CurrentCounter = sr.NextId;
            TableOperation updateOperation = TableOperation.Replace(counter);
            await counterTable.ExecuteAsync(updateOperation);
        }
        return sr.NextId;
    }
    catch (Exception e)
    {
        LogFactory.GetLogger().Log(LogLevel.Error, e);
        return 0;
    }
    finally
    {
        SequenceGeneratorSemaphore.semaphoreSlim.Release();
    }
}
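// The generator above reserves ids in blocks of CounterAdvance, so most calls are
// served from memory and the counter row is only rewritten when a block runs out.
// A hedged usage sketch; the sequence instance name is illustrative. Note 0 is the
// error sentinel returned when storage access fails.
int orderId = await sequence.GetNextId("Orders");
int nextOrderId = await sequence.GetNextId("Orders"); // usually no storage round-trip
Console.WriteLine($"{orderId}, {nextOrderId}");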
static void Main(string[] args)
{
    // Account name and key. Modify for your account.
    // Setup before use!
    string accountName = "";
    string accountKey = "";

    if (accountName.Length == 0 || accountKey.Length == 0)
    {
        Console.WriteLine("Setup accountName and accountKey!");
    }
    else
    {
        try
        {
            // Get a reference to the storage account, with authentication credentials.
            CloudStorageAccount storageAccount = new CloudStorageAccount(new StorageCredentials(accountName, accountKey), true);
            CloudTableClient tableClient = storageAccount.CreateCloudTableClient();

            // Retrieve a reference to a table.
            CloudTable cloudTable = tableClient.GetTableReference("system");

            // Create the table if it does not already exist.
            cloudTable.CreateIfNotExists();

            // Output table URI to debug window.
            Console.WriteLine(cloudTable.Uri);

            // Read storage
            string partitionFilter = TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, "PK0123456789");
            string rowFilter = TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.Equal, "RK0123456789");
            string finalFilter = TableQuery.CombineFilters(partitionFilter, TableOperators.And, rowFilter);

            bool itemChanged;
            for (int i = 0; i < 20; i++)
            {
                itemChanged = false;
                DateTime dtm = DateTime.Now;
                TableQuery<IdEntity> query = new TableQuery<IdEntity>().Where(finalFilter);
                var list = cloudTable.ExecuteQuery(query).ToList();
                IdEntity customer1 = null;

                // Check if the entity exists
                if (list.Count == 0)
                {
                    customer1 = new IdEntity("PK0123456789", "RK0123456789");
                    customer1.Id = 0;

                    // Create the TableOperation that inserts the customer entity.
                    var insertOperation = TableOperation.Insert(customer1);

                    // Execute the insert operation.
                    cloudTable.Execute(insertOperation);
                }
                else
                {
                    customer1 = list[0];
                    customer1.Id++;
                    try
                    {
                        // Send an If-Match header so the replace fails with 412 if the
                        // entity changed since it was read.
                        TableResult tblr = cloudTable.Execute(
                            TableOperation.InsertOrReplace(customer1),
                            null,
                            new OperationContext
                            {
                                UserHeaders = new Dictionary<String, String>
                                {
                                    { "If-Match", customer1.ETag }
                                }
                            });
                    }
                    catch (StorageException ex)
                    {
                        if (ex.RequestInformation.HttpStatusCode == 412)
                        {
                            Console.WriteLine("Optimistic concurrency violation – entity has changed since it was retrieved.");
                            itemChanged = true;
                        }
                        else
                        {
                            throw;
                        }
                    }
                }

                if (itemChanged)
                {
                    continue;
                }

                TimeSpan tsp = DateTime.Now - dtm;
                query = new TableQuery<IdEntity>().Where(finalFilter);
                list = cloudTable.ExecuteQuery(query).ToList();
                if (list.Count == 1)
                {
                    customer1 = list[0];
                    // TotalMilliseconds, not Milliseconds: the latter is only the
                    // millisecond component of the elapsed time.
                    Console.WriteLine("Id: {0}, Time: {1} ms, Etag: {2}", customer1.Id, (int)tsp.TotalMilliseconds, customer1.ETag);
                }
                else
                {
                    Console.WriteLine("No item found");
                }
            }
        }
        catch (Exception ex)
        {
            Console.WriteLine(ex.Message);
        }
    }
    Console.ReadKey();
}
public TableOperation Prepare() => TableOperation.Insert(stream);
public static Task<TableResult> Insert<T>(this CloudTable table, T obj) where T : ITableEntity
{
    return table.ExecuteAsync(TableOperation.Insert(obj));
}
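// With that extension in scope, an insert collapses to a single call. Illustrative
// usage, assuming the CustomerEntity type from the earlier samples and a
// CloudTableClient named tableClient already in scope.
CloudTable people = tableClient.GetTableReference("people");
TableResult inserted = await people.Insert(new CustomerEntity("Smith", "Ana"));
// 201 (Created) or 204 (No Content), depending on whether content echo is enabled.
Console.WriteLine(inserted.HttpStatusCode);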
public async Task<IActionResult> Run(
    [HttpTrigger(AuthorizationLevel.Function, "post", Route = null)] HttpRequest req,
    [Table("Businesses", Connection = Defaults.DefaultStorageConnection)] CloudTable table,
    [Blob("logos/{rand-guid}", FileAccess.Write)] ICloudBlob logoBlob,
    [Queue("businesses", Connection = Defaults.DefaultStorageConnection)] ICollector<string> queue,
    ILogger log)
{
    if (!req.HasFormContentType)
    {
        return new BadRequestErrorMessageResult("Request must be form-data");
    }

    var request = BindModel(req.Form);

    // Validate
    var validationResult = await new CreateBusinessDto.Validator().ValidateAsync(request);
    if (!validationResult.IsValid)
    {
        return new BadRequestErrorMessageResult(validationResult.ToString());
    }

    // Map
    var entity = Defaults.Mapper.Map<Business>(request);

    // Retrieve position
    try
    {
        await ApplyGeoPositionAsync($"{request.Name} {request.AddressLine} {request.ZipCode} {request.Town}", entity);
    }
    catch
    {
        return new BadRequestErrorMessageResult("Couldn't find business");
    }

    // Upload logo to blob
    if (request.Logo != null)
    {
        entity.LogoSource = (await UploadLogoAsync(request.Logo, logoBlob)).ToString();
    }

    // Insert into table
    try
    {
        await table.ExecuteAsync(TableOperation.Insert(entity));
    }
    catch (Exception ex)
    {
        log.LogError(ex, "Table insert operation failed");
        return new InternalServerErrorResult();
    }

    try
    {
        queue.Add(JsonConvert.SerializeObject(entity));
    }
    catch (Exception ex)
    {
        log.LogError(ex, "Couldn't add business to queue");
    }

    return new OkResult(); // TODO: Return created-at result
}
public async Task Insert(ITableEntity entity)
{
    await this.ExecuteAsync(TableOperation.Insert(entity));
}
//GroupAdmins
public void MigrateGroupAdminstoDest(DateTime LastRunTime)
{
    TableQuery<SOS.OPsTools.Entities.GroupAdminsBase> UQuery = null;
    if (base.LoadTableSilent(ConstantOps.GroupAdminsSrcTableName))
    {
        UQuery = new TableQuery<SOS.OPsTools.Entities.GroupAdminsBase>()
            .Where(TableQuery.GenerateFilterConditionForDate("Timestamp", QueryComparisons.GreaterThanOrEqual, LastRunTime));
        var qryReturn = base.EntityTable.ExecuteQuery(UQuery).ToList();
        SOS.OPsTools.Entities.GroupAdminsDest destRecord = new GroupAdminsDest();
        List<int> RetryMigrateAdminIDs = new List<int>();
        if (storageAccessDestination.LoadTableSilent(ConstantOps.GroupAdminsDestTableName))
        {
            // Batch processing, one batch per admin
            TableBatchOperation insertSessions = null;
            var distinctAdminIDs = qryReturn.Select(s => s.AdminID).Distinct();
            MapGroupAdmin();
            foreach (var p in distinctAdminIDs)
            {
                try
                {
                    insertSessions = new TableBatchOperation();
                    List<Entities.GroupAdminsBase> sessionsByAdminID = qryReturn.Where(ses => ses.AdminID == p).ToList();
                    // TableBatchOperation is not thread-safe, so build the batch sequentially.
                    foreach (var session in sessionsByAdminID)
                    {
                        destRecord = ConvertSessionToDest(session); // Use AutoMapper
                        destRecord.AllowGroupManagement = false;
                        insertSessions.Add(TableOperation.Insert(destRecord));
                    }
                    storageAccessDestination.EntityTable.ExecuteBatch(insertSessions);
                }
                catch (Exception ex)
                {
                    opsLogger.WriteLog("GroupAdmins Error Message: " + ex.Message + Environment.NewLine + "AdminID: " + p);
                    RetryMigrateAdminIDs.Add(p);
                }
            }
            // Sequential fallback for admins whose batch failed
            if (RetryMigrateAdminIDs.Count > 0)
            {
                foreach (var p in RetryMigrateAdminIDs)
                {
                    List<GroupAdminsBase> sessionsByAdminID = qryReturn.Where(ses => ses.AdminID == p).ToList();
                    foreach (var session in sessionsByAdminID)
                    {
                        try
                        {
                            destRecord = ConvertSessionToDest(session); // Use AutoMapper
                            destRecord.AllowGroupManagement = false;
                            storageAccessDestination.EntityTable.Execute(TableOperation.InsertOrReplace(destRecord));
                        }
                        catch (Exception ex)
                        {
                            opsLogger.WriteLog("GroupAdmins Error Message: " + ex.Message + Environment.NewLine + "AdminID: " + p);
                        }
                    }
                }
            }
        }
        else
        {
            opsLogger.WriteLog("GroupAdmins Table not loaded, Please check settings");
        }
    }
}
static void Main(string[] args)
{
    // Use a single partition - OK for us but not for real for scalability!
    const string partitionName = "My_Peoples_Partition";

    try
    {
        // Azure tables are just another form of storage so this is the same as before with blobs and queues...
        // As before you will have to set the connection string in App.config
        CloudStorageAccount storageAccount = CloudStorageAccount.Parse(
            ConfigurationManager.ConnectionStrings["AzureWebJobsStorage"].ToString());
        CloudTableClient tableClient = storageAccount.CreateCloudTableClient();

        // Give the table a name and create it if it does not already exist.
        CloudTable table = tableClient.GetTableReference("people");
        table.CreateIfNotExists();

        #region CREATE
        CustomerEntity customer1 = new CustomerEntity(partitionName, "Walter");
        customer1.Email = "*****@*****.**";
        customer1.PhoneNumber = "425-555-0101";

        // Create the TableOperation that inserts the customer entity, then execute it.
        var insertOperation = TableOperation.Insert(customer1);
        table.Execute(insertOperation);

        // Create the batch operation.
        TableBatchOperation batchOperation = new TableBatchOperation();

        // Create a customer entity and add it to the table.
        CustomerEntity customer2 = new CustomerEntity(partitionName, "Jeff");
        customer2.Email = "*****@*****.**";
        customer2.PhoneNumber = "425-555-0104";

        // Create another customer entity and add it to the table.
        CustomerEntity customer3 = new CustomerEntity(partitionName, "Ben");
        customer3.Email = "*****@*****.**";
        customer3.PhoneNumber = "425-555-0102";

        // Add both customer entities to the batch insert operation, then execute it.
        batchOperation.Insert(customer2);
        batchOperation.Insert(customer3);
        table.ExecuteBatch(batchOperation);
        #endregion

        #region READ
        // Construct the query operation for all customer entities in the partition.
        TableQuery<CustomerEntity> query2 = new TableQuery<CustomerEntity>()
            .Where(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, partitionName));
        List<CustomerEntity> entityList2 = new List<CustomerEntity>(table.ExecuteQuery(query2));

        // Print the fields for each customer.
        foreach (CustomerEntity entity2 in entityList2)
        {
            Console.WriteLine("{0}; {1}; {2}; {3}", entity2.PartitionKey, entity2.RowKey, entity2.Email, entity2.PhoneNumber);
        }
        Console.WriteLine();

        // Create and execute a retrieve operation for a single customer entity.
        TableOperation retrieveOperation2 = TableOperation.Retrieve<CustomerEntity>(partitionName, "Ben");
        TableResult retrievedResult2 = table.Execute(retrieveOperation2);

        // Print the phone number of the result.
        if (retrievedResult2.Result != null)
        {
            Console.WriteLine("Ben's phone number: " + ((CustomerEntity)retrievedResult2.Result).PhoneNumber); // note the cast
        }
        else
        {
            Console.WriteLine("Ben's phone number could not be retrieved.");
        }
        #endregion

        #region UPDATE
        // Retrieve the entity to update.
        TableOperation retrieveOperation3 = TableOperation.Retrieve<CustomerEntity>(partitionName, "Ben");
        TableResult retrievedResult3 = table.Execute(retrieveOperation3);

        // Assign the result to a CustomerEntity object.
        CustomerEntity updateEntity3 = (CustomerEntity)retrievedResult3.Result; // note the cast
        if (updateEntity3 != null)
        {
            // Change the phone number.
            updateEntity3.PhoneNumber = "425-555-0105";

            // Create and execute the Replace TableOperation.
            TableOperation updateOperation3 = TableOperation.Replace(updateEntity3);
            table.Execute(updateOperation3);
            Console.WriteLine("Ben's phone number updated.");
        }
        else
        {
            Console.WriteLine("Entity could not be retrieved.");
        }
        #endregion

        #region DELETE
        // Retrieve the entity to delete.
        TableOperation retrieveOperation4 = TableOperation.Retrieve<CustomerEntity>(partitionName, "Walter");
        TableResult retrievedResult4 = table.Execute(retrieveOperation4);

        // Assign the result to a CustomerEntity.
        CustomerEntity deleteEntity4 = (CustomerEntity)retrievedResult4.Result; // note the cast
        if (deleteEntity4 != null)
        {
            // Create and execute the Delete TableOperation.
            TableOperation deleteOperation4 = TableOperation.Delete(deleteEntity4);
            table.Execute(deleteOperation4);
            Console.WriteLine("Walter's entity deleted.");
        }
        else
        {
            Console.WriteLine("Could not retrieve the entity.");
        }
        Console.WriteLine();
        #endregion

        #region READ (verify the update and delete)
        // Query the partition again to confirm the changes above.
        TableQuery<CustomerEntity> query5 = new TableQuery<CustomerEntity>()
            .Where(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, partitionName));
        List<CustomerEntity> entityList5 = new List<CustomerEntity>(table.ExecuteQuery(query5));

        // Print the fields for each remaining customer.
        foreach (CustomerEntity entity5 in entityList5)
        {
            Console.WriteLine("{0}; {1}; {2}; {3}", entity5.PartitionKey, entity5.RowKey, entity5.Email, entity5.PhoneNumber);
        }
        Console.WriteLine();

        // Retrieve Ben again to confirm the phone number update.
        TableOperation retrieveOperation5 = TableOperation.Retrieve<CustomerEntity>(partitionName, "Ben");
        TableResult retrievedResult5 = table.Execute(retrieveOperation5);
        if (retrievedResult5.Result != null)
        {
            Console.WriteLine("Ben's phone number: " + ((CustomerEntity)retrievedResult5.Result).PhoneNumber); // note the cast
        }
        else
        {
            Console.WriteLine("Ben's phone number could not be retrieved.");
        }
        #endregion
    }
    catch (StorageException ex)
    {
        Console.WriteLine("oops..." + ex.Message);
        // Exception handling here.
    }
}
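Replace is conditional on the entity's ETag (it is sent as an If-Match header), so the UPDATE region above can fail with 412 Precondition Failed if another writer changes Ben between the retrieve and the replace. A minimal retry sketch under that assumption; the bound of three attempts is an arbitrary choice, and table/partitionName are the variables from the sample above:

    for (int attempt = 0; attempt < 3; attempt++)
    {
        var fetched = (CustomerEntity)table.Execute(
            TableOperation.Retrieve<CustomerEntity>(partitionName, "Ben")).Result;
        if (fetched == null) break;

        fetched.PhoneNumber = "425-555-0105";
        try
        {
            table.Execute(TableOperation.Replace(fetched)); // sends If-Match: <ETag>
            break;
        }
        catch (StorageException ex) when (ex.RequestInformation.HttpStatusCode == 412)
        {
            // Another writer won the race; loop re-reads and tries again.
        }
    }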
public async Task CreateAsync(T user)
{
    if (user == null)
    {
        throw new ArgumentNullException(nameof(user));
    }

    user.SetPartitionAndRowKey();

    // Reserve the user name first; Insert fails with 409 if the name is already taken.
    var userNameIndex = new UserNameIndexEntity(user.UserName.ToSha256(), user.Id);
    TableOperation indexOperation = TableOperation.Insert(userNameIndex);
    try
    {
        await _userNameIndexTable.ExecuteAsync(indexOperation);
    }
    catch (StorageException ex)
    {
        if (ex.RequestInformation.HttpStatusCode == 409)
        {
            throw new DuplicateUsernameException();
        }
        throw;
    }

    if (!String.IsNullOrWhiteSpace(user.Email))
    {
        var userEmailIndexEntity = new UserEmailIndexEntity(user.Email.ToSha256(), user.Id);
        TableOperation emailIndexOperation = TableOperation.Insert(userEmailIndexEntity);
        try
        {
            await _userEmailIndexTable.ExecuteAsync(emailIndexOperation);
        }
        catch (StorageException ex)
        {
            // Roll back the user name index entry. Note: the original blocked on
            // .Wait() inside this async method, which risks deadlocks; await instead.
            try
            {
                userNameIndex.ETag = "*";
                await _userNameIndexTable.ExecuteAsync(TableOperation.Delete(userNameIndex));
            }
            catch (Exception)
            {
                // If we can't delete the index item, still surface the exception below.
            }

            if (ex.RequestInformation.HttpStatusCode == 409)
            {
                throw new DuplicateEmailException();
            }
            throw;
        }
    }

    try
    {
        if (user.LockoutEndDate < _minTableStoreDate)
        {
            user.LockoutEndDate = _minTableStoreDate;
        }

        TableOperation operation = TableOperation.InsertOrReplace(user);
        await _userTable.ExecuteAsync(operation);

        if (user.Logins.Any())
        {
            var batch = new TableBatchOperation();
            var loginIndexItems = new List<UserLoginProviderKeyIndexEntity>();
            foreach (UserLoginEntity login in user.Logins)
            {
                login.UserId = user.Id;
                login.SetPartitionKeyRowKey();
                batch.InsertOrReplace(login);

                var loginIndexItem = new UserLoginProviderKeyIndexEntity(user.Id, login.ProviderKey, login.LoginProvider);
                loginIndexItems.Add(loginIndexItem);
            }
            await _userLoginTable.ExecuteBatchAsync(batch);

            // Can't batch the index items as they have different partition keys.
            foreach (UserLoginProviderKeyIndexEntity loginIndexItem in loginIndexItems)
            {
                await _userLoginProviderKeyIndexTable.ExecuteAsync(TableOperation.InsertOrReplace(loginIndexItem));
            }
        }
    }
    catch (Exception)
    {
        // Attempt to roll back the user name index item - needs work.
        userNameIndex.ETag = "*";
        await _userNameIndexTable.ExecuteAsync(TableOperation.Delete(userNameIndex));
        throw;
    }
}
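The index rows deliberately use Insert rather than InsertOrReplace so that a duplicate user name or e-mail surfaces as a 409 Conflict and can be translated into a domain exception. A hedged usage sketch (the store and newUser names are hypothetical; the exception types come from the method above):

    try
    {
        await store.CreateAsync(newUser);
    }
    catch (DuplicateUsernameException)
    {
        // Another account already claimed this user name.
    }
    catch (DuplicateEmailException)
    {
        // The user name index row was rolled back before this was thrown.
    }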
/// <summary>
/// Emit the provided log event to the sink.
/// </summary>
/// <param name="logEvent">The log event to write.</param>
public void Emit(LogEvent logEvent)
{
    // todo: Use batch insert operation via timer like the Mongo and Couch sinks?
    _table.Execute(TableOperation.Insert(new LogEventEntity(logEvent, _formatProvider)));
}
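The todo hints at buffering events and flushing them on a timer instead of one round trip per event. A minimal sketch of that idea, not the sink's actual implementation: field and method names are hypothetical, System.Collections.Concurrent is assumed imported, and it assumes LogEventEntity gives every entity in one flush window the same PartitionKey, since a batch must share one:

    private readonly ConcurrentQueue<LogEventEntity> _pending = new ConcurrentQueue<LogEventEntity>();

    public void Emit(LogEvent logEvent) =>
        _pending.Enqueue(new LogEventEntity(logEvent, _formatProvider));

    // Invoked periodically, e.g. by a System.Threading.Timer callback.
    private void Flush(object state)
    {
        var batch = new TableBatchOperation();
        while (batch.Count < 100 && _pending.TryDequeue(out var entity))
        {
            batch.Add(TableOperation.Insert(entity));
        }
        if (batch.Count > 0)
        {
            _table.ExecuteBatch(batch); // all entities must share a PartitionKey
        }
    }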