public void Update(T entity)
{
    var updateOperation = TableOperation.InsertOrReplace(entity);
    _table.Execute(updateOperation);
}
/// <summary>
/// Delete all the records in the table linked to this event stream
/// </summary>
public void DeleteStream()
{
    // 1 - mark the stream footer as "Deleting"
    bool recordUpdated = false;
    int tries = 0;
    TableEntityKeyRecord streamFooter = null;

    while (!recordUpdated)
    {
        tries += 1;
        // read in the [TableEntityKeyRecord] stream footer
        streamFooter = (TableEntityKeyRecord)Table.Execute(
            TableOperation.Retrieve<TableEntityKeyRecord>(this.InstanceKey, SequenceNumberAsString(0)),
            operationContext: GetDefaultOperationContext()).Result;

        if (null == streamFooter)
        {
            streamFooter = new TableEntityKeyRecord(this);
        }
        streamFooter.Deleting = true;

        string lastETag = streamFooter.ETag;

        try
        {
            TableResult tres = Table.Execute(TableOperation.InsertOrReplace(streamFooter),
                null,
                new OperationContext
                {
                    UserHeaders = new Dictionary<String, String>
                    {
                        { "If-Match", lastETag }
                    }
                });

            if (tres.HttpStatusCode == 204)
            {
                recordUpdated = true;
            }
        }
        catch (Microsoft.Azure.Cosmos.Table.StorageException sEx)
        {
            if (sEx.RequestInformation.HttpStatusCode == (int)HttpStatusCode.PreconditionFailed)
            {
                // Precondition Failed - could not update the footer due to a concurrency error
                recordUpdated = false;
                // Wait a random-ish amount of time before retrying
                int delayMilliseconds = 13 * new Random().Next(10, 100);
                System.Threading.Thread.Sleep(delayMilliseconds);
            }
            else
            {
                throw new EventStreamWriteException(this, streamFooter.LastSequence,
                    message: "Unable to set the Deleting flag due to a storage error",
                    source: "Table Event Stream Writer",
                    innerException: sEx);
            }
        }

        if (tries > 500)
        {
            // catastrophic deadlock
            throw new EventStreamWriteException(this, streamFooter.LastSequence,
                message: "Unable to set the Deleting flag due to deadlock",
                source: "Table Event Stream Writer");
        }
    }

    // 2 - delete the actual stream records in reverse order
    if (Table != null)
    {
        // We need a continuation token as this is done in batches of 100...
        TableContinuationToken token = new TableContinuationToken();
        TableQuery getEventsToDeleteQuery = DeleteRowsQuery();

        do
        {
            // execute the query for the next segment of rows
            var segment = Table.ExecuteQuerySegmented(getEventsToDeleteQuery, token,
                requestOptions: new TableRequestOptions()
                {
                    PayloadFormat = TablePayloadFormat.Json,
                    TableQueryMaxItemCount = MAX_BATCH_SIZE
                },
                operationContext: GetDefaultOperationContext());

            TableBatchOperation deleteBatch = new TableBatchOperation();
            foreach (DynamicTableEntity dteRow in segment)
            {
                deleteBatch.Add(TableOperation.Delete(dteRow));
            }
            // an empty batch would throw, so only execute when there is something to delete
            if (deleteBatch.Count > 0)
            {
                Table.ExecuteBatch(deleteBatch);
            }

            // update the continuation token to get the next chunk of records
            token = segment.ContinuationToken;
        } while (null != token);
    }
}
protected override void OnCreate(Bundle savedInstanceState)
{
    base.OnCreate(savedInstanceState);
    SetContentView(Resource.Layout.Menu);

    Button Veg = FindViewById<Android.Widget.Button>(Resource.Id.Veg);
    Button Sauce = FindViewById<Android.Widget.Button>(Resource.Id.Sauce);
    Button Rolls = FindViewById<Android.Widget.Button>(Resource.Id.Rolls);
    Button sendOrder = FindViewById<Android.Widget.Button>(Resource.Id.sendOrder);
    var fullName = FindViewById<EditText>(Resource.Id.fullName);
    var email = FindViewById<EditText>(Resource.Id.email);

    if (newCustomerActivity)
    {
        customer = new Customer();
    }
    newCustomerActivity = false;

    Veg.Click += delegate
    {
        var intent = new Intent(this, typeof(Veg));
        StartActivity(intent);
    };
    Sauce.Click += delegate
    {
        var intent = new Intent(this, typeof(Sauce));
        StartActivity(intent);
    };
    Rolls.Click += delegate
    {
        var intent = new Intent(this, typeof(Rolls));
        StartActivity(intent);
    };

    sendOrder.Click += async delegate
    {
        customer.fullName = fullName.Text;
        customer.email = email.Text;

        String vegs = "Vegetables: ";
        foreach (VegItem veg in customer.order.selectedVegItems)
        {
            if (veg.selected)
            {
                vegs += (veg.item + ", ");
            }
        }
        // String.Remove returns a new string, so the result must be assigned back
        if (vegs.LastIndexOf(',') > -1)
        {
            vegs = vegs.Remove(vegs.LastIndexOf(','), 2);
        }

        String sauces = "Sauces: ";
        foreach (SauceItem sauce in customer.order.selectedSauceItems)
        {
            if (sauce.selected)
            {
                sauces += (sauce.item + ", ");
            }
        }
        if (sauces.LastIndexOf(',') > -1)
        {
            sauces = sauces.Remove(sauces.LastIndexOf(','), 2);
        }

        String rolls = "Rolls: ";
        foreach (RollItem roll in customer.order.selectedRollItems)
        {
            if (roll.selected)
            {
                rolls += (roll.item + ", ");
            }
        }
        if (rolls.LastIndexOf(',') > -1)
        {
            rolls = rolls.Remove(rolls.LastIndexOf(','), 2);
        }

        customer.Sorder = "Customer: " + customer.fullName + "\nEmail: " + customer.email
            + "\n" + vegs + "\n" + sauces + "\n" + rolls;
        customer.RowKey = customer.email;
        customer.PartitionKey = "ORDERED";

        // Create the TableOperation object that upserts the customer entity.
        TableOperation insertOperation = TableOperation.InsertOrReplace(customer);

        // Execute the insert operation; on failure report the error and stop,
        // rather than showing a success message after an error.
        try
        {
            await table.ExecuteAsync(insertOperation);
        }
        catch (Exception e)
        {
            Toast.MakeText(this, e.Message, ToastLength.Long).Show();
            return;
        }

        customer = new Customer();
        Toast.MakeText(this, "The order has been sent", ToastLength.Long).Show();
    };
}
void TranslateOperationForNewTable(
    TableOperation op, MTableEntity existingEntity, bool leaveTombstones,
    ref TableOperation newOp, ref HttpStatusCode? errorCode)
{
    ITableEntity passedEntity = op.GetEntity();
    TableOperationType opType = op.GetOperationType();
    switch (opType)
    {
    case TableOperationType.Insert:
        if (existingEntity == null)
        {
            newOp = TableOperation.Insert(ChainTableUtils.CopyEntity<MTableEntity>(passedEntity));
        }
        else if (existingEntity.deleted)
        {
            newOp = TableOperation.Replace(ImportWithIfMatch(passedEntity, existingEntity.ETag));
        }
        else
        {
            errorCode = HttpStatusCode.Conflict;
        }
        break;

    case TableOperationType.Replace:
        if ((errorCode = CheckExistingEntity(passedEntity, existingEntity)) == null)
        {
            newOp = TableOperation.Replace(ImportWithIfMatch(passedEntity, existingEntity.ETag));
        }
        break;

    case TableOperationType.Merge:
        if ((errorCode = CheckExistingEntity(passedEntity, existingEntity)) == null)
        {
            newOp = TableOperation.Merge(ImportWithIfMatch(passedEntity, existingEntity.ETag));
        }
        break;

    case TableOperationType.Delete:
        string buggablePartitionKey, buggableRowKey;
        if (IsBugEnabled(MTableOptionalBug.DeletePrimaryKey))
        {
            buggablePartitionKey = buggableRowKey = null;
        }
        else
        {
            buggablePartitionKey = passedEntity.PartitionKey;
            buggableRowKey = passedEntity.RowKey;
        }
        if (leaveTombstones)
        {
            if (passedEntity.ETag == ChainTable2Constants.ETAG_DELETE_IF_EXISTS)
            {
                newOp = TableOperation.InsertOrReplace(new MTableEntity
                {
                    PartitionKey = buggablePartitionKey,
                    RowKey = buggableRowKey,
                    deleted = true
                });
            }
            else if ((errorCode = CheckExistingEntity(passedEntity, existingEntity)) == null)
            {
                newOp = TableOperation.Replace(new MTableEntity
                {
                    PartitionKey = buggablePartitionKey,
                    RowKey = buggableRowKey,
                    deleted = true,
                    ETag = existingEntity.ETag
                });
            }
        }
        else
        {
            if (passedEntity.ETag == ChainTable2Constants.ETAG_DELETE_IF_EXISTS)
            {
                if (existingEntity != null)
                {
                    newOp = TableOperation.Delete(new MTableEntity
                    {
                        PartitionKey = buggablePartitionKey,
                        RowKey = buggableRowKey,
                        // It's OK to delete the entity and return success whether or not
                        // the entity is a tombstone by the time it is actually deleted.
                        ETag = IsBugEnabled(MTableOptionalBug.DeleteNoLeaveTombstonesETag)
                            ? null : ChainTable2Constants.ETAG_ANY
                    });
                }
                // Otherwise generate nothing.
                // FIXME: This is not linearizable! It can also generate empty batches.
            }
            else if ((errorCode = CheckExistingEntity(passedEntity, existingEntity)) == null)
            {
                // Another client in USE_NEW_WITH_TOMBSTONES could concurrently replace the
                // entity with a tombstone, in which case we need to return 404 to the caller,
                // hence this needs to be conditioned on the existing ETag.
                newOp = TableOperation.Delete(new MTableEntity
                {
                    PartitionKey = buggablePartitionKey,
                    RowKey = buggableRowKey,
                    ETag = IsBugEnabled(MTableOptionalBug.DeleteNoLeaveTombstonesETag)
                        ? null : existingEntity.ETag
                });
            }
        }
        break;

    case TableOperationType.InsertOrReplace:
        newOp = TableOperation.InsertOrReplace(ChainTableUtils.CopyEntity<MTableEntity>(passedEntity));
        break;

    case TableOperationType.InsertOrMerge:
        newOp = TableOperation.InsertOrMerge(ChainTableUtils.CopyEntity<MTableEntity>(passedEntity));
        break;

    default:
        throw new NotImplementedException();
    }
}
public async Task SetLock(GlobalXApiTokenLockInfo lockInfo)
{
    await _lockTableReference.ExecuteAsync(TableOperation.InsertOrReplace(new LockInfoTableEntity(lockInfo)));
}
/// <summary>
/// Create or update an entity (upsert)
/// </summary>
/// <param name="entity">The entity to insert or replace</param>
public void Upsert(TodoEntity entity)
{
    var operation = TableOperation.InsertOrReplace(entity);
    todoTable.Execute(operation);
}
public async Task<TableResult> CreateAsync(T entity)
{
    var tableOperation = TableOperation.InsertOrReplace(entity);
    return await _cloudTable.ExecuteAsync(tableOperation);
}
static void Main(string[] args)
{
    CloudTableClient tableClient = CloudStorageAccount.DevelopmentStorageAccount.CreateCloudTableClient();
    CloudTable table = tableClient.GetTableReference("roster");

    // Ensure that the table is created.
    table.CreateIfNotExists();

    // Create 3 Employee instances.
    Employee first = new Employee { PartitionKey = "IT", RowKey = "ibahena", YearsAtCompany = 7 };
    Employee second = new Employee { PartitionKey = "HR", RowKey = "rreeves", YearsAtCompany = 12 };
    Employee third = new Employee { PartitionKey = "HR", RowKey = "rromani", YearsAtCompany = 3 };

    // Insert the employee with the IT partition key into the table.
    // POST
    TableOperation insertOperation = TableOperation.InsertOrReplace(first);
    table.Execute(insertOperation);

    // Batch-insert the employees with the HR partition key into the table.
    // Batch operations can be used to insert multiple entities into an Azure Storage table.
    // The entities must all have the same PartitionKey in order to be inserted as a single batch.
    // POST
    TableBatchOperation batchOperation = new TableBatchOperation();
    batchOperation.InsertOrReplace(second);
    batchOperation.InsertOrReplace(third);
    table.ExecuteBatch(batchOperation);

    // GET
    // Query the table for employees with a partition key equal to HR.
    TableQuery<Employee> query = new TableQuery<Employee>()
        .Where(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, "HR"));
    Console.WriteLine("HR Employees\n");
    foreach (Employee hrEmployee in table.ExecuteQuery<Employee>(query))
    {
        Console.WriteLine(hrEmployee);
    }

    // GET
    // Retrieve the single employee with a partition key of IT and a row key of ibahena.
    Console.WriteLine("\n\n\n\nIT Employee\n");
    TableOperation retrieveOperation = TableOperation.Retrieve<Employee>("IT", "ibahena");
    TableResult result = table.Execute(retrieveOperation);
    Employee itEmployee = result.Result as Employee;
    Console.WriteLine(itEmployee);
}
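// The Main method above assumes an Employee entity type that is not shown. A minimal
// sketch of what it might look like follows: the class name, key values, and the
// YearsAtCompany property come from the snippet; the ToString override is an assumption
// added so the Console.WriteLine calls print something readable.
public class Employee : TableEntity
{
    // a parameterless constructor is required so the SDK can materialize query results
    public Employee() { }

    public int YearsAtCompany { get; set; }

    public override string ToString()
    {
        return $"{PartitionKey}/{RowKey}: {YearsAtCompany} years at the company";
    }
}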
public static async Task Run(
    [HttpTrigger(AuthorizationLevel.Anonymous, "get", "post", Route = null)] HttpRequest req,
    ILogger log)
{
    var storage = CloudStorageAccount.Parse(StorageAccount);
    var cloudTableClient = storage.CreateCloudTableClient();
    var usersTable = cloudTableClient.GetTableReference("users");
    var userStatisticsTable = cloudTableClient.GetTableReference("userstatistics");
    await usersTable.CreateIfNotExistsAsync().ConfigureAwait(false);
    await userStatisticsTable.CreateIfNotExistsAsync().ConfigureAwait(false);

    // Get a bearer token for the app registration
    var httpClient = new HttpClient();
    var app = ConfidentialClientApplicationBuilder.Create(AppId)
        .WithAuthority($"https://login.windows.net/{TenantId}/oauth2/token")
        .WithRedirectUri("https://governance365function")
        .WithClientSecret(AppSecret)
        .Build();
    var authenticationProvider = new MsalAuthenticationProvider(app, Scopes);
    string token = await authenticationProvider.GetTokenAsync().ConfigureAwait(false);
    httpClient.DefaultRequestHeaders.Authorization =
        new System.Net.Http.Headers.AuthenticationHeaderValue("Bearer", token);

    // initialize the object for storing all user statistics
    UserQueryObject usersPage = null;
    var userStatistics = new UserStatisticsTableEntity()
    {
        PartitionKey = "userstatistics",
        RowKey = TenantId,
        DeactivatedUsers = 0,
        DeletedUsers = 0,
        GuestUsers = 0,
        InternalUsers = 0,
        Users = 0
    };

    // get all users (until the nextLink is empty) and members/guests, summing up statistics
    do
    {
        HttpResponseMessage response;
        if (string.IsNullOrEmpty(usersPage?.OdataNextLink?.ToString()))
        {
            // first request
            response = await httpClient.GetAsync(usersUrl + UserSelectedProperties).ConfigureAwait(false);
        }
        else
        {
            // next page request
            response = await httpClient.GetAsync(usersPage?.OdataNextLink?.ToString()).ConfigureAwait(false);
        }
        usersPage = JsonConvert.DeserializeObject<Governance365.Models.UserQueryObject>(
            await response.Content.ReadAsStringAsync().ConfigureAwait(false));

        // batch request to store this page of users in Azure Table storage
        var userPageBatchOperation = new TableBatchOperation();
        foreach (var user in usersPage.Value)
        {
            if (user.UserType != null)
            {
                userStatistics.Users++;
                if (user.UserType.Equals("Member"))
                {
                    userStatistics.InternalUsers++;
                }
                else if (user.UserType.Equals("Guest"))
                {
                    userStatistics.GuestUsers++;
                }
                // AccountEnabled must be non-empty before it can be parsed
                if (!string.IsNullOrEmpty(user.AccountEnabled) && !bool.Parse(user.AccountEnabled))
                {
                    userStatistics.DeactivatedUsers++;
                }
            }
            user.PartitionKey = "user";
            user.RowKey = user.Id.ToString();
            // add the user entity to the batch operation
            userPageBatchOperation.Add(TableOperation.InsertOrReplace(user));
        }
        // write the user page to Azure Table storage (an empty batch would throw)
        if (userPageBatchOperation.Count > 0)
        {
            await usersTable.ExecuteBatchAsync(userPageBatchOperation).ConfigureAwait(false);
        }
    } while (!string.IsNullOrEmpty(usersPage.OdataNextLink?.ToString()));

    // write the user statistics to the "userstatistics" table -> single value with overwrite
    var insertUserStatisticsOperation = TableOperation.InsertOrReplace(userStatistics);
    await userStatisticsTable.ExecuteAsync(insertUserStatisticsOperation).ConfigureAwait(false);
    httpClient.Dispose();
}
public async Task UpdateAsync(T item)
{
    await _table.ExecuteAsync(TableOperation.InsertOrReplace(item));
}
public async Task Upsert(T item)
{
    var upsertItem = TableOperation.InsertOrReplace(item);
    await StorageTable.ExecuteAsync(upsertItem);
}
public void InsertOrReplace(Student newStudent)
{
    TableOperation insertOperation = TableOperation.InsertOrReplace(newStudent);
    _table.Execute(insertOperation);
}
/// <summary>
/// Uploads all the images that reside in 'testfolder' and then adds
/// the estimated age to a table after calling the Face API.
/// Once successfully done, the message gets deleted from the queue.
/// Messages that remain in the queue are the failed ones.
/// </summary>
/// <param name="queue">The sample queue</param>
/// <param name="container">The sample blob container</param>
/// <param name="table">The sample table</param>
/// <param name="faceClient">Face API client</param>
/// <returns>A task representing the asynchronous operation</returns>
private static async Task ProcessImages(CloudQueue queue, CloudBlobContainer container, CloudTable table, FaceServiceClient faceClient)
{
    try
    {
        // Iterate over photos in 'testfolder'
        var images = Directory.EnumerateFiles("testfolder", "*.jpg");
        foreach (string currentFile in images)
        {
            string fileName = currentFile.Replace("testfolder\\", "");
            Console.WriteLine("Processing image {0}", fileName);

            // Add a message to the queue for each photo. Note the visibility timeout,
            // as blob and table operations in the following process may take up to 900 seconds.
            // For simplicity, other factors like the Face API call timeout are ignored.
            // After the 900 seconds, the message becomes visible and a worker role can pick it
            // up from the queue for cleanup. The default time-to-live for the message is 7 days.
            CloudQueueMessage message = new CloudQueueMessage(fileName);
            queue.AddMessage(message, null, TimeSpan.FromSeconds(900));

            // read the file
            using (var fileStream = File.OpenRead(currentFile))
            {
                // detect faces and estimate the age
                var faces = await faceClient.DetectAsync(fileStream, false, true,
                    new FaceAttributeType[] { FaceAttributeType.Age });
                Console.WriteLine(faces.Length + " face(s) detected in " + fileName);

                CloudBlockBlob blob = container.GetBlockBlobReference(fileName);
                var tableEntity = new DynamicTableEntity("FaceImages", fileName);

                // iterate over detected faces
                int i = 1;
                foreach (var face in faces)
                {
                    // append the age info as a property on the table entity
                    tableEntity.Properties.Add("person" + i.ToString(),
                        new EntityProperty(face.FaceAttributes.Age.ToString()));
                    i++;
                    // stop if more than 250 faces were found; an entity can contain up to 252 properties
                    if (i > 250)
                    {
                        break;
                    }
                }

                // upload the blob if a face was detected
                if (faces.Length > 0)
                {
                    await blob.UploadFromFileAsync(currentFile);
                }

                // store the age info in the table
                table.Execute(TableOperation.InsertOrReplace(tableEntity));

                // delete the queue message with the pop receipt if the previous operations completed successfully
                if (blob.Exists() && table.Execute(TableOperation.Retrieve("FaceImages", fileName)).HttpStatusCode == 200)
                {
                    await queue.DeleteMessageAsync(message.Id, message.PopReceipt);
                }
            }
        }
    }
    catch (StorageException ex)
    {
        Console.WriteLine("Storage error: " + ex.RequestInformation.ExtendedErrorInformation.ErrorCode);
    }
    catch (FaceAPIException ex)
    {
        Console.WriteLine("Face API error: " + ex.ErrorCode + ex.ErrorMessage);
    }
    catch (Exception ex)
    {
        Console.WriteLine("Exception: " + ex.Message);
        if (ex is DirectoryNotFoundException || ex is FileNotFoundException)
        {
            Console.WriteLine("Please make sure that the folder \"testfolder\" (with images) is present in the current directory where the sample is running");
        }
    }
}
private async Task RunAsync(CancellationToken cancellationToken)
{
    try
    {
        CloudStorageAccount storageAccount = CloudStorageAccount.Parse(
            "DefaultEndpointsProtocol=https;AccountName=[name];AccountKey=[key]");
        CloudQueueClient queueClient = storageAccount.CreateCloudQueueClient();
        CloudQueue queue = queueClient.GetQueueReference("myqueue");
        CloudTableClient tableClient = storageAccount.CreateCloudTableClient();
        CloudTable table = tableClient.GetTableReference("mytable");

        // Create the queue if it doesn't already exist
        queue.CreateIfNotExists();
        // Create the table if it doesn't exist.
        table.CreateIfNotExists();

        // TODO: Replace the following with your own logic.
        while (!cancellationToken.IsCancellationRequested)
        {
            // Get the next message
            CloudQueueMessage retrievedMessage = queue.GetMessage();
            if (retrievedMessage != null)
            {
                var messages = new StringBuilder("Worker received: " + retrievedMessage.AsString);
                messages.AppendLine();

                // read the latest messages from the table
                TableOperation retrieveOperation = TableOperation.Retrieve<MyMessages>("Partition0", "Row0");

                // Execute the retrieve operation.
                TableResult retrievedResult = table.Execute(retrieveOperation);
                MyMessages myMessages = retrievedResult.Result == null
                    ? new MyMessages { PartitionKey = "Partition0", RowKey = "Row0", LastUpdated = DateTime.Now }
                    : (MyMessages)retrievedResult.Result;
                messages.AppendLine(myMessages.Messages);

                var filename = RoleEnvironment.IsEmulated
                    ? @"c:\windows\system32\cmd.exe"
                    : @"d:\windows\system32\cmd.exe";
                var processStartInfo = new ProcessStartInfo()
                {
                    Arguments = "/c echo \"test message from a process on the worker vm\"",
                    FileName = filename,
                    RedirectStandardOutput = true,
                    UseShellExecute = false
                };
                var process = Process.Start(processStartInfo);
                using (var streamReader = new StreamReader(process.StandardOutput.BaseStream))
                {
                    messages.AppendLine(streamReader.ReadToEnd() + " at " + DateTime.Now.ToString()
                        + " on " + RoleEnvironment.CurrentRoleInstance);
                }

                // replace the messages
                myMessages.Messages = messages.ToString();
                myMessages.LastUpdated = DateTime.Now;

                // Create the upsert TableOperation.
                TableOperation replaceOperation = TableOperation.InsertOrReplace(myMessages);

                // Execute the operation.
                var result = table.Execute(replaceOperation);

                // Process the message in less than 30 seconds, and then delete the message
                queue.DeleteMessage(retrievedMessage);
                Trace.TraceInformation("Updated myMessage");
            }
            Trace.TraceInformation("Working");
            await Task.Delay(1000);
        }
    }
    catch (Exception ex)
    {
        Trace.TraceError("Worker failed with " + ex.Message);
    }
}
/// <summary>
/// Imports data from a DataTable to table storage
/// </summary>
/// <param name="dtSheetInfo">The sheet data to upload</param>
/// <param name="strSheetName">The sheet name, used as the partition key</param>
private void ImportDataToTable(System.Data.DataTable dtSheetInfo, string strSheetName)
{
    var client = storageAccount.CreateCloudTableClient();
    CloudTable table = client.GetTableReference(strPIITable);

    Response.Write(new string(' ', 1024));
    Response.Write(String.Format("<div>Deleting existing data"));
    Response.Flush();
    table.DeleteIfExists();

create:
    try
    {
        Response.Write(".");
        Response.Flush();
        table.Create();
    }
    catch (StorageException ex) when (ex.RequestInformation.ExtendedErrorInformation.ErrorCode.Equals(TableErrorCodeStrings.TableBeingDeleted))
    {
        // the table is still being deleted; wait and retry
        Thread.Sleep(1000);
        goto create;
    }

    Response.Write(String.Format("</div><div>Uploading {0} rows for sheet {1}",
        dtSheetInfo.Rows.Count, strSheetName.Replace("$", "")));
    Response.Flush();

    // Create a new partition key for this data instead of overwriting old data.
    var partitionKey = strSheetName;
    var batch = new TableBatchOperation();
    for (int j = 0; j < dtSheetInfo.Rows.Count; j++)
    {
        ExcelTableEntity entity = new ExcelTableEntity(partitionKey, (j + 2).ToString("D5"));
        var hasContent = false;
        for (int i = 0; i < dtSheetInfo.Columns.Count; i++)
        {
            string strColName = dtSheetInfo.Columns[i].ColumnName;
            if (!(dtSheetInfo.Rows[j][i] is DBNull) && (dtSheetInfo.Rows[j][i] != null))
            {
                hasContent = true;
                string strValue = dtSheetInfo.Rows[j][i].ToString().Trim();
                if (!CheckPropertyExist(strColName, strValue, entity))
                {
                    EntityProperty property = entity.ConvertToEntityProperty(strColName, dtSheetInfo.Rows[j][i]);
                    if (!entity.properties.ContainsKey(strColName))
                    {
                        entity.properties.Add(strColName, property);
                    }
                    else
                    {
                        entity.properties[strColName] = property;
                    }
                }
            }
        }
        if (hasContent)
        {
            batch.Add(TableOperation.InsertOrReplace(entity));
        }
        // a batch may contain at most 100 operations
        if (batch.Count >= 100)
        {
            table.ExecuteBatch(batch);
            Response.Write(".");
            Response.Flush();
            batch.Clear();
        }
    }
    if (batch.Count > 0)
    {
        table.ExecuteBatch(batch);
        Response.Write(".");
        Response.Flush();
    }
    Response.Write("</div><hr/>");
    Response.Flush();
}
public Message PostMessage(string userid, string groupID, string eventID, string schemaID,
    string[] owner, string[] atUser, string[] topicName, string message, string richMessage,
    string[] attachmentID, int importance, DateTime timestamp)
{
    if ("none".Equals(message))
    {
        message = "";
    }
    if (message.Length > 2048)
    {
        throw new MessageTooLongException();
    }

    UserProfile user = _accManager.FindUser(userid);
    if (user == null)
    {
        throw new UserNotFoundException(userid);
    }
    if (!_schemaManager.Contain(schemaID))
    {
        throw new SchemaNotFoundException();
    }

    // merge user ids from the argument as well as the message text
    HashSet<string> validAtUsers = new HashSet<string>();
    HashSet<string> atUserids = new HashSet<string>();
    atUserids.UnionWith(Utils.GetAtUserid(message));
    if (atUser != null)
    {
        atUserids.UnionWith(atUser);
    }
    foreach (string uid in atUserids)
    {
        try
        {
            // check the membership of the mentioned user, not the poster
            Membership member = _groupManager.CheckMembership(groupID, uid);
            validAtUsers.Add(member.MemberID);
        }
        catch { }
    }

    // merge topics from the argument as well as the message text
    HashSet<string> topic = new HashSet<string>();
    topic.UnionWith(Utils.GetTopicNames(message));
    if (topicName != null)
    {
        topic.UnionWith(topicName);
    }

    // insert the rich message
    string richMessageID = null;
    if (!string.IsNullOrEmpty(richMessage))
    {
        richMessageID = _richMsgManager.PostRichMessage(userid, timestamp, richMessage);
    }

    // create the message
    Message msg = new Message(userid, groupID, message, timestamp, eventID, schemaID, owner,
        validAtUsers.ToArray(), topic.ToArray(), richMessageID, attachmentID, importance);

    // insert into the userline
    TableOperation insertOperation = TableOperation.InsertOrReplace(new UserLineEntity(msg));
    _userline.Execute(insertOperation);

    // insert into the poster's homeline
    insertOperation = TableOperation.InsertOrReplace(new HomeLineEntity(msg.User, msg));
    _homeline.Execute(insertOperation);

    // push the message onto the processing queues
    _queue.AddMessage(msg.toAzureCloudQueueMessage());
    _spiderqueue.AddMessage(msg.toAzureCloudQueueMessage());
    _mailMessageQueue.AddMessage(msg.toAzureCloudQueueMessage());

    user.MessageCount++;
    _accManager.UpdateUser(user);
    return msg;
}
public async Task UpdateChat(SendData chatToUpdate)
{
    TableOperation updateOp = TableOperation.InsertOrReplace(chatToUpdate);
    await table.ExecuteAsync(updateOp);
}
public void SpreadMessage(Message message)
{
    TableOperation insertOperation;

    // insert into the eventline
    if (!string.IsNullOrEmpty(message.EventID) && !"none".Equals(message.EventID))
    {
        insertOperation = TableOperation.InsertOrReplace(new EventLineEntity(message));
        _eventline.Execute(insertOperation);
    }

    // insert into the public square line
    insertOperation = TableOperation.InsertOrReplace(new PublicSquareLineEntity(message));
    _publicSquareLine.Execute(insertOperation);

    // insert into the atline of every mentioned user
    if (message.AtUser != null)
    {
        foreach (string atUserid in message.AtUser)
        {
            UserProfile user = _accManager.FindUser(atUserid);
            if (user != null)
            {
                insertOperation = TableOperation.InsertOrReplace(new AtLineEntity(user.Userid, message));
                _atline.Execute(insertOperation);
                _notifManager.incrementAtlineNotifCount(user.Userid);
            }
        }
    }

    // insert into the topicline, creating the topic if it does not exist yet
    if (message.TopicName != null)
    {
        foreach (string topicName in message.TopicName)
        {
            Topic topic = _topicManager.FindTopicByName(topicName, new string[] { message.Group });
            if (topic == null)
            {
                topic = new Topic();
                topic.Name = topicName;
                topic.MsgCount = 0;
                topic.GroupID = message.Group;
                _topicManager.AddTopic(topic);
                topic = _topicManager.FindTopicByName(topicName, new string[] { message.Group });
            }
            insertOperation = TableOperation.InsertOrReplace(new TopicLine(message, topic.Id.ToString()));
            _topicline.Execute(insertOperation);
            _topicManager.incrementTopicCount(topic.Id);
            _topicManager.incrementUnreadMsgCountOfFavouriteTopic(topic.Id);
        }
    }

    // insert into the ownerline of every owner
    if (message.Owner != null)
    {
        foreach (string ownerid in message.Owner)
        {
            UserProfile user = _accManager.FindUser(ownerid);
            if (user != null)
            {
                insertOperation = TableOperation.InsertOrReplace(new OwnerLineEntity(user.Userid, message));
                _ownerline.Execute(insertOperation);
                _notifManager.incrementOwnerlineNotifCount(user.Userid);
            }
        }
    }

    // insert into the homeline: spread the message to followers who are also group members
    HashSet<string> groupMember = new HashSet<string>();
    foreach (var member in _groupManager.GetAllGroupMember(message.Group))
    {
        groupMember.Add(member.MemberID.ToLower());
    }
    List<UserProfile> followers = _accManager.Followers(message.User);
    foreach (UserProfile user in followers)
    {
        if (!groupMember.Contains(user.Userid.ToLower()))
        {
            continue;
        }
        HomeLineEntity entity = new HomeLineEntity(user.Userid, message);
        insertOperation = TableOperation.InsertOrReplace(entity);
        _homeline.Execute(insertOperation);
        _notifManager.incrementHomelineNotifCount(user.Userid);
        if (message.Importance == 0)
        {
            _notifManager.incrementImportantMsgCount(user.Userid);
        }
    }
}
public static async Task<TableResult> InsertOrReplace(this CloudTable table, ITableEntity entity)
{
    var operation = TableOperation.InsertOrReplace(entity);
    return await table.ExecuteAsync(operation);
}
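// With the extension method above in place, an upsert collapses to a single awaited call.
// A usage sketch follows; the table name, keys, and property are illustrative assumptions,
// not taken from the source:
var table = tableClient.GetTableReference("devicestate"); // hypothetical table
var entity = new DynamicTableEntity("device-01", "latest");
entity.Properties["Temperature"] = new EntityProperty(21.5);
TableResult result = await table.InsertOrReplace(entity);
Console.WriteLine($"Upsert returned HTTP {result.HttpStatusCode}");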
public static async Task Execute(string connectionString, string tableName, List<ObjectInfoTableEntity> objInfos)
{
    // Design for querying:
    // https://docs.microsoft.com/en-us/azure/storage/tables/table-storage-design-for-query
    var acct = CloudStorageAccount.Parse(connectionString);
    var tableClient = acct.CreateCloudTableClient();
    var table = tableClient.GetTableReference(tableName);

    // create the table if needed
    Console.WriteLine($"Create Table - {tableName}");
    await table.CreateIfNotExistsAsync();
    Console.WriteLine(Environment.NewLine);

    // insert all the object infos into the table
    Console.WriteLine($"\n\nAdding:");
    foreach (var objinfo in objInfos)
    {
        Console.WriteLine($"   {objinfo}");
        await table.ExecuteAsync(TableOperation.InsertOrReplace(objinfo));
    }

    // pick a random object to play with
    var tofind = objInfos[RandomValueGen.GetRandomInt(0, objInfos.Count - 1)];
    Console.WriteLine($"\n\nSearching for\n   {tofind}");
    var found = await table.ExecuteAsync(TableOperation.Retrieve<ObjectInfoTableEntity>(tofind.PartitionKey, tofind.RowKey));
    Console.WriteLine($"\n\nFound by Partition/Row key\n   {found.Result as ObjectInfoTableEntity}");

    Console.WriteLine($"\n\nFind all revisions of a given object");
    var finds = table.ExecuteQuery(new TableQuery<ObjectInfoTableEntity>()
        .Where(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, tofind.PartitionKey)));
    foreach (var item in finds)
    {
        Console.WriteLine($"   {item}");
    }

    Console.WriteLine($"\n\nFind all newer revisions of a given object");
    finds = table.ExecuteQuery(new TableQuery<ObjectInfoTableEntity>()
        .Where(TableQuery.CombineFilters(
            TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, tofind.PartitionKey),
            TableOperators.And,
            TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.GreaterThanOrEqual, tofind.RowKey))));
    foreach (var item in finds)
    {
        Console.WriteLine($"   {item}");
    }

    Console.WriteLine($"\n\nFind all future revisions of a given object removed within the last year");
    var newDate = DateTime.UtcNow.AddYears(-1);
    finds = table.ExecuteQuery(new TableQuery<ObjectInfoTableEntity>()
        .Where(TableQuery.CombineFilters(
            TableQuery.CombineFilters(
                TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, tofind.PartitionKey),
                TableOperators.And,
                TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.GreaterThanOrEqual, tofind.RowKey)),
            TableOperators.And,
            TableQuery.GenerateFilterConditionForDate("Removed", QueryComparisons.GreaterThanOrEqual, newDate))));
    foreach (var item in finds)
    {
        Console.WriteLine($"   {item}");
    }

    Console.WriteLine($"\n\nFind all future revisions of a given object removed within the last year with LINQ");
    finds = from objinfo in table.CreateQuery<ObjectInfoTableEntity>()
            where objinfo.PartitionKey == tofind.PartitionKey &&
                  objinfo.Created >= tofind.Created &&
                  objinfo.Removed >= newDate
            select objinfo;
    foreach (var item in finds)
    {
        Console.WriteLine($"   {item}");
    }

    Console.WriteLine($"\n\nFind all future revisions of a given object removed within the last year, filtered in the client");
    finds = table.ExecuteQuery(new TableQuery<ObjectInfoTableEntity>()
            .Where(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, tofind.PartitionKey)))
        .Where(x => x.Created >= tofind.Created && x.Removed >= newDate);
    foreach (var item in finds)
    {
        Console.WriteLine($"   {item}");
    }
}
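// The queries above filter on Created and Removed properties of ObjectInfoTableEntity.
// A sketch of an entity shape consistent with those queries follows; only the property
// names appear in the snippet, everything else here is assumed:
public class ObjectInfoTableEntity : TableEntity
{
    // a parameterless constructor is required for deserialization
    public ObjectInfoTableEntity() { }

    public DateTime Created { get; set; }
    public DateTime Removed { get; set; }

    public override string ToString()
    {
        return $"{PartitionKey}/{RowKey} created {Created:u}, removed {Removed:u}";
    }
}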
public async Task CreateEntityAsync(ITableEntity entity)
{
    await _table.ExecuteAsync(TableOperation.InsertOrReplace(entity));
}
public Task UpsertEntity(T entity)
{
    var operation = TableOperation.InsertOrReplace(entity);
    return cloudTable.ExecuteAsync(operation);
}
public Task CreateOrUpdateEntityAsync(TEntity entity)
{
    var operation = TableOperation.InsertOrReplace(entity);
    return _table.ExecuteAsync(operation);
}
public void InsertOrUpdateItem(TableCacheEntity entity)
{
    TableOperation operation = TableOperation.InsertOrReplace(entity);
    _cloudTable.Execute(operation);
}
public async Task AddOrUpdateTimestampAsync(string user, string name, long timestamp)
{
    var entity = new TimestampEntity(user, name, timestamp);
    var op = TableOperation.InsertOrReplace(entity);
    await _table.ExecuteAsync(op);
}
public Task InsertOrUpdateItemAsync(TableCacheEntity entity, CancellationToken token)
{
    TableOperation operation = TableOperation.InsertOrReplace(entity);
    return _cloudTable.ExecuteAsync(operation, token);
}
/// <summary>
/// Increment the sequence number for this event stream and return the new number
/// </summary>
/// <remarks>
/// This is done before the event itself is written so that a partial failure leaves a gap
/// in the event stream, which is less harmful than an overwritten event record
/// </remarks>
private async Task<int> IncrementSequenceNumber()
{
    bool recordUpdated = false;
    int tries = 0;
    TableEntityKeyRecord streamFooter = null;

    while (!recordUpdated)
    {
        tries += 1;
        // read in the [TableEntityKeyRecord] stream footer
        streamFooter = (TableEntityKeyRecord)Table.Execute(
            TableOperation.Retrieve<TableEntityKeyRecord>(this.InstanceKey, SequenceNumberAsString(0)),
            operationContext: GetDefaultOperationContext()).Result;

        if (null == streamFooter)
        {
            streamFooter = new TableEntityKeyRecord(this);
        }
        streamFooter.LastSequence += 1;

        string lastETag = streamFooter.ETag;

        try
        {
            TableResult tres = await Table.ExecuteAsync(TableOperation.InsertOrReplace(streamFooter),
                null,
                new OperationContext
                {
                    UserHeaders = new Dictionary<String, String>
                    {
                        { "If-Match", lastETag }
                    }
                });

            if (tres.HttpStatusCode == 204)
            {
                recordUpdated = true;
            }
        }
        catch (Microsoft.Azure.Cosmos.Table.StorageException sEx)
        {
            if (sEx.RequestInformation.HttpStatusCode == (int)HttpStatusCode.PreconditionFailed)
            {
                // Precondition Failed - could not update the footer due to a concurrency error
                recordUpdated = false;
                // Wait a random-ish amount of time before retrying
                int delayMilliseconds = 13 * new Random().Next(10, 100);
                await Task.Delay(delayMilliseconds);
            }
            else
            {
                throw new EventStreamWriteException(this, streamFooter.LastSequence,
                    message: "Unable to increment the stream sequence number due to a storage error",
                    source: "Table Event Stream Writer",
                    innerException: sEx);
            }
        }

        if (tries > 500)
        {
            // catastrophic deadlock
            throw new EventStreamWriteException(this, streamFooter.LastSequence,
                message: "Unable to increment the stream sequence number due to deadlock",
                source: "Table Event Stream Writer");
        }
    }

    if (null != streamFooter)
    {
        if (streamFooter.Deleting)
        {
            // Do not allow a write to an event stream that is being deleted
            throw new EventStreamWriteException(this, streamFooter.LastSequence,
                message: "Unable to write to this event stream as it is being deleted",
                source: "Table Event Stream Writer");
        }
        return streamFooter.LastSequence;
    }
    else
    {
        return 1;
    }
}
// returns Task rather than async void so callers can await the upsert and observe failures
public async Task Save<T>(T entity) where T : ITableEntity, new()
{
    var upsert = TableOperation.InsertOrReplace(entity);
    await Table.ExecuteAsync(upsert);
}
// Parse the HTML at the given URL, index the information, and add it to the Url table.
// Follow each link found in the page, check its validity and visit status, then add it back to the queue.
public void parseHTML(string url)
{
    if (!crawlable)
    {
        return;
    }

    // update status and increase the number crawled
    var crawled = 0;
    var sizeQueue = 0;
    var sizeIndex = 0;
    TableQuery<Performance> query3 = new TableQuery<Performance>().Take(1);
    foreach (Performance item in StorageManager.getPerformanceTable().ExecuteQuery(query3))
    {
        crawled = item.NumCrawled + 1;
        sizeQueue = item.SizeQueue;
        sizeIndex = item.SizeIndex;
    }
    Performance.insertPerformance("Crawling", crawled, sizeQueue, sizeIndex);

    try
    {
        // index the page information
        var Url = url;
        var web = new HtmlWeb();
        var doc = web.Load(url);
        var title = doc.DocumentNode.SelectSingleNode("//head/title").InnerHtml;
        var meta = doc.DocumentNode.SelectNodes("//meta");
        string date = "no date found";
        foreach (HtmlNode tag in meta)
        {
            string property = tag.GetAttributeValue("property", "");
            if (property.Contains("published_time") || property.Contains("pubdate"))
            {
                date = tag.GetAttributeValue("content", "");
            }
        }

        // add to the Url table
        addToTable(Url, title, date);

        // check header links related to the page
        var linksList = doc.DocumentNode.SelectNodes("//head/link");
        if (linksList != null)
        {
            foreach (HtmlNode link in linksList)
            {
                string href = link.GetAttributeValue("href", "");
                if (href.Contains("cnn.com") || href.Contains("bleacherreport.com/articles") || href.Contains("bleacherreport.com/nba"))
                {
                    if (!checkDisallow(href, this.Disallow) && !this.Visited.Contains(href))
                    {
                        addToUrlQueue(href);
                    }
                }
            }
        }

        // check body links related to the page
        var aList = doc.DocumentNode.SelectNodes("//a[@href]");
        if (aList != null)
        {
            foreach (HtmlNode a in aList)
            {
                string href = a.GetAttributeValue("href", "");
                if (href.Contains("cnn.com") || href.Contains("bleacherreport.com/articles") || href.Contains("bleacherreport.com/nba"))
                {
                    if (!checkDisallow(href, this.Disallow) && !this.Visited.Contains(href))
                    {
                        addToUrlQueue(href);
                    }
                }
            }
        }
    }
    catch (Exception e)
    {
        // record the exception together with the URL in the exception table
        ExceptionUrl except = new ClassLibrary1.ExceptionUrl(e.ToString(), url);
        TableOperation insertOrReplaceOperation = TableOperation.InsertOrReplace(except);
        StorageManager.getExceptionTable().Execute(insertOrReplaceOperation);
    }
}
public void InsertOrReplace(TableEntity entity)
{
    Table.Execute(TableOperation.InsertOrReplace(entity));
}