/// <summary>
/// Initializes one page of query results together with the continuation token
/// needed to fetch the next page.
/// </summary>
/// <param name="results">The entities returned for this segment.</param>
/// <param name="continuationToken">Token for the next segment; null when there is no more data.</param>
public CloudEventLogPositionLengthSegment(List<CloudEventLogPositionLength> results, TableContinuationToken continuationToken)
    => (this.Results, this.ContinuationToken) = (results, continuationToken);
/// <summary>
/// Adds a column to an Azure table.
/// The column to be added is specified using the following extended properties:
///   columnName   - Name of the column to be added (mandatory).
///   type         - Data type of the column; only int32, bool and string are supported (mandatory).
///   defaultValue - Default value of the column; optional, falls back to the type's default value.
///   rowKeyPrefix - RowKey prefix identifying the subset of rows to update; optional.
/// Extended Properties Example:
///   "columnName": "DisableHandleValidation",
///   "type": "bool",
///   "defaultValue": "False",
///   "rowKeyPrefix": "ProfilesObject:"
/// The activity iterates through all rows from the input table with the matching
/// rowKeyPrefix, adds the column if it is not present, and runs a merge table
/// operation to merge the modified entity back into the table.
/// </summary>
/// <param name="linkedServices">Linked services referenced by activity definition.</param>
/// <param name="datasets">Datasets referenced by activity definition.</param>
/// <param name="activity">Activity definition.</param>
/// <param name="logger">Used to log messages during activity execution.</param>
/// <returns>Activity state at the end of execution.</returns>
public IDictionary<string, string> Execute(IEnumerable<LinkedService> linkedServices, IEnumerable<Dataset> datasets, Activity activity, IActivityLogger logger)
{
    DotNetActivity dotNetActivity = (DotNetActivity)activity.TypeProperties;
    IDictionary<string, string> extendedProperties = dotNetActivity.ExtendedProperties;

    logger.Write("Logging extended properties if any...");
    foreach (KeyValuePair<string, string> entry in extendedProperties)
    {
        logger.Write("<key:{0}> <value:{1}>", entry.Key, entry.Value);
    }

    // BUG FIX: the original passed "columnName"/"type" as ArgumentException's paramName
    // argument, but those are extended-property keys, not parameters of this method.
    if (!extendedProperties.ContainsKey("columnName"))
    {
        throw new ArgumentException("Extended property 'columnName' is required");
    }
    string columnName = extendedProperties["columnName"];

    if (!extendedProperties.ContainsKey("type"))
    {
        throw new ArgumentException("Extended property 'type' is required");
    }
    string type = extendedProperties["type"];

    string defaultValueStr = null;
    if (extendedProperties.ContainsKey("defaultValue"))
    {
        defaultValueStr = extendedProperties["defaultValue"];
    }

    string rowKeyPrefix = string.Empty;
    if (extendedProperties.ContainsKey("rowKeyPrefix"))
    {
        rowKeyPrefix = extendedProperties["rowKeyPrefix"];
    }

    // For activities working on a single dataset, the first entry is the input dataset.
    // activity.Inputs can list multiple datasets purely for building pipeline workflow
    // dependencies; the rest are ignored here.
    Dataset inputDataset = datasets.Single(dataset => dataset.Name == activity.Inputs.First().Name);
    AzureTableDataset sourceTable = inputDataset.Properties.TypeProperties as AzureTableDataset;
    if (sourceTable == null)
    {
        // BUG FIX: the original dereferenced the result of this 'as' cast without checking it.
        throw new InvalidOperationException("Input dataset must be an AzureTableDataset");
    }
    logger.Write("input table:{0}", sourceTable.TableName);

    AzureStorageLinkedService inputLinkedService = linkedServices.First(
        ls => ls.Name == inputDataset.Properties.LinkedServiceName).Properties.TypeProperties as AzureStorageLinkedService;
    if (inputLinkedService == null)
    {
        // BUG FIX: same unchecked 'as' cast as above.
        throw new InvalidOperationException("Input linked service must be an AzureStorageLinkedService");
    }
    string inputConnectionString = inputLinkedService.ConnectionString;

    // Create the storage client for the input table from the linked service's connection string.
    CloudStorageAccount inputStorageAccount = CloudStorageAccount.Parse(inputConnectionString);
    CloudTableClient inputTableClient = inputStorageAccount.CreateCloudTableClient();
    CloudTable inputTable = inputTableClient.GetTableReference(sourceTable.TableName);

    EntityProperty columnValue = this.GetEntityProperty(type, defaultValueStr);

    long totalProcessedRecords = 0;
    long actualAffectedRecords = 0;
    TableContinuationToken tableContinuationToken = null;
    List<Task> tasks = new List<Task>();

    do
    {
        var resultSegment = inputTable.ExecuteQuerySegmented(new TableQuery(), tableContinuationToken);
        tableContinuationToken = resultSegment.ContinuationToken;

        // An empty rowKeyPrefix means every row qualifies.
        var partitionGroups = resultSegment.Results
            .Where(s => string.IsNullOrWhiteSpace(rowKeyPrefix) || s.RowKey.StartsWith(rowKeyPrefix))
            .GroupBy(a => a.PartitionKey);

        foreach (IGrouping<string, DynamicTableEntity> g in partitionGroups)
        {
            // One batch per partition group within the segment — batches cannot span partitions.
            TableBatchOperation batch = new TableBatchOperation();
            foreach (DynamicTableEntity e in g)
            {
                // Only add the column if the entity does not already have it.
                if (!e.Properties.ContainsKey(columnName))
                {
                    e.Properties.Add(columnName, columnValue);
                    batch.Merge(e);
                    logger.Write("<partition key:{0}>, <row key:{1}>", e.PartitionKey, e.RowKey);
                }
            }

            if (batch.Count > 0)
            {
                // ExecuteBatchInChunkAsync is an extension method that chunks into the
                // 100-operation batch limit and processes each chunk.
                tasks.Add(inputTable.ExecuteBatchInChunkAsync(batch));
                actualAffectedRecords += batch.Count;
            }
        }

        totalProcessedRecords += resultSegment.Results.Count;
        logger.Write("Processed records count: {0}", totalProcessedRecords);
        logger.Write("Affected records count: {0}", actualAffectedRecords);
    } while (tableContinuationToken != null);

    // All pending batch operations complete when WaitAll returns.
    // TODO: add ContinueWith on ExecuteBatchInChunkAsync for tracing each batch as it completes.
    Task.WaitAll(tasks.ToArray());
    logger.Write("Added new column to {0} records", actualAffectedRecords);

    return new Dictionary<string, string>();
}
/// <summary>Runs one segment of <paramref name="query"/> against the wrapped CloudTable.</summary>
/// <param name="query">The typed table query to execute.</param>
/// <param name="token">Continuation token from the previous segment, or null to start.</param>
public async Task<TableQuerySegment<T>> ExecuteQuerySegmentedAsync<T>(TableQuery<T> query, TableContinuationToken token)
    where T : ITableEntity, new()
    => await CloudTable.ExecuteQuerySegmentedAsync(query, token);
/// <summary>
/// Task-based wrapper over the APM Begin/EndExecuteSegmented pair, honoring
/// request options, operation context and cancellation.
/// </summary>
public Task<TableQuerySegment<TElement>> ExecuteSegmentedAsync(TableContinuationToken currentToken, TableRequestOptions requestOptions, OperationContext operationContext, CancellationToken cancellationToken)
    => AsyncExtensions.TaskFromApm(
        this.BeginExecuteSegmented,
        this.EndExecuteSegmented,
        currentToken,
        requestOptions,
        operationContext,
        cancellationToken);
/// <summary>
/// Builds the table command that executes this query: applies the continuation
/// point and optional server timeout to the underlying DataService query, then
/// wires the command's execute/parse callbacks.
/// </summary>
private TableCommand<ResultSegment<TElement>, IEnumerable<TElement>> GenerateExecuteCommand(TableContinuationToken continuationToken, TableRequestOptions requestOptions)
{
    // Start from the underlying query and fold in the continuation point.
    DataServiceQuery<TElement> serviceQuery = TableUtilities.ApplyContinuationToQuery<TElement>(continuationToken, this.Query);

    if (requestOptions.ServerTimeout.HasValue)
    {
        // Server timeout travels as a query-string option, in seconds.
        serviceQuery = serviceQuery.AddQueryOption(
            "timeout",
            Convert.ToString(requestOptions.ServerTimeout.Value.TotalSeconds, CultureInfo.InvariantCulture));
    }

    var command = new TableCommand<ResultSegment<TElement>, IEnumerable<TElement>>
    {
        ExecuteFunc = serviceQuery.Execute,
        Begin = (callback, state) => serviceQuery.BeginExecute(callback, state),
        End = serviceQuery.EndExecute,
        ParseResponse = this.ParseTableQueryResponse,
        ParseDataServiceError = ODataErrorHelper.ReadDataServiceResponseFromStream,
        Context = this.Context,
    };
    requestOptions.ApplyToStorageCommand(command);
    return command;
}
/// <summary>
/// Begins an asynchronous segmented query using default request options and
/// operation context.
/// </summary>
public ICancellableAsyncResult BeginExecuteSegmented(TableContinuationToken currentToken, AsyncCallback callback, object state)
    => this.BeginExecuteSegmented(currentToken, null /* RequestOptions */, null /* OperationContext */, callback, state);
/// <summary>
/// Task-based wrapper over the APM Begin/EndExecuteSegmented pair with cancellation support.
/// </summary>
public Task<TableQuerySegment<TElement>> ExecuteSegmentedAsync(TableContinuationToken currentToken, CancellationToken cancellationToken)
    => AsyncExtensions.TaskFromApm(this.BeginExecuteSegmented, this.EndExecuteSegmented, currentToken, cancellationToken);
/// <summary>
/// Returns one page of the user's home timeline: rows whose PartitionKey starts
/// with "&lt;userid&gt;_" and whose RowKey starts with "&lt;groupID&gt;_".
/// </summary>
/// <param name="userid">User whose timeline is read.</param>
/// <param name="groupID">Group the messages belong to.</param>
/// <param name="count">Maximum rows returned per page.</param>
/// <param name="continuationToken">Token from the previous page, or null for the first page.</param>
public MessagePagination HomeLine(string userid, string groupID, int count = 25, TableContinuationToken continuationToken = null)
{
    string filter = TableQuery.CombineFilters(
        GenerateStartWithConditionQuery(userid + "_"),
        TableOperators.And,
        GenerateStartWithConditionQuery(groupID + "_", "RowKey"));

    TableQuerySegment<BaseMessageEntity> segment = _homeline.ExecuteQuerySegmented(
        new TableQuery<BaseMessageEntity>().Where(filter).Take(count),
        continuationToken);

    var page = new MessagePagination
    {
        continuationToken = Utils.Token2String(segment.ContinuationToken),
        message = new List<Message>(),
    };
    foreach (BaseMessageEntity entity in segment)
    {
        page.message.Add(entity.ToMessage());
    }
    return page;
}
/// <summary>
/// Returns one page of a topic's timeline between <paramref name="start"/> and
/// <paramref name="end"/>, capped at <paramref name="count"/> rows.
/// </summary>
public MessagePagination TopicLine(string topicID, DateTime start, DateTime end, int count = 25, TableContinuationToken continuationToken = null)
{
    TableQuery<BaseMessageEntity> rangeQuery = new TableQuery<BaseMessageEntity>()
        .Where(GenerateTimestampConditionQuery(topicID, start, end))
        .Take(count);

    TableQuerySegment<BaseMessageEntity> segment = _topicline.ExecuteQuerySegmented(rangeQuery, continuationToken);

    var page = new MessagePagination
    {
        continuationToken = Utils.Token2String(segment.ContinuationToken),
        message = new List<Message>(),
    };
    foreach (BaseMessageEntity entity in segment)
    {
        page.message.Add(entity.ToMessage());
    }
    return page;
}
/// <summary>
/// Deletes the signed-in user's trip with the given id, including all of its
/// pitstops and the images stored in blob storage for trip and pitstops.
/// </summary>
/// <param name="id">User-scoped trip id to delete.</param>
/// <returns>200 with a confirmation message, or 404 when the trip does not exist.</returns>
public async Task<ActionResult<string>> DeleteTrip(int id)
{
    string UserID = User.Claims.FirstOrDefault(c => c.Type == ClaimTypes.NameIdentifier).Value;

    // Trips are partitioned per user and TripId is a property, so the user's
    // partition has to be scanned client-side; stop as soon as a match appears.
    TripTableEntity tripToDelete = null;
    var tripQuery = new TableQuery<TripTableEntity>().Where(
        TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, UserID));
    TableContinuationToken tokenTrip = null;
    do
    {
        TableQuerySegment<TripTableEntity> resultSegment = await _tableTrip.ExecuteQuerySegmentedAsync(tripQuery, tokenTrip);
        tokenTrip = resultSegment.ContinuationToken;
        tripToDelete = resultSegment.Results.FirstOrDefault(e => e.TripId == id);
    } while (tripToDelete == null && tokenTrip != null);

    if (tripToDelete == null)
    {
        return NotFound();
    }

    try
    {
        // Pitstops live in their own table, partitioned by "<tripId>;<userId>".
        var queryPitstop = new TableQuery<PitstopTableEntity>().Where(
            TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, tripToDelete.TripId.ToString() + ";" + UserID));
        TableContinuationToken tokenPitstop = null;
        do
        {
            TableQuerySegment<PitstopTableEntity> resultSegment = await _tablePitstop.ExecuteQuerySegmentedAsync(queryPitstop, tokenPitstop);
            tokenPitstop = resultSegment.ContinuationToken;
            foreach (PitstopTableEntity entity in resultSegment.Results)
            {
                // Blob images first, then the table row.
                await RemovePitstopImagesFromBlob(entity, _container);
                await _tablePitstop.ExecuteAsync(TableOperation.Delete(entity));
            }
        } while (tokenPitstop != null);

        await RemoveTripImagesFromBlob(tripToDelete, _container);
        await _tableTrip.ExecuteAsync(TableOperation.Delete(tripToDelete));
        return Ok($"Deleted trip {id} and all pitstops therein");
    }
    catch (DocumentClientException de)
    {
        // BUG FIX: the original mapped EVERY DocumentClientException to 404 (the
        // switch only handled NotFound and all other statuses fell through to
        // NotFound()), and de.StatusCode.Value could throw when StatusCode is null.
        if (de.StatusCode == System.Net.HttpStatusCode.NotFound)
        {
            return NotFound();
        }
        throw;
    }
}
/// <summary>
/// Returns all trips of the signed-in user, newest first. When the user has no
/// Person record yet, one is created and an empty list is returned.
/// </summary>
public async Task<ActionResult<IEnumerable<string>>> GetTrips()
{
    string UserID = User.Claims.FirstOrDefault(c => c.Type == ClaimTypes.NameIdentifier).Value;
    var tripList = new List<TripTableEntity>();

    // Existence check only — no need to materialize the person rows, and we can
    // stop paging as soon as anything is found.
    bool userExists = false;
    var personQuery = new TableQuery<PersonTableEntity>().Where(
        TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, UserID));
    TableContinuationToken tokenPerson = null;
    do
    {
        TableQuerySegment<PersonTableEntity> resultSegment = await _tablePerson.ExecuteQuerySegmentedAsync(personQuery, tokenPerson);
        tokenPerson = resultSegment.ContinuationToken;
        userExists |= resultSegment.Results.Count > 0;
    } while (!userExists && tokenPerson != null);

    if (userExists)
    {
        var tripQuery = new TableQuery<TripTableEntity>().Where(
            TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, UserID));
        TableContinuationToken tokenTrip = null;
        do
        {
            TableQuerySegment<TripTableEntity> resultSegment = await _tableTrip.ExecuteQuerySegmentedAsync(tripQuery, tokenTrip);
            tokenTrip = resultSegment.ContinuationToken;
            tripList.AddRange(resultSegment.Results);
        } while (tokenTrip != null);
        // NOTE: the original's "tripList == null" guard was dead code (the list is
        // always allocated above) and has been removed.
    }
    else
    {
        // First visit: create the Person record and fall through with an empty trip list.
        Person person = new Person { PersonId = UserID, Nickname = string.Empty, Avatar = string.Empty };
        await _tablePerson.ExecuteAsync(TableOperation.Insert(new PersonTableEntity(person)));
    }

    return Ok(tripList.OrderByDescending(a => a.StartDate));
}
/// <summary>
/// Creates a new trip for the signed-in user: stores the uploaded picture, assigns
/// the next free TripId, inserts the trip row and queues picture post-processing.
/// </summary>
/// <param name="newTrip">Incoming trip details including the picture payload.</param>
public async Task<ActionResult<string>> PostNewTrip(NewTrip newTrip)
{
    string UserID = User.Claims.FirstOrDefault(c => c.Type == ClaimTypes.NameIdentifier).Value;

    string photoName = await StorePicture(newTrip.picture);

    // Next TripId = max existing id + 1, tracked while paging instead of
    // materializing every row just to call Max().
    // NOTE(review): this read-then-insert is not safe under concurrent requests for
    // the same user — two requests can pick the same id; confirm whether that matters.
    int maxTripId = 0;
    var tripQuery = new TableQuery<TripTableEntity>().Where(
        TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, UserID));
    TableContinuationToken tokenTrip = null;
    do
    {
        TableQuerySegment<TripTableEntity> resultSegment = await _tableTrip.ExecuteQuerySegmentedAsync(tripQuery, tokenTrip);
        tokenTrip = resultSegment.ContinuationToken;
        foreach (TripTableEntity entity in resultSegment.Results)
        {
            maxTripId = Math.Max(maxTripId, entity.TripId);
        }
    } while (tokenTrip != null);

    Trip trip = new Trip
    {
        TripId = maxTripId + 1,
        PersonId = UserID,
        Headline = newTrip.Headline,
        Description = newTrip.Description,
        StartDate = newTrip.StartDate,
        EndDate = newTrip.EndDate,
        Position = newTrip.Position,
        // This needs to be updated! And the picture will be deleted at some point -
        // we will not store huge pics.
        MainPhotoUrl = photoName,
        MainPhotoSmallUrl = string.Empty,
    };
    // An unset EndDate deserializes to year 0001; treat that as a one-day trip.
    if (trip.EndDate.Year == 1)
    {
        trip.EndDate = trip.StartDate;
    }

    TripTableEntity tripTable = new TripTableEntity(trip);
    await _tableTrip.ExecuteAsync(TableOperation.Insert(tripTable));
    await AddQueueItem(new QueueParam { PictureUri = photoName, RowKey = tripTable.RowKey, PartitionKey = tripTable.PartitionKey });

    return Ok($"Trip created, id: {trip.TripId}");
}
/// <summary>
/// Returns the signed-in user's trip with the given id together with all of its
/// pitstops, or 404 when no such trip exists.
/// </summary>
public async Task<ActionResult<string>> GetTripAndPitstops(int Id)
{
    string UserID = User.Claims.FirstOrDefault(c => c.Type == ClaimTypes.NameIdentifier).Value;

    // Trips are partitioned per user; scan the partition for the matching TripId.
    var matches = new List<TripTableEntity>();
    var tripQuery = new TableQuery<TripTableEntity>().Where(
        TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, UserID));
    TableContinuationToken tripToken = null;
    do
    {
        TableQuerySegment<TripTableEntity> segment = await _tableTrip.ExecuteQuerySegmentedAsync(tripQuery, tripToken);
        tripToken = segment.ContinuationToken;
        matches.AddRange(segment.Results.Where(e => e.TripId == Id));
    } while (tripToken != null);

    var tripDetails = matches.FirstOrDefault();
    if (tripDetails == null)
    {
        return NotFound();
    }

    // Pitstops live in their own table under partition key "<tripId>;<userId>".
    List<Pitstop> pitstopList = new List<Pitstop>();
    var pitstopQuery = new TableQuery<PitstopTableEntity>().Where(
        TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, Id.ToString() + ";" + UserID));
    TableContinuationToken pitstopToken = null;
    do
    {
        TableQuerySegment<PitstopTableEntity> segment = await _tablePitstop.ExecuteQuerySegmentedAsync(pitstopQuery, pitstopToken);
        pitstopToken = segment.ContinuationToken;
        pitstopList.AddRange(segment.Results.Select(e => new Pitstop(e)));
    } while (pitstopToken != null);

    Trip trip = new Trip
    {
        TripId = tripDetails.TripId,
        Description = tripDetails.Description,
        EndDate = tripDetails.EndDate,
        Headline = tripDetails.Headline,
        MainPhotoSmallUrl = tripDetails.MainPhotoSmallUrl,
        MainPhotoUrl = tripDetails.MainPhotoUrl,
        PersonId = tripDetails.PersonId,
        Pitstops = pitstopList,
        Position = tripDetails.Position,
        StartDate = tripDetails.StartDate
    };
    return Ok(trip);
}
/// <summary>
/// Compares two tables chunk-by-chunk and returns the first difference found, or
/// null when the tables are equal.
/// NOTE(review): this assumes both tables return their rows in the same order and
/// with identically-sized segments — confirm that holds for the storage backend.
/// </summary>
/// <param name="src">Source table wrapper.</param>
/// <param name="dest">Destination table wrapper.</param>
/// <param name="onItemAction">Optional progress callback, invoked with the running item index.</param>
/// <returns>An <c>Error</c> describing the first mismatch, or null when equal.</returns>
public static async Task<Error> EqualToAsync(this TableEntitySdk src, TableEntitySdk dest, Action<int> onItemAction = null)
{
    var rangeQuery = new TableQuery<DynamicTableEntity>();
    TableContinuationToken srcTableContinuationToken = null;
    TableContinuationToken destTableContinuationToken = null;
    int chunkCounter = 0;
    int itemCounter = 0;
    do
    {
        chunkCounter++;
        // Issue both segment reads concurrently, then await each.
        var srcQuery = src.CloudTable.ExecuteQuerySegmentedAsync(rangeQuery, srcTableContinuationToken);
        var destQuery = dest.CloudTable.ExecuteQuerySegmentedAsync(rangeQuery, destTableContinuationToken);

        var srcQueryResponse = await srcQuery;
        srcTableContinuationToken = srcQueryResponse.ContinuationToken;
        var srcResult = srcQueryResponse.Results.ToArray();

        var destQueryResponse = await destQuery;
        destTableContinuationToken = destQueryResponse.ContinuationToken;
        var destResult = destQueryResponse.Results.ToArray();

        if (srcResult.Length != destResult.Length)
        {
            return new Error { Msg = $"Amount in chunk {chunkCounter} does not match.{Environment.NewLine}Src table: {srcResult.Length}{Environment.NewLine}Dest table: {destResult.Length}" };
        }

        for (int i = 0; i < srcResult.Length; i++, itemCounter++)
        {
            onItemAction?.Invoke(itemCounter);
            if (!srcResult[i].EqualTo(destResult[i]))
            {
                return new Error
                {
                    Msg = $"Chunk: {chunkCounter}{Environment.NewLine}" +
                          $"Item: {i}{Environment.NewLine}" +
                          $"Src object PartitionKey: {srcResult[i].PartitionKey}{Environment.NewLine}" +
                          $"Src object RowKey: {srcResult[i].RowKey}{Environment.NewLine}" +
                          $"Dest object PartitionKey: {destResult[i].PartitionKey}{Environment.NewLine}" +
                          $"Dest object RowKey: {destResult[i].RowKey}{Environment.NewLine}"
                };
            }
        }

        // BUG FIX: if dest ran out of segments while src still has one, the original
        // looped again with a null dest token, silently restarting the dest scan from
        // the beginning. Report the size mismatch instead.
        if (srcTableContinuationToken != null && destTableContinuationToken == null)
        {
            return new Error { Msg = "Src table contains more elements" };
        }
    } while (srcTableContinuationToken != null);

    // Records left in dest storage only.
    if (destTableContinuationToken != null)
    {
        return new Error { Msg = "Dest table contains more elements" };
    }
    return null;
}
}
/// <summary>
/// Delete all the records in the table linked to this event stream.
/// First the stream footer row is flagged as "Deleting" (with optimistic-concurrency
/// retries), then the stream's rows are removed in batches.
/// </summary>
/// <exception cref="EventStreamWriteException">
/// Thrown on a non-concurrency storage error, or when the footer cannot be updated
/// after 500 attempts (treated as a deadlock).
/// </exception>
public void DeleteStream()
{
    // 1 - mark the stream footer as "Deleting".
    bool recordUpdated = false;
    int tries = 0;
    TableEntityKeyRecord streamFooter = null;
    // FIX: one RNG for all retries instead of a new Random() per collision
    // (rapid re-instantiation can produce identical back-off sequences).
    Random backoff = new Random();
    while (!recordUpdated)
    {
        tries += 1;
        // Read in the [TableEntityKeyRecord] footer (row at sequence number 0).
        streamFooter = (TableEntityKeyRecord)Table.Execute(
            TableOperation.Retrieve<TableEntityKeyRecord>(this.InstanceKey, SequenceNumberAsString(0)),
            operationContext: GetDefaultOperationContext()).Result;
        if (null == streamFooter)
        {
            streamFooter = new TableEntityKeyRecord(this);
        }
        streamFooter.Deleting = true;
        string lastETag = streamFooter.ETag;
        try
        {
            // If-Match pins the update to the version we just read (optimistic concurrency).
            TableResult tres = Table.Execute(
                TableOperation.InsertOrReplace(streamFooter),
                null,
                new OperationContext { UserHeaders = new Dictionary<String, String> { { "If-Match", lastETag } } });
            if (tres.HttpStatusCode == 204)
            {
                recordUpdated = true;
            }
        }
        catch (Microsoft.Azure.Cosmos.Table.StorageException sEx)
        {
            if (sEx.RequestInformation.HttpStatusCode == (int)HttpStatusCode.PreconditionFailed)
            {
                // Precondition Failed - could not update the footer due to a concurrency error.
                recordUpdated = false;
                // Wait a random-ish amount of time before retrying.
                int delayMilliseconds = 13 * backoff.Next(10, 100);
                System.Threading.Thread.Sleep(delayMilliseconds);
            }
            else
            {
                throw new EventStreamWriteException(this, streamFooter.LastSequence,
                    message: "Unable to set the Deleting flag stream sequence number due to storage error",
                    source: "Table Event Stream Writer",
                    innerException: sEx);
            }
        }
        if (tries > 500)
        {
            // Catastrophic deadlock - give up.
            throw new EventStreamWriteException(this, streamFooter.LastSequence,
                message: "Unable to set the Deleting flag number due to deadlock",
                source: "Table Event Stream Writer");
        }
    }

    // 2 - delete the actual stream records.
    // We need a continuation token as this is done in batches of 100...
    if (Table != null)
    {
        TableContinuationToken token = new TableContinuationToken();
        TableQuery getEventsToDeleteQuery = DeleteRowsQuery();
        do
        {
            var segment = Table.ExecuteQuerySegmented(getEventsToDeleteQuery, token,
                requestOptions: new TableRequestOptions() { PayloadFormat = TablePayloadFormat.Json, TableQueryMaxItemCount = MAX_BATCH_SIZE },
                operationContext: GetDefaultOperationContext());

            TableBatchOperation deleteBatch = new TableBatchOperation();
            foreach (DynamicTableEntity dteRow in segment)
            {
                deleteBatch.Add(TableOperation.Delete(dteRow));
            }
            // BUG FIX: ExecuteBatch throws on an empty batch; skip when the segment had no rows.
            // NOTE(review): a batch may not span partition keys — confirm DeleteRowsQuery
            // is scoped to a single partition.
            if (deleteBatch.Count > 0)
            {
                Table.ExecuteBatch(deleteBatch);
            }
            // Advance to the next chunk of records.
            token = segment.ContinuationToken;
        } while (null != token);
    }
}
/// <summary>
/// Returns one page of a group's public-square timeline between
/// <paramref name="start"/> and <paramref name="end"/>.
/// When a bound is left unset, end defaults to now (UTC) and start to one day before end.
/// </summary>
/// <param name="groupID">Group whose timeline is read.</param>
/// <param name="start">Newer bound of the window; default(DateTime) means "end - 1 day".</param>
/// <param name="end">Older bound of the window; default(DateTime) means "now (UTC)".</param>
/// <param name="count">Maximum rows returned per page.</param>
/// <param name="continuationToken">Token from the previous page, or null for the first page.</param>
public MessagePagination PublicSquareLine(string groupID, DateTime start, DateTime end, int count = 25, TableContinuationToken continuationToken = null)
{
    // BUG FIX: DateTime is a non-nullable struct, so the original "== null" checks
    // could never be true and the defaults were never applied. Use default(DateTime)
    // as the "unset" sentinel instead.
    if (end == default(DateTime))
    {
        end = DateTime.UtcNow;
    }
    if (start == default(DateTime))
    {
        start = end.AddDays(-1);
    }

    // NOTE(review): 'start' feeds the LessThan bounds and 'end' the GreaterThanOrEqual
    // bounds, which suggests the keys sort reverse-chronologically — confirm against
    // Utils.ToAzureStorageDayBasedString.
    string query = TableQuery.GenerateFilterCondition(
        "PartitionKey",
        QueryComparisons.LessThan,
        groupID + "_" + Utils.NextKeyString(Utils.ToAzureStorageDayBasedString(start)));
    query = TableQuery.CombineFilters(
        query,
        TableOperators.And,
        TableQuery.GenerateFilterCondition(
            "PartitionKey",
            QueryComparisons.GreaterThanOrEqual,
            groupID + "_" + Utils.ToAzureStorageDayBasedString(end)));
    query = TableQuery.CombineFilters(
        query,
        TableOperators.And,
        TableQuery.GenerateFilterCondition(
            "RowKey",
            QueryComparisons.LessThan,
            Utils.NextKeyString(Utils.ToAzureStorageSecondBasedString(start))));
    query = TableQuery.CombineFilters(
        query,
        TableOperators.And,
        TableQuery.GenerateFilterCondition(
            "RowKey",
            QueryComparisons.GreaterThanOrEqual,
            Utils.ToAzureStorageSecondBasedString(end)));

    TableQuery<BaseMessageEntity> rangeQuery = new TableQuery<BaseMessageEntity>().Where(query).Take(count);
    TableQuerySegment<BaseMessageEntity> queryResult = _publicSquareLine.ExecuteQuerySegmented(rangeQuery, continuationToken);

    MessagePagination ret = new MessagePagination();
    ret.continuationToken = Utils.Token2String(queryResult.ContinuationToken);
    ret.message = new List<Message>();
    foreach (BaseMessageEntity entity in queryResult)
    {
        ret.message.Add(entity.ToMessage());
    }
    return ret;
}
/// <summary>
/// Synchronously executes one segment of the query, filling in default request
/// options and an operation context when the caller supplied none.
/// </summary>
public TableQuerySegment<TElement> ExecuteSegmented(TableContinuationToken continuationToken, TableRequestOptions requestOptions = null, OperationContext operationContext = null)
{
    TableRequestOptions effectiveOptions = TableRequestOptions.ApplyDefaults(requestOptions, this.Context.ServiceClient);
    OperationContext effectiveContext = operationContext ?? new OperationContext();
    return new TableQuerySegment<TElement>(this.ExecuteSegmentedCore(continuationToken, effectiveOptions, effectiveContext));
}
/// <summary>Stub override: always completes with a null segment.</summary>
public override async Task<TableQuerySegment> ExecuteQuerySegmentedAsync(TableQuery query, TableContinuationToken token)
    => await Task.FromResult<TableQuerySegment>(null);
/// <summary>Executes one query segment without cancellation support.</summary>
public Task<TableQuerySegment<TElement>> ExecuteSegmentedAsync(TableContinuationToken currentToken)
    => this.ExecuteSegmentedAsync(currentToken, CancellationToken.None);
/// <summary>
/// Deletes every entity matched by <paramref name="query"/>, batching the deletes
/// per partition key (an Azure table batch must stay within one partition and hold
/// at most TableConstants.TableServiceBatchMaximumOperations operations).
/// </summary>
/// <param name="query">Query selecting the entities to delete.</param>
/// <returns>Number of entities confirmed deleted (HTTP 204 per operation).</returns>
protected async Task<int> ExecuteBatchDeleteAsync<TEntity>(TableQuery<TEntity> query) where TEntity : TableEntity, new()
{
    int deleted = 0;
    int round = 0;
    TableContinuationToken continuationToken = null;
    do
    {
        var queryResults = await Table.ExecuteQuerySegmentedAsync(query, continuationToken);
        continuationToken = queryResults.ContinuationToken;

        // Group by partition key — a TableBatchOperation cannot mix partitions.
        var partitions = queryResults.Results
            .GroupBy(entity => entity.PartitionKey)
            .OrderBy(g => g.Key)
            .ToList();

        foreach (var partition in partitions)
        {
            // FIX: materialize the group once instead of repeatedly re-enumerating
            // it via Count(); one unified chunk loop replaces the duplicated
            // small-batch / chunked branches of the original.
            var entityList = partition.ToList();
            for (int pos = 0; pos < entityList.Count; pos += TableConstants.TableServiceBatchMaximumOperations)
            {
                var tableBatchOperation = new TableBatchOperation();
                int chunkEnd = Math.Min(pos + TableConstants.TableServiceBatchMaximumOperations, entityList.Count);
                for (int i = pos; i < chunkEnd; i++)
                {
                    tableBatchOperation.Add(TableOperation.Delete(entityList[i]));
                }
                Console.WriteLine($"Patch delete for {tableBatchOperation.Count}");
                var result = await Table.ExecuteBatchAsync(tableBatchOperation);
                // Count only operations the service confirmed with 204 No Content.
                deleted += result.Count(opResult => opResult.HttpStatusCode == 204);
            }
            Console.WriteLine($"Deleted {deleted} entries");
        }
        if (partitions.Count > 0)
        {
            Console.WriteLine($"Finish {round} round of batch delete");
        }
        round++;
    } while (continuationToken != null);
    return deleted;
}
/// <summary>Executes one query segment with explicit options but no cancellation support.</summary>
public Task<TableQuerySegment<TElement>> ExecuteSegmentedAsync(TableContinuationToken currentToken, TableRequestOptions requestOptions, OperationContext operationContext)
    => this.ExecuteSegmentedAsync(currentToken, requestOptions, operationContext, CancellationToken.None);
/// <summary>
/// Test double: records every argument it receives so assertions can inspect them,
/// then completes with the pre-configured output segment.
/// </summary>
public override Task<TableQuerySegment<DynamicTableEntity>> ExecuteQuerySegmentedAsync(TableQuery query, TableContinuationToken token, TableRequestOptions requestOptions, OperationContext operationContext)
{
    // Capture the inputs for later verification.
    this.ExecuteQuerySegmentedAsync_InputQuery = query;
    this.ExecuteQuerySegmentedAsync_InputToken = token;
    this.ExecuteQuerySegmentedAsync_InputRequestOptions = requestOptions;
    this.ExecuteQuerySegmentedAsync_InputOperationContext = operationContext;

    // Hand back the canned segment configured by the fixture.
    return Task.FromResult(this.ExecuteQuerySegmentedAsync_Output!);
}
/// <summary>
/// Builds the execute command for this segment and runs it synchronously under
/// the configured retry policy.
/// </summary>
internal ResultSegment<TElement> ExecuteSegmentedCore(TableContinuationToken continuationToken, TableRequestOptions requestOptions, OperationContext operationContext)
    => TableExecutor.ExecuteSync(
        this.GenerateExecuteCommand(continuationToken, requestOptions),
        requestOptions.RetryPolicy,
        operationContext);
/// <summary>
/// Lists the DocumentDB collections behind the "TablesDB" database, surfacing each
/// collection as a table row: a DynamicTableEntity carrying one "TableName" property.
/// </summary>
/// <param name="maxItemCount">Not used by this method; the page size comes from the default feed options.</param>
/// <param name="filterString">Optional table-style filter; translated to SQL when present.</param>
/// <param name="token">Continuation token from a previous page (NextRowKey carries the backend continuation).</param>
/// <param name="client">Provides the underlying DocumentClient.</param>
/// <param name="table">Not used by this method.</param>
/// <param name="requestOptions">Used to build the feed options.</param>
/// <param name="operationContext">Receives a request result entry for telemetry.</param>
/// <returns>A result segment; its ContinuationToken is set when more pages exist.</returns>
internal static async Task<TableQuerySegment<TResult>> QueryCollectionsAsync<TResult>(int? maxItemCount, string filterString, TableContinuationToken token, CloudTableClient client, CloudTable table, TableRequestOptions requestOptions, OperationContext operationContext)
{
    ValidateContinuationToken(token);
    FeedOptions defaultFeedOptions = GetDefaultFeedOptions(requestOptions);
    // Resume from the previous page, if any.
    defaultFeedOptions.RequestContinuation = token?.NextRowKey;
    FeedResponse<DocumentCollection> feedResponse;
    if (string.IsNullOrEmpty(filterString))
    {
        feedResponse = await client.DocumentClient.CreateDocumentCollectionQuery(UriFactory.CreateDatabaseUri("TablesDB"), defaultFeedOptions).AsDocumentQuery().ExecuteNextAsync<DocumentCollection>();
    }
    else
    {
        // Translate the table filter into a SQL query over the collection feed.
        string sqlQuery = QueryTranslator.GetSqlQuery("*", filterString, isLinqExpression: false, isTableQuery: true, null);
        feedResponse = await client.DocumentClient.CreateDocumentCollectionQuery(UriFactory.CreateDatabaseUri("TablesDB"), sqlQuery, defaultFeedOptions).AsDocumentQuery().ExecuteNextAsync<DocumentCollection>();
    }
    operationContext.RequestResults.Add(feedResponse.ToRequestResult());
    List<TResult> list = new List<TResult>();
    foreach (DocumentCollection item in feedResponse)
    {
        // Each collection id becomes the "TableName" column of a synthetic entity.
        list.Add((TResult)(object)new DynamicTableEntity { Properties = { { "TableName", new EntityProperty(item.Id) } } });
    }
    TableQuerySegment<TResult> tableQuerySegment = new TableQuerySegment<TResult>(list);
    if (!string.IsNullOrEmpty(feedResponse.ResponseContinuation))
    {
        // Propagate the backend continuation through NextRowKey.
        tableQuerySegment.ContinuationToken = new TableContinuationToken { NextRowKey = feedResponse.ResponseContinuation };
    }
    tableQuerySegment.RequestCharge = feedResponse.RequestCharge;
    return(tableQuerySegment);
}
/// <summary>Returns one page of the global object-history log records.</summary>
/// <param name="tableContinuationToken">Token from a previous page, or null for the first page.</param>
public async Task<IActionResult> GetGlobalLogs([FromBody] TableContinuationToken tableContinuationToken = null)
    => Ok(await _objectHistoryClient.GetObjectHistoryRecordsByApplicationNamePartitionKeyAsync(tableContinuationToken));
/// <summary>
/// Translates a table query into SQL, executes it against the table's backing
/// DocumentDB collection, and maps each returned Document to a TResult through
/// <paramref name="resolver"/>.
/// </summary>
/// <param name="maxItemCount">Optional page-size override for the feed options.</param>
/// <param name="filterString">Filter expression handed to the SQL translator.</param>
/// <param name="selectColumns">Requested columns; copied defensively because it is consumed twice (SELECT-list generation and entity translation).</param>
/// <param name="token">Continuation token from a previous page (NextRowKey carries the backend continuation).</param>
/// <param name="client">Provides the underlying DocumentClient.</param>
/// <param name="table">Supplies the target collection URI.</param>
/// <param name="resolver">Projects system properties plus entity properties into a TResult.</param>
/// <param name="requestOptions">Source of session token and feed defaults.</param>
/// <param name="operationContext">Receives a request result entry for telemetry.</param>
/// <param name="isLinqExpression">Passed through to the SQL translator.</param>
/// <param name="orderByItems">Passed through to the SQL translator.</param>
/// <param name="tombstoneKey">Passed through to the SQL translator.</param>
/// <returns>A result segment; its ContinuationToken is set when more pages exist.</returns>
internal static async Task<TableQuerySegment<TResult>> QueryDocumentsAsync<TResult>(int? maxItemCount, string filterString, IList<string> selectColumns, TableContinuationToken token, CloudTableClient client, CloudTable table, EntityResolver<TResult> resolver, TableRequestOptions requestOptions, OperationContext operationContext, bool isLinqExpression, IList<OrderByItem> orderByItems, string tombstoneKey)
{
    ValidateContinuationToken(token);
    // Defensive copy so the caller's list is never mutated.
    selectColumns = ((selectColumns != null) ? new List<string>(selectColumns) : null);
    Dictionary<string, bool> selectedSystemProperties = new Dictionary<string, bool>();
    // GetSelectList also reports which system properties the SELECT list includes.
    string sqlQuery = QueryTranslator.GetSqlQuery(GetSelectList(selectColumns, requestOptions, out selectedSystemProperties), filterString, isLinqExpression, isTableQuery: false, orderByItems, tombstoneKey, enableTimestampQuery: true);
    FeedOptions defaultFeedOptions = GetDefaultFeedOptions(requestOptions);
    if (maxItemCount.HasValue)
    {
        defaultFeedOptions.MaxItemCount = maxItemCount;
    }
    defaultFeedOptions.SessionToken = requestOptions.SessionToken;
    // Resume from the previous page, if any.
    defaultFeedOptions.RequestContinuation = token?.NextRowKey;
    FeedResponse<Document> feedResponse = await client.DocumentClient.CreateDocumentQuery<Document>(table.GetCollectionUri(), sqlQuery, defaultFeedOptions).AsDocumentQuery().ExecuteNextAsync<Document>();
    operationContext.RequestResults.Add(feedResponse.ToRequestResult());
    List<TResult> list = new List<TResult>();
    foreach (Document item in feedResponse)
    {
        // Rewrite the backend etag into the client-side format before translation.
        var itemETag = EtagHelper.ConvertFromBackEndETagFormat(item.ETag);
        item.SetPropertyValue("_etag", itemETag);
        IDictionary<string, EntityProperty> entityPropertiesFromDocument = EntityTranslator.GetEntityPropertiesFromDocument(item, selectColumns);
        // Only hand the resolver the system properties the SELECT list actually included.
        list.Add(resolver(selectedSystemProperties["PartitionKey"] ? item.GetPropertyValue<string>("$pk") : null, selectedSystemProperties["RowKey"] ? item.GetPropertyValue<string>("$id") : null, selectedSystemProperties["Timestamp"] ? ((DateTimeOffset)item.Timestamp) : default(DateTimeOffset), entityPropertiesFromDocument, selectedSystemProperties["Etag"] ? item.ETag : null));
    }
    TableQuerySegment<TResult> tableQuerySegment = new TableQuerySegment<TResult>(list);
    if (!string.IsNullOrEmpty(feedResponse.ResponseContinuation))
    {
        // Propagate the backend continuation through NextRowKey.
        tableQuerySegment.ContinuationToken = new TableContinuationToken { NextRowKey = feedResponse.ResponseContinuation };
    }
    tableQuerySegment.RequestCharge = feedResponse.RequestCharge;
    return(tableQuerySegment);
}
/// <summary>Runs one segment of the dynamic-entity query against the wrapped CloudTable.</summary>
public async Task<TableQuerySegment<DynamicTableEntity>> ExecuteQuerySegmentedAsync(TableQuery query, TableContinuationToken token)
    => await CloudTable.ExecuteQuerySegmentedAsync(query, token);
/// <summary>
/// Gets a clusrun job by id together with a page of its per-node results.
/// Retrieves the job entry row, then range-scans the result rows starting after
/// <paramref name="lastNodeName"/>, groups them by node name, and attaches them to the job.
/// </summary>
/// <param name="jobId">Identifier of the clusrun job.</param>
/// <param name="lastNodeName">Node name to resume the result scan from (paging cursor); null starts from the beginning.</param>
/// <param name="nodeCount">Maximum number of result rows per query page; null means no Take limit.</param>
/// <param name="token">Cancellation token for the table operations.</param>
/// <returns>The job with grouped node results, or null when the job entry row does not exist.</returns>
public async Task<JobResult> GetAsync(int jobId, [FromQuery] string lastNodeName, [FromQuery] int? nodeCount, CancellationToken token)
{
    this.logger.LogInformation("Get clusrun job called. getting job");
    var jobTable = this.utilities.GetJobsTable();
    var jobPartitionKey = this.utilities.GetJobPartitionKey($"{JobType.ClusRun}", jobId);
    var rowKey = utilities.JobEntryKey;
    var result = await jobTable.ExecuteAsync(
        TableOperation.Retrieve<JsonTableEntity>(jobPartitionKey, rowKey),
        null,
        null,
        token);
    this.logger.LogInformation("Retrive job {0} status code {1}", jobId, result.HttpStatusCode);
    // Throws for non-success status codes before we try to use the result.
    HttpResponseMessage response = new HttpResponseMessage((HttpStatusCode)result.HttpStatusCode);
    response.EnsureSuccessStatusCode();
    if (result.Result == null)
    {
        return (null);
    }
    JobResult j = ((JsonTableEntity)result.Result).GetObject<JobResult>();
    this.logger.LogInformation("Fetching job {0} output", jobId);
    // Scan result rows in (lastNodeName, max] within this job's partition.
    var lowResultKey = this.utilities.GetJobResultKey(lastNodeName, null);
    var highResultKey = this.utilities.GetMaximumJobResultKey();
    var partitionQuery = this.utilities.GetPartitionQueryString(jobPartitionKey);
    var rowKeyRangeQuery = this.utilities.GetRowKeyRangeString(lowResultKey, highResultKey);
    var q = new TableQuery<JsonTableEntity>()
        .Where(TableQuery.CombineFilters(partitionQuery, TableOperators.And, rowKeyRangeQuery))
        .Take(nodeCount);
    TableContinuationToken conToken = null;
    // NOTE(review): this pre-allocated list is overwritten by the grouped assignment below,
    // so the capacity hint (nodeCount ?? 1000) is effectively unused.
    j.Results = new List<NodeResult>(nodeCount ??
    1000);
    var taskInfos = new List<(string, ComputeNodeTaskCompletionEventArgs)>();
    do
    {
        var queryResult = await jobTable.ExecuteQuerySegmentedAsync(q, conToken, null, null, token);
        taskInfos.AddRange(queryResult.Results.Select(r => (r.RowKey, r.GetObject<ComputeNodeTaskCompletionEventArgs>())));
        conToken = queryResult.ContinuationToken;
    }
    while (conToken != null);
    // One NodeResult per node (case-insensitive on node name), each carrying that node's command results.
    j.Results = taskInfos.GroupBy(t => t.Item2.NodeName.ToLowerInvariant()).Select(g => new NodeResult()
    {
        NodeName = g.Key,
        JobId = jobId,
        Results = g.Select(e => new CommandResult()
        {
            CommandLine = j.CommandLine,
            NodeName = g.Key,
            ResultKey = e.Item1,
            TaskInfo = e.Item2.TaskInfo,
            // Diagnostic test lookup is optional; null when the job carries no diagnostic tests.
            Test = j.DiagnosticTests?[e.Item2.TaskInfo.TaskId],
        }).ToList(),
    }).ToList();
    return (j);
}
/// <summary> /// Data collection step (ie producer). /// /// Collects data to process, caches it locally on disk and adds to the _partitionKeyQueue collection for the consumer /// </summary> private void CollectDataToProcess(int purgeRecordsOlderThanDays) { var query = PartitionKeyHandler.GetTableQuery(purgeRecordsOlderThanDays); var continuationToken = new TableContinuationToken(); string previouslyCachedPartitionKey = null; try { do { var page = TableReference.ExecuteQuerySegmented(query, continuationToken); if (page.Results.Count == 0) { Logger.Information("No results available"); break; } var firstResultTimestamp = PartitionKeyHandler.ConvertPartitionKeyToDateTime(page.Results.First().PartitionKey); LogStartingToProcessPage(page, firstResultTimestamp); _cancellationTokenSource.Token.ThrowIfCancellationRequested(); var partitionsFromPage = GetPartitionsFromPage(page.Results); foreach (var partition in partitionsFromPage) { var partitionKey = partition.First().PartitionKey; if (!string.IsNullOrEmpty(previouslyCachedPartitionKey) && partitionKey != previouslyCachedPartitionKey) { // we've moved onto a new partitionKey, queue the one we previously cached QueuePartitionKeyForProcessing(previouslyCachedPartitionKey); } using (var streamWriter = GetStreamWriterForPartitionTempFile(partitionKey)) { foreach (var entity in partition) { var lineToWrite = $"{entity.PartitionKey},{entity.RowKey}"; streamWriter.WriteLine(lineToWrite); Interlocked.Increment(ref _globalEntityCounter); } } previouslyCachedPartitionKey = partitionKey; } continuationToken = page.ContinuationToken; } while (continuationToken != null); // queue the last partition we just processed if (previouslyCachedPartitionKey != null) { QueuePartitionKeyForProcessing(previouslyCachedPartitionKey); } } finally { _partitionKeyQueue.CompleteAdding(); } }
/// <summary>
/// Downloads WAD performance-counter rows matching <paramref name="query"/> from the
/// "WADPerformanceCountersTable" diagnostics table and flushes them page-by-page either
/// into the load-test SQL database (when <paramref name="isTable"/> is true) or into
/// CSV files on disk (one file per flush).
/// </summary>
/// <param name="query">Table query selecting the performance-counter rows to download.</param>
/// <param name="runid">Run identifier stamped onto every SQL row; unused for CSV output.</param>
/// <param name="isTable">True to bulk-copy into SQL; false to write CSV files.</param>
/// <returns>Always true; per-row and per-flush failures are logged to the console and skipped.</returns>
private static async Task<bool> BeginDownload(TableQuery<DynamicTableEntity> query, string runid, bool isTable)
{
    // NOTE(review): declared async but contains no awaits — all table/SQL/file I/O below is
    // synchronous; consider the async table/bulk-copy APIs if callers expect true asynchrony.
    const string CsvHeader = "PreciseTime,MachineName,CounterCategory,CounterInstance,CounterName,CounterValue";
    string dbConnectionString = ConfigurationManager.AppSettings["LoadTestDBConString"];

    // Initialize exactly one sink per mode. (Previously a throw-away DataTable and an
    // undisposed SqlBulkCopy were created unconditionally and then overwritten.)
    DataTable perfdataTable = null;
    SqlBulkCopy sqlBulkCopy = null;
    StringBuilder sb = new StringBuilder();
    if (isTable)
    {
        perfdataTable = DbHelper.InitializeDataTable();
        sqlBulkCopy = DbHelper.InitializeCopyMap(dbConnectionString);
    }
    else
    {
        sb.AppendLine(CsvHeader);
    }

    CloudStorageAccount counterStorageAccount = CloudStorageAccount.Parse(ConfigurationManager.AppSettings["DiagnosticsStorageAccount"]);
    CloudTableClient perfCounterTableClient = counterStorageAccount.CreateCloudTableClient();
    CloudTable perfCounterTable = perfCounterTableClient.GetTableReference("WADPerformanceCountersTable");

    var linecount = 0;

    // Flushes accumulated rows (SQL) or lines (CSV) and resets the buffers.
    // Failures are logged and the buffered data retried on the next flush.
    void Flush()
    {
        if (linecount <= 0)
        {
            return;
        }
        try
        {
            if (isTable)
            {
                sqlBulkCopy.WriteToServer(perfdataTable);
                perfdataTable.Rows.Clear();
            }
            else
            {
                var filename = string.Concat("PerfCounters", Guid.NewGuid().ToString().Replace('-', '_') + ".csv");
                using (StreamWriter sw = File.CreateText(Path.Combine(ConfigurationManager.AppSettings["CSVFileLocation"], filename)))
                {
                    sw.Write(sb.ToString());
                }
                sb.Clear();
                sb.AppendLine(CsvHeader);
            }
            linecount = 0;
        }
        catch (Exception e)
        {
            Console.WriteLine("Exception when writing data " + e.Message);
        }
    }

    TableContinuationToken token = null;
    do
    {
        TableQuerySegment<DynamicTableEntity> segment = perfCounterTable.ExecuteQuerySegmented(query, token);
        token = segment.ContinuationToken;
        foreach (DynamicTableEntity counter in segment)
        {
            try
            {
                // CounterName is either "\Category(Instance)\Name" or "\Category\Name";
                // the latter gets the conventional single-instance placeholder.
                string temp = counter["CounterName"].StringValue;
                string category, instance, countername;
                if (temp.IndexOf(")") != -1)
                {
                    int categoryMarker = temp.IndexOf("(");
                    category = temp.Substring(1, categoryMarker - 1);
                    temp = temp.Substring(categoryMarker + 1);
                    int instanceMarker = temp.IndexOf(")");
                    instance = temp.Substring(0, instanceMarker);
                    countername = temp.Substring(instanceMarker + 2);
                }
                else
                {
                    int categoryMarker = temp.IndexOf(@"\", 2);
                    category = temp.Substring(1, categoryMarker - 1);
                    instance = "systemdiagnosticsperfcounterlibsingleinstance";
                    countername = temp.Substring(categoryMarker + 1);
                }

                if (isTable)
                {
                    DataRow row = perfdataTable.NewRow();
                    row["RunId"] = runid;
                    row["CounterCollectionTime"] = counter.Properties["PreciseTimeStamp"].DateTime;
                    row["MachineName"] = counter.Properties["RoleInstance"].StringValue;
                    row["CounterCategory"] = category;
                    row["CounterInstance"] = instance;
                    row["CounterName"] = countername;
                    row["CounterValue"] = counter.Properties["CounterValue"].DoubleValue;
                    perfdataTable.Rows.Add(row);
                }
                else
                {
                    sb.AppendLine(counter["PreciseTimeStamp"].DateTime + "," + counter["RoleInstance"].StringValue + "," + category + "," + instance + "," + countername + "," + counter["CounterValue"].DoubleValue);
                }
            }
            catch (Exception e)
            {
                Console.WriteLine("Exception : " + e.Message);
            }
            // Counted even when parsing failed, matching the original flush cadence.
            ++linecount;
        }
        Flush();
    } while (token != null);

    // Flush any remainder from the final page.
    Flush();
    return true;
}