async Task PurgeOrchestrationHistorySegmentAsync(
    TableQuerySegment<OrchestrationStateEntity> orchestrationStateEntitySegment)
{
    var stateEntitiesToDelete = new List<OrchestrationStateEntity>(orchestrationStateEntitySegment.Results);

    var historyEntitiesToDelete = new ConcurrentBag<IEnumerable<OrchestrationHistoryEventEntity>>();
    await Task.WhenAll(orchestrationStateEntitySegment.Results.Select(
        entity => Task.Run(async () =>
        {
            IEnumerable<OrchestrationHistoryEventEntity> historyEntities =
                await tableClient.ReadOrchestrationHistoryEventsAsync(
                    entity.State.OrchestrationInstance.InstanceId,
                    entity.State.OrchestrationInstance.ExecutionId).ConfigureAwait(false);
            historyEntitiesToDelete.Add(historyEntities);
        })));

    List<Task> historyDeleteTasks = historyEntitiesToDelete.Select(
        historyEventList => tableClient.DeleteEntitesAsync(historyEventList)).Cast<Task>().ToList();

    // The history deletes must complete before the state deletes so we don't leave orphaned history events.
    await Task.WhenAll(historyDeleteTasks).ConfigureAwait(false);

    await tableClient.DeleteEntitesAsync(stateEntitiesToDelete).ConfigureAwait(false);
}
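For context, a caller of this helper would typically page through the instance-store table with ExecuteQuerySegmentedAsync and feed each segment to the purge method. A minimal sketch of such a driver loop, assuming direct access to the underlying CloudTable and a TableQuery<OrchestrationStateEntity> built elsewhere (both assumptions; in the real code these may be wrapped by tableClient):

// Hypothetical driver loop; historyTable and purgeQuery are assumptions, not from the source.
async Task PurgeOrchestrationHistoryAsync(
    CloudTable historyTable, TableQuery<OrchestrationStateEntity> purgeQuery)
{
    TableContinuationToken token = null;
    do
    {
        // Fetch one page of orchestration state rows, then purge it.
        TableQuerySegment<OrchestrationStateEntity> segment =
            await historyTable.ExecuteQuerySegmentedAsync(purgeQuery, token).ConfigureAwait(false);
        token = segment.ContinuationToken;
        await PurgeOrchestrationHistorySegmentAsync(segment).ConfigureAwait(false);
    }
    while (token != null);
}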
public async Task Execute(params string[] args)
{
    var stopWatch = Stopwatch.StartNew();
    var parsedArguments = TableCopyArguments.Parse(args);
    if (!parsedArguments.Valid)
    {
        Console.WriteLine("Must give source and target, both in the form environmentName:TableName.");
        return;
    }

    Console.Out.WriteLine(
        "Copying {0} from {1} to {2} at {3}.",
        parsedArguments.Source.TableName,
        parsedArguments.Source.Environment,
        parsedArguments.Target.TableName,
        parsedArguments.Target.Environment);

    var sourceTable = new EnvironmentClient(parsedArguments.Source.Environment).GetTable(parsedArguments.Source.TableName);
    var targetTable = new EnvironmentClient(parsedArguments.Target.Environment).GetTable(parsedArguments.Target.TableName);

    if (!await sourceTable.ExistsAsync())
    {
        Console.Out.WriteLine("Source table does not exist, exiting.");
        return;
    }

    await targetTable.CreateIfNotExistsAsync();

    // Page through the entire source table, following continuation tokens.
    var allQuery = new TableQuery<DynamicTableEntity>();
    TableQuerySegment<DynamicTableEntity> querySegment = null;
    var sourceEntities = new List<DynamicTableEntity>();
    while (querySegment == null || querySegment.ContinuationToken != null)
    {
        querySegment = await sourceTable.ExecuteQuerySegmentedAsync(
            allQuery, querySegment?.ContinuationToken);
        sourceEntities.AddRange(querySegment);
    }

    // Entities in a batch operation must share a partition key, so group by PartitionKey first.
    var batches = new List<TableBatchOperation>();
    var groupedByPartitionKey = sourceEntities.GroupBy(
        p => p.PartitionKey,
        e => e,
        (key, list) => new KeyValuePair<string, IEnumerable<DynamicTableEntity>>(key, list));
    foreach (var g in groupedByPartitionKey)
    {
        // An Azure Storage batch operation accepts at most 100 entities, so chunk larger groups.
        const int BatchSize = 100;
        int i = 1;
        foreach (var chunk in g.Value.Chunk(BatchSize))
        {
            Console.Out.WriteLine(g.Key + " - Batch " + i);
            var batch = new TableBatchOperation();
            foreach (var entity in chunk)
            {
                batch.Insert(entity);
            }
            batches.Add(batch);
            i++;
        }
    }

    Console.Out.WriteLine("This might take a while...");
    var waits = batches.Select(targetTable.ExecuteBatchAsync).Cast<Task>().ToArray();
    Task.WaitAll(waits, TimeSpan.FromHours(1));

    Console.Out.WriteLine(
        "\r\nCopying successful, copied {0:n0} rows in {1:n1} seconds.",
        sourceEntities.Count,
        stopWatch.Elapsed.TotalSeconds);
}
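Invocation takes the two environmentName:TableName arguments the parser expects. A minimal usage sketch, assuming the method lives on a class named TableCopyCommand (the class name is an assumption):

// Hypothetical usage; TableCopyCommand and the environment names are assumptions.
var command = new TableCopyCommand();
await command.Execute("staging:Orders", "production:Orders");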
public static dynamic GetAzureStorageListingsCore(HttpRequest req, Logging logging)
{
    string requestBody = new System.IO.StreamReader(req.Body).ReadToEndAsync().Result;
    dynamic taskInformation = JsonConvert.DeserializeObject(requestBody);
    string _TaskInstanceId = taskInformation["TaskInstanceId"].ToString();
    string _ExecutionUid = taskInformation["ExecutionUid"].ToString();

    try
    {
        string _storageAccountName = taskInformation["Source"]["StorageAccountName"];
        // The "name" is actually the base URL, so parse out the account name only.
        _storageAccountName = _storageAccountName.Split('.')[0].Replace("https://", "");
        string _storageAccountToken = taskInformation["Source"]["StorageAccountToken"];
        Int64 _SourceSystemId = taskInformation["Source"]["SystemId"];

        TaskMetaDataDatabase TMD = new TaskMetaDataDatabase();
        using SqlConnection _con = TMD.GetSqlConnection();

        // Find the high-water mark of previously listed rows for this system.
        var res = _con.QueryWithRetry(string.Format(
            "Select Max(PartitionKey) MaxPartitionKey from AzureStorageListing where SystemId = {0}",
            _SourceSystemId.ToString()));
        string MaxPartitionKey = DateTime.UtcNow.AddDays(-1).ToString("yyyy-MM-dd hh:mm");
        foreach (var r in res)
        {
            if (r.MaxPartitionKey != null)
            {
                MaxPartitionKey = DateTime.Parse(r.MaxPartitionKey).AddMinutes(-1).ToString("yyyy-MM-dd hh:mm");
            }
        }

        using (HttpClient SourceClient = new HttpClient())
        {
            // Use the SAS URI to connect rather than MSI / service principal,
            // as AD-based auth is not yet available for tables.
            var _storageCredentials = new StorageCredentials(_storageAccountToken);
            var SourceStorageAccount = new CloudStorageAccount(
                storageCredentials: _storageCredentials,
                accountName: _storageAccountName,
                endpointSuffix: "core.windows.net",
                useHttps: true);
            var client = SourceStorageAccount.CreateCloudTableClient();
            CloudTable table = client.GetTableReference("Filelist");
            TableQuery<DynamicTableEntity> query = new TableQuery<DynamicTableEntity>().Where(
                TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.GreaterThan, MaxPartitionKey));

            // Stage the results in a DataTable so they can be bulk-inserted later.
            DataTable dt = new DataTable();
            dt.Columns.Add(new DataColumn { ColumnName = "PartitionKey", DataType = typeof(string) });
            dt.Columns.Add(new DataColumn { ColumnName = "RowKey", DataType = typeof(string) });
            dt.Columns.Add(new DataColumn { ColumnName = "SystemId", DataType = typeof(Int64) });
            dt.Columns.Add(new DataColumn { ColumnName = "FilePath", DataType = typeof(string) });

            string Filelist = "";
            TableContinuationToken token = null;
            do
            {
                TableQuerySegment<DynamicTableEntity> resultSegment =
                    table.ExecuteQuerySegmentedAsync(query, token).Result;
                token = resultSegment.ContinuationToken;

                // Load the segment into the data table.
                foreach (var entity in resultSegment.Results)
                {
                    DataRow dr = dt.NewRow();
                    dr["PartitionKey"] = entity.PartitionKey;
                    dr["RowKey"] = entity.RowKey;
                    dr["SystemId"] = _SourceSystemId;
                    dr["FilePath"] = entity.Properties["FilePath"].StringValue;
                    dt.Rows.Add(dr);
                    Filelist += entity.Properties["FilePath"].StringValue + System.Environment.NewLine;
                }
            } while (token != null);

            if (dt.Rows.Count > 0)
            {
                Table t = new Table();
                t.Schema = "dbo";
                string TableGuid = Guid.NewGuid().ToString();
                t.Name = $"#AzureStorageListing{TableGuid}";
                TMD.BulkInsert(dt, t, true, _con);

                Dictionary<string, string> SqlParams = new Dictionary<string, string>
                {
                    { "TempTable", t.QuotedSchemaAndName() },
                    { "SourceSystemId", _SourceSystemId.ToString() }
                };
                string MergeSQL = GenerateSQLStatementTemplates.GetSQL(
                    Shared.GlobalConfigs.GetStringConfig("SQLTemplateLocation"),
                    "MergeIntoAzureStorageListing",
                    SqlParams);
                _con.ExecuteWithRetry(MergeSQL, 120);

                if ((JArray)taskInformation["Alerts"] != null)
                {
                    foreach (JObject Alert in (JArray)taskInformation["Alerts"])
                    {
                        // Only send out task-specific operator-level alerts.
                        if (Alert["AlertCategory"].ToString() == "Task Specific Operator Alert")
                        {
                            AlertOperator(_SourceSystemId, Alert["AlertEmail"].ToString(), "", Filelist);
                        }
                    }
                }
            }

            _con.Close();
            _con.Dispose();
            TMD.LogTaskInstanceCompletion(
                System.Convert.ToInt64(_TaskInstanceId),
                System.Guid.Parse(_ExecutionUid),
                TaskMetaData.BaseTasks.TaskStatus.Complete,
                Guid.Empty,
                "");
        }
    }
    catch (Exception e)
    {
        logging.LogErrors(e);
        TaskMetaDataDatabase TMD = new TaskMetaDataDatabase();
        TMD.LogTaskInstanceCompletion(
            System.Convert.ToInt64(_TaskInstanceId),
            System.Guid.Parse(_ExecutionUid),
            TaskMetaData.BaseTasks.TaskStatus.FailedRetry,
            Guid.Empty,
            "Failed when trying to Generate Sas URI and Send Email");
        JObject Root = new JObject { ["Result"] = "Failed" };
        return Root;
    }

    return new { };
}
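One detail worth noting in the watermark pattern above: PartitionKey values only compare correctly as strings when the timestamp format is fixed-width and unambiguous. The "hh" custom format specifier is the 12-hour clock, so afternoon and morning keys can collide; "HH" gives the 24-hour form. A short illustration of the difference:

// Illustration of the timestamp-as-partition-key pitfall:
// "hh" is the 12-hour specifier, so 13:05 and 01:05 produce the same key.
DateTime t = new DateTime(2024, 1, 1, 13, 5, 0, DateTimeKind.Utc);
Console.WriteLine(t.ToString("yyyy-MM-dd hh:mm")); // 2024-01-01 01:05
Console.WriteLine(t.ToString("yyyy-MM-dd HH:mm")); // 2024-01-01 13:05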
public static async Task<bool> MakeQueueAsync(ILogger logger)
{
    Settings settings = Settings.Load();
    CloudStorageAccount account = CloudStorageAccount.Parse(settings.AzureWebJobsStorage);
    CloudTableClient tableClient = account.CreateCloudTableClient();
    CloudQueueClient queueClient = account.CreateCloudQueueClient();

    CloudQueue jobQueue = queueClient.GetQueueReference(settings.JobQueueName);
    _ = await jobQueue.CreateIfNotExistsAsync();

    CloudTable jobTable = tableClient.GetTableReference(settings.JobTableName);
    CloudTable jobHistoryTable = tableClient.GetTableReference(settings.JobHistoryTableName);
    _ = await jobHistoryTable.CreateIfNotExistsAsync();

    // Pick the most recently created job plan.
    JobEntity plan = null;
    {
        TableQuery<JobEntity> query = new TableQuery<JobEntity>().Take(8);
        TableQuerySegment<JobEntity> segment = await jobTable.ExecuteQuerySegmentedAsync(query, null);
        var ordered = segment.Results.OrderByDescending(i => i.CreateAt);
        if (!ordered.Any())
        {
            return true;
        }
        plan = ordered.First();
    }

    int step = int.Parse(settings.Step);
    if (step <= 0)
    {
        step = DEFAULT_STEP;
    }

    // Skip this run if the current section still has too few blobs to process.
    {
        string targetPath = settings.TargetPath;
        Uri uri = new Uri(targetPath);
        CloudBlobContainer blobContainer = new CloudBlobContainer(uri, account.Credentials);
        int section = plan.Current / step;
        string path = $"{plan.Version}/{plan.PartitionKey}/{section * step}";
        long fileCount = await StorageHelper.GetBlobFileCountAsync(blobContainer, path, int.Parse(settings.MaxResult));
        if (fileCount <= int.Parse(settings.StepThreshold) && fileCount > 0)
        {
            logger.LogInformation($"file count {fileCount} less than step threshold");
            return true;
        }
    }

    if (plan.Current >= plan.Amount)
    {
        // The plan is finished: move it from the job table to the history table.
        TableOperation delete = TableOperation.Delete(plan);
        _ = await jobTable.ExecuteAsync(delete);
        plan.UpdateAt = DateTime.Now;
        TableOperation insertOrMerge = TableOperation.InsertOrMerge(plan);
        _ = await jobHistoryTable.ExecuteAsync(insertOrMerge);
        return true;
    }
    else
    {
        // Advance the plan by one step (clamped to the remaining amount) and enqueue the work.
        int start = plan.Current;
        int count = step;
        if (plan.Current + step > plan.Amount)
        {
            count = plan.Amount - plan.Current;
            plan.Current = plan.Amount;
        }
        else
        {
            plan.Current += step;
        }
        plan.UpdateAt = DateTime.Now;
        TableOperation insertOrMerge = TableOperation.InsertOrMerge(plan);
        _ = await jobTable.ExecuteAsync(insertOrMerge);
        await MakeQueueMessageAsync(jobQueue, plan, start, count, settings.SourcePath, settings.TargetPath);
    }

    return true;
}
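The MakeQueueMessageAsync helper is not shown here. A minimal sketch of what it might look like, assuming the queue message is a JSON payload describing the slice of work (the payload shape is an assumption, not the actual helper):

// Hypothetical sketch; the message shape is assumed, not taken from the source.
static async Task MakeQueueMessageAsync(
    CloudQueue jobQueue, JobEntity plan, int start, int count, string sourcePath, string targetPath)
{
    // Serialize the work slice and enqueue it for a downstream worker.
    var payload = JsonConvert.SerializeObject(new
    {
        plan.PartitionKey,
        plan.Version,
        Start = start,
        Count = count,
        SourcePath = sourcePath,
        TargetPath = targetPath
    });
    await jobQueue.AddMessageAsync(new CloudQueueMessage(payload));
}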
public async Task RemoveFromAllRolesAsync(T user)
{
    bool error = false;

    // Collect every role row in the user's partition, following continuation tokens.
    List<TableUserRole> roles = new List<TableUserRole>();
    string partitionKeyQuery = TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, user.Id);
    TableQuery<TableUserRole> query = new TableQuery<TableUserRole>().Where(partitionKeyQuery);
    TableQuerySegment<TableUserRole> querySegment = null;
    while (querySegment == null || querySegment.ContinuationToken != null)
    {
        querySegment = await _rolesTable.ExecuteQuerySegmentedAsync(query, querySegment?.ContinuationToken);
        roles.AddRange(querySegment.Results);
    }

    TableBatchOperation batch = new TableBatchOperation();
    foreach (TableUserRole role in roles)
    {
        role.ETag = "*"; // Delete even if the entity has changed.
        batch.Add(TableOperation.Delete(role));
        if (batch.Count >= 100)
        {
            // Try executing as a batch; on success the batch is cleared,
            // so the per-operation fallback below has nothing to do.
            try
            {
                await _rolesTable.ExecuteBatchAsync(batch);
                batch.Clear();
            }
            catch { }

            // If the batch won't work, try each operation individually.
            foreach (TableOperation op in batch)
            {
                try
                {
                    await _rolesTable.ExecuteAsync(op);
                }
                catch
                {
                    error = true;
                }
            }
            batch.Clear();
        }
    }

    if (batch.Count > 0)
    {
        // Same batch-then-individual fallback for the remainder.
        try
        {
            await _rolesTable.ExecuteBatchAsync(batch);
            batch.Clear();
        }
        catch { }

        foreach (TableOperation op in batch)
        {
            try
            {
                await _rolesTable.ExecuteAsync(op);
            }
            catch
            {
                error = true;
            }
        }
    }

    if (error)
    {
        throw new Exception();
    }
}
public async Task RemoveAllLoginsAsync(T user)
{
    bool error = false;

    // Collect every login row in the user's partition, following continuation tokens.
    List<TableUserLogin> logins = new List<TableUserLogin>();
    string partitionKeyQuery = TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, user.Id);
    TableQuery<TableUserLogin> query = new TableQuery<TableUserLogin>().Where(partitionKeyQuery);
    TableQuerySegment<TableUserLogin> querySegment = null;
    while (querySegment == null || querySegment.ContinuationToken != null)
    {
        querySegment = await _loginTable.ExecuteQuerySegmentedAsync(query, querySegment?.ContinuationToken);
        logins.AddRange(querySegment.Results);
    }

    TableBatchOperation batch = new TableBatchOperation();
    TableBatchOperation batchIndex = new TableBatchOperation();
    foreach (TableUserLogin login in logins)
    {
        login.ETag = "*"; // Delete even if the entity has changed.
        batch.Add(TableOperation.Delete(login));

        TableUserLoginProviderKeyIndex providerKeyIndex =
            new TableUserLoginProviderKeyIndex(user.Id, login.ProviderKey, login.LoginProvider);
        providerKeyIndex.ETag = "*";
        batchIndex.Add(TableOperation.Delete(providerKeyIndex));

        if (batch.Count >= 100 || batchIndex.Count >= 100)
        {
            // Try executing as a batch; on success the batch is cleared,
            // so the per-operation fallback below has nothing to do.
            try
            {
                await _loginTable.ExecuteBatchAsync(batch);
                batch.Clear();
            }
            catch { }

            // If the batch won't work, try each operation individually.
            foreach (TableOperation op in batch)
            {
                try
                {
                    await _loginTable.ExecuteAsync(op);
                }
                catch
                {
                    error = true;
                }
            }

            // Delete the index entries individually because of their differing partition keys.
            foreach (TableOperation op in batchIndex)
            {
                try
                {
                    await _loginProviderKeyIndexTable.ExecuteAsync(op);
                }
                catch
                {
                    error = true;
                }
            }

            batch.Clear();
            batchIndex.Clear();
        }
    }

    if (batch.Count > 0 || batchIndex.Count > 0)
    {
        // Same batch-then-individual fallback for the remainder.
        try
        {
            await _loginTable.ExecuteBatchAsync(batch);
            batch.Clear();
        }
        // ReSharper disable once EmptyGeneralCatchClause
        catch { }

        foreach (TableOperation op in batch)
        {
            try
            {
                await _loginTable.ExecuteAsync(op);
            }
            catch
            {
                error = true;
            }
        }

        // Delete the index entries individually because of their differing partition keys.
        foreach (TableOperation op in batchIndex)
        {
            try
            {
                await _loginProviderKeyIndexTable.ExecuteAsync(op);
            }
            catch
            {
                error = true;
            }
        }
    }

    if (error)
    {
        throw new Exception();
    }
}
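The batch-then-individual fallback appears twice in each of the two methods above. A sketch of how it could be factored into a single helper (the name ExecuteBatchWithFallbackAsync is ours, not from the source):

// Hypothetical helper factoring out the batch-then-individual fallback;
// returns true only if every operation ultimately succeeded.
static async Task<bool> ExecuteBatchWithFallbackAsync(CloudTable table, TableBatchOperation batch)
{
    try
    {
        // Fast path: the whole batch succeeds in one round trip.
        await table.ExecuteBatchAsync(batch);
        batch.Clear();
        return true;
    }
    catch { }

    // Slow path: retry each operation individually so one bad entity
    // doesn't block the rest of the deletes.
    bool ok = true;
    foreach (TableOperation op in batch)
    {
        try
        {
            await table.ExecuteAsync(op);
        }
        catch
        {
            ok = false;
        }
    }
    batch.Clear();
    return ok;
}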
public AzureUtils GetSearchResult(string rowPropertyName, string expectedPropertyValue)
{
    tableSegment = cloudTable.ExecuteQuerySegmentedAsync(
        new TableQuery().Where(
            TableQuery.GenerateFilterCondition(rowPropertyName, QueryComparisons.Equal, expectedPropertyValue)),
        null).Result;
    return this;
}
private List<CsvTableRecord> FormatRecordResults(CloudTable cloudTable, TableQuerySegment tableSegment)
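Only the signature of FormatRecordResults is included above. Under the assumption that CsvTableRecord carries the entity keys plus its properties as strings (that shape is a guess, not from the source), a minimal sketch of a body might look like:

// Hypothetical body; CsvTableRecord's members (PartitionKey, RowKey, Fields) are assumed.
private List<CsvTableRecord> FormatRecordResults(CloudTable cloudTable, TableQuerySegment tableSegment)
{
    var records = new List<CsvTableRecord>();
    foreach (DynamicTableEntity entity in tableSegment.Results)
    {
        var record = new CsvTableRecord
        {
            PartitionKey = entity.PartitionKey,
            RowKey = entity.RowKey
        };
        // Flatten each table property into a string field for CSV output.
        foreach (var property in entity.Properties)
        {
            record.Fields[property.Key] = property.Value.PropertyAsObject?.ToString();
        }
        records.Add(record);
    }
    return records;
}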