/// <summary>
/// Deletes a single table from an environment. Deleting from the production
/// environment is refused unless the "--allow-production" flag is given.
/// </summary>
/// <param name="args">args[0] = target specification (parsed by TableTarget —
/// presumably environmentName:TableName, verify against TableTarget);
/// optional args[1] = "--allow-production".</param>
public async Task Execute(params string[] args)
{
    var target = new TableTarget(args.Length > 0 ? args[0] : null);
    bool productionDeleteAllowed = args.Length > 1 && args[1].Equals("--allow-production");

    if (!target.Valid)
    {
        Console.Out.WriteLine("Invalid arguments");
        Console.Out.WriteLine(Usage);
        return;
    }

    // Safety net: wiping a production table requires an explicit opt-in flag.
    bool targetsProduction = target.Environment.Equals(
        EnvironmentClient.ProductionSuffix, StringComparison.OrdinalIgnoreCase);
    if (targetsProduction && !productionDeleteAllowed)
    {
        Console.Out.WriteLine("Cannot delete a table from production.");
        return;
    }

    Console.Out.WriteLine("Deleting table {0} from environment {1}.", target.TableName, target.Environment);

    var table = new EnvironmentClient(target.Environment).GetTable(target.TableName);
    if (!await table.ExistsAsync())
    {
        Console.Out.WriteLine("Target table does not exist, exiting.");
        return;
    }

    await table.DeleteAsync();
}
/// <summary>
/// Deletes one or more rows (all under the same partition key) from a table
/// in the given environment.
/// </summary>
/// <param name="args">Parsed by DeleteRowsArguments: target table, partition key, row id(s).</param>
public async Task Execute(params string[] args)
{
    var arguments = DeleteRowsArguments.Parse(args);
    if (!arguments.Valid)
    {
        Console.Out.WriteLine("Must give target table, partitionkey, and rowid(s).");
        return;
    }

    Console.Out.WriteLine(
        "Deleting rows \r\n{0}\r\n from environment table {1}:{2}.",
        string.Join("\r\n", arguments.RowIds),
        arguments.Target.Environment,
        arguments.Target.TableName);

    var table = new EnvironmentClient(arguments.Target.Environment)
        .GetTable(arguments.Target.TableName);
    if (!await table.ExistsAsync())
    {
        Console.Out.WriteLine("Target table does not exist, exiting.");
        return;
    }

    // Issue one delete per row id and wait for all of them. ETag "*" makes
    // each delete unconditional, regardless of the row's current version.
    var pendingDeletes = new List<Task>();
    foreach (var rowId in arguments.RowIds)
    {
        var entity = new DynamicTableEntity(
            arguments.PartitionKey, rowId, "*", new Dictionary<string, EntityProperty>());
        pendingDeletes.Add(table.ExecuteAsync(TableOperation.Delete(entity)));
    }

    await Task.WhenAll(pendingDeletes);
}
/// <summary>
/// Wires up every API sub-client. All clients share the same settings object
/// and the same HttpClient instance (a single reused HttpClient avoids
/// socket exhaustion).
/// </summary>
/// <param name="settings">Service settings handed to every client.</param>
/// <param name="httpClient">Shared HTTP client handed to every client.</param>
private void Initialize(ICloudManagerServiceSettings settings, HttpClient httpClient)
{
    // Customer-facing clients.
    Customer = new CustomerClient(settings, httpClient);
    CustomerServiceProvider = new CustomerServiceProviderClient(settings, httpClient);
    ServiceProvider = new ServiceProviderClient(settings, httpClient);
    Service = new ServiceClient(settings, httpClient);

    // Environment and operations clients.
    Environment = new EnvironmentClient(settings, httpClient);
    EnvironmentProcess = new EnvironmentProcessClient(settings, httpClient);
    Update = new UpdateClient(settings, httpClient);
    Maintenance = new MaintenanceClient(settings, httpClient);

    // Backup clients.
    CloudBackup = new CloudBackupClient(settings, httpClient);
    ContentBackup = new ContentBackupClient(settings, httpClient);
}
/// <summary>
/// Converts the legacy database dump into named event streams and appends
/// each stream to the event store of the given environment.
/// </summary>
/// <param name="environment">Name of the target environment.</param>
/// <returns>A task that completes when every stream has been appended.</returns>
public Task Upload(string environment)
{
    Console.Out.WriteLine("Uploading legacy data to " + environment);
    var env = new EnvironmentClient(environment);
    // NOTE(review): this echoes the storage connection string (a secret) to
    // the console — consider masking or removing it.
    Console.Out.WriteLine("Using connection string " + env.Options.ConnectionString);

    var eventStore = new EventStream(env.Options);
    var converter = new DatabaseDumpToEventsInitializer();

    // Kick off one append per named stream, then hand back a task covering all of them.
    var appendTasks = new List<Task>();
    foreach (var namedEventStream in converter.LegacyDataAsEvents())
    {
        appendTasks.Add(eventStore.AppendEvents(namedEventStream.Name, namedEventStream.Events));
    }

    return Task.WhenAll(appendTasks);
}
/// <summary>
/// Moves a single row to a new partition key: inserts a copy of the row
/// under the new key, then deletes the original.
/// </summary>
/// <param name="args">Parsed by ChangePartitionKeyArguments: source table,
/// row id, and both the current and the new partition key.</param>
public async Task Execute(params string[] args)
{
    var parsedArguments = ChangePartitionKeyArguments.Parse(args);
    if (!parsedArguments.Valid)
    {
        Console.WriteLine("Must give source table, row id, and both current and new partition keys.");
        return;
    }

    Console.Out.WriteLine(
        "Changing partition key to {4} for row {0}:{1} {2}-{3}.",
        parsedArguments.Source.Environment,
        parsedArguments.Source.TableName,
        parsedArguments.Partition,
        parsedArguments.Id,
        parsedArguments.NewPartition);

    var sourceTable = new EnvironmentClient(parsedArguments.Source.Environment)
        .GetTable(parsedArguments.Source.TableName);
    if (!await sourceTable.ExistsAsync())
    {
        Console.Out.WriteLine("Source table does not exist, exiting.");
        return;
    }

    var retrieveRow = TableOperation.Retrieve(parsedArguments.Partition, parsedArguments.Id);
    var rowToChange = (await sourceTable.ExecuteAsync(retrieveRow)).Result as DynamicTableEntity;
    if (rowToChange == null)
    {
        // Previously a missing row surfaced as a NullReferenceException.
        Console.Out.WriteLine("Row not found, exiting.");
        return;
    }

    // BUG FIX: the old code did Delete(...).ContinueWith(t => ExecuteAsync(Insert))
    // and awaited only the outer task — the inner Insert task was never awaited,
    // so the method could complete before the insert did and insert failures went
    // unobserved. It also deleted before inserting, losing the row if the insert
    // then failed. Now: insert the copy first, then delete the original.
    rowToChange.PartitionKey = parsedArguments.NewPartition;
    await sourceTable.ExecuteAsync(TableOperation.Insert(rowToChange));

    // Delete the original by its old keys; ETag "*" makes it unconditional.
    var originalRow = new DynamicTableEntity(
        parsedArguments.Partition, parsedArguments.Id, "*", new Dictionary<string, EntityProperty>());
    await sourceTable.ExecuteAsync(TableOperation.Delete(originalRow));
}
/// <summary>
/// Uploads the embedded "satkuxii" seed data into the event store of the
/// given environment: the happening inventory entry, the happening itself,
/// each checkpoint, and finally the checkpoint list.
/// </summary>
/// <param name="environment">Name of the target environment.</param>
/// <returns>A task that completes when all events have been appended.</returns>
public async Task Upload(string environment)
{
    Console.Out.WriteLine("Uploading satkuxii data to " + environment);
    var env = new EnvironmentClient(environment);
    // NOTE(review): this echoes the storage connection string (a secret) to
    // the console — consider masking or removing it.
    Console.Out.WriteLine("Using connection string " + env.Options.ConnectionString);

    var jsonData = GetEmbeddedResource("satkuxii.json");
    var data = JsonConvert.DeserializeObject <SatkuData>(jsonData);
    Console.Out.WriteLine("Deserialized, checkpoint count " + data.Checkpoints.Count);

    var stream = new EventStream(env.Options);

    // BUG FIX: these appends previously used .Wait() inside a Task-returning
    // method — blocking thread-pool threads and wrapping failures in
    // AggregateException. The method is now async and awaits each append.
    // Timestamps are staggered slightly before the first checkpoint so the
    // events replay in a sensible order.
    await stream.AppendEvents(
        "HappeningsInventory-" + HappeningMessageHandler.RootAggregateId,
        new List <IEvent>
        {
            new HappeningInventoryItemCreated(
                data.Happening,
                data.IsDefault,
                data.Checkpoints.First().Timestamp.AddSeconds(-10))
        });

    var happeningCreated = new HappeningCreated(data.Happening)
    {
        Timestamp = data.Checkpoints.First().Timestamp.AddSeconds(-9)
    };
    await stream.AppendEvents("Happening-" + data.Happening, new List <IEvent> { happeningCreated });

    var checkpointListEvents = new List <IEvent>();
    int order = 1;
    foreach (var c in data.Checkpoints)
    {
        // To keep data simpler to edit by hand, set some properties automatically.
        c.HappeningId = data.Happening;
        c.Order = order;
        order++;

        var validatedEvent = new CheckpointValidated(
            data.Happening,
            c.Id,
            c.Order,
            c.CheckpointType,
            c.Name,
            c.Latitude,
            c.Longitude,
            c.DistanceFromPrevious,
            c.DistanceFromStart)
        {
            // Use timestamp little after the happening.
            Timestamp = c.Timestamp.AddSeconds(-1)
        };
        checkpointListEvents.Add(validatedEvent);

        // NOTE(review): the raw checkpoint `c` is appended to the
        // per-checkpoint stream while `validatedEvent` only goes into the
        // list stream — confirm this asymmetry is intentional.
        await stream.AppendEvents("Checkpoint-" + c.Id, new List <IEvent> { c });
    }

    await stream.AppendEvents("CheckpointList-" + data.Happening, checkpointListEvents);
}
/// <summary>
/// Runs the configured query against the "events" table of the target
/// environment and prints every matching event to the console, followed by
/// a count and elapsed time.
/// </summary>
/// <param name="args">args[0] = target environment (via ArgsHelper); args[1] = query argument.</param>
public async Task Execute(params string[] args)
{
    var stopWatch = Stopwatch.StartNew();

    string target;
    if (!ArgsHelper.GetTargetEnvironmentFromArgs(args, out target, true))
    {
        return;
    }

    string queryArg = args.Length >= 2 ? args[1] : null;
    if (string.IsNullOrWhiteSpace(queryArg))
    {
        Console.Out.WriteLine(this.queryArgumentMissingError);
        return;
    }

    Console.Out.WriteLine(this.queryingInfo, queryArg, target);

    var targetTable = new EnvironmentClient(target).GetTable("events");
    if (!await targetTable.ExistsAsync())
    {
        Console.Out.WriteLine("Event store does not exist, exiting.");
        return;
    }

    // Page through results one segment at a time until the continuation
    // token runs out. (Renamed the local so it no longer shadows the
    // `this.query` filter-builder field.)
    var tableQuery = new TableQuery <EventEntity>().Where(this.query(queryArg));
    var foundEntities = new List <EventEntity>();
    TableQuerySegment <EventEntity> segment = null;
    do
    {
        segment = await targetTable.ExecuteQuerySegmentedAsync(
            tableQuery, segment != null ? segment.ContinuationToken : null);
        foundEntities.AddRange(segment);
    }
    while (segment.ContinuationToken != null);

    foreach (var entity in foundEntities)
    {
        Console.Out.WriteLine(
            "PartitionKey: {0}\r\n" +
            "RowKey: {1}\r\n" +
            "DataType: {2}\r\n" +
            "EventTimeStamp: {3}\r\n" +
            "{4}\r\n",
            entity.PartitionKey,
            entity.RowKey,
            entity.DataType,
            entity.EventTimestamp,
            entity.Data);
    }

    Console.Out.WriteLine(
        "Search complete, found {0:n0} events in {1:n1} seconds.",
        foundEntities.Count,
        stopWatch.Elapsed.TotalSeconds);
}
/// <summary>
/// Copies every row of a source table into a target table (creating the
/// target if needed), batching inserts per partition key.
/// </summary>
/// <param name="args">Source and target, both in the form environmentName:TableName.</param>
public async Task Execute(params string[] args)
{
    var stopWatch = Stopwatch.StartNew();
    var parsedArguments = TableCopyArguments.Parse(args);
    if (!parsedArguments.Valid)
    {
        Console.WriteLine("Must give source and target, both in a form environmentName:TableName.");
        return;
    }

    Console.Out.WriteLine(
        "Copying {0} from {1} to {2} at {3}.",
        parsedArguments.Source.TableName,
        parsedArguments.Source.Environment,
        parsedArguments.Target.TableName,
        parsedArguments.Target.Environment);

    var sourceTable = new EnvironmentClient(parsedArguments.Source.Environment).GetTable(parsedArguments.Source.TableName);
    var targetTable = new EnvironmentClient(parsedArguments.Target.Environment).GetTable(parsedArguments.Target.TableName);
    if (!await sourceTable.ExistsAsync())
    {
        Console.Out.WriteLine("Source table does not exist, exiting.");
        return;
    }

    await targetTable.CreateIfNotExistsAsync();

    // Read the whole source table, one segment at a time.
    var allQuery = new TableQuery <DynamicTableEntity>();
    TableQuerySegment <DynamicTableEntity> querySegment = null;
    var sourceEntities = new List <DynamicTableEntity>();
    while (querySegment == null || querySegment.ContinuationToken != null)
    {
        querySegment = await sourceTable.ExecuteQuerySegmentedAsync(
            allQuery, querySegment != null ? querySegment.ContinuationToken : null);
        sourceEntities.AddRange(querySegment);
    }

    // An Azure batch operation must share a single partition key and holds at
    // most 100 entities, so group rows by partition and chunk each group.
    const int BatchSize = 100;
    var batches = new List <TableBatchOperation>();
    foreach (var partitionGroup in sourceEntities.GroupBy(e => e.PartitionKey))
    {
        int i = 1;
        foreach (var chunk in partitionGroup.Chunk(BatchSize))
        {
            Console.Out.WriteLine(partitionGroup.Key + " - Batch " + i);
            var batch = new TableBatchOperation();
            foreach (var entity in chunk)
            {
                batch.Insert(entity);
            }

            batches.Add(batch);
            i++;
        }
    }

    Console.Out.WriteLine("This might take a while...");

    // BUG FIX: the old code called Task.WaitAll(waits, TimeSpan.FromHours(1))
    // inside an async method — blocking a thread-pool thread — and ignored the
    // boolean result, so a timeout fell through and still printed "Copying
    // successful". Awaiting Task.WhenAll neither blocks nor swallows failures.
    await Task.WhenAll(batches.Select(targetTable.ExecuteBatchAsync));

    Console.Out.WriteLine(
        "\r\nCopying successful, copied {0:n0} rows in {1:n1} seconds.",
        sourceEntities.Count,
        stopWatch.Elapsed.TotalSeconds);
}