/// <summary> /// Initiates a parallel copy operation for all of the included requests. /// </summary> /// <param name="client">The S3 client to use</param> /// <param name="requests">The copy requests to process</param> /// <param name="partSize">The size of the part to use for a multipart copy.</param> /// <param name="preferMultipart">If set to true, the method will use a multipart copy as long as the part size is less than the object size for any object, even /// those under 5 GiB.</param> /// <returns>The copy object responses.</returns> public static async Task <BulkCopyResponse> BulkCopyAsync(this IAmazonS3 client, IEnumerable <CopyObjectRequest> requests, long partSize, bool preferMultipart = false) { BulkCopyRequest request = new BulkCopyRequest(requests) { PartSize = partSize, PreferMultipart = preferMultipart }; return(await BulkCopyAsync(client, request)); }
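// Usage sketch (not from the original source): shows how this extension method might be
// called. The bucket names, keys, and the 16 MiB part size are placeholder values.
IAmazonS3 s3 = new AmazonS3Client();

List<CopyObjectRequest> copyRequests = new List<CopyObjectRequest>
{
    new CopyObjectRequest
    {
        SourceBucket = "source-bucket",
        SourceKey = "data/file1.bin",
        DestinationBucket = "destination-bucket",
        DestinationKey = "data/file1.bin"
    }
};

// 16 MiB parts; preferMultipart is left at its default of false.
BulkCopyResponse response = await s3.BulkCopyAsync(copyRequests, 16 * 1024 * 1024);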
internal DataSource(BulkCopyRequest biRequest, Position position, Dictionary<string, object> param)
{
    _request = biRequest;
    _param = param;

    _sessionData = new BulkCopySession
    {
        ResumeOnError = biRequest.ResumeOnError,
        ConnectionString = position == Position.Source
            ? biRequest.LocalConnectionString
            : biRequest.RemoteConnectionString
    };

    _biProvider = new BulkCopyProvider(_sessionData);
}
public void Options_ShouldNotBeNull_UnderAllConditions()
{
    // Arrange
    var input = new BulkCopyRequest();

    // Act
    BulkCopyCommandOptionsBuilder output = new BulkCopyCommandOptionsBuilder(input);

    // Assert
    output.ShouldNotBeNull();
    input.ShouldNotBeNull();
    input.Options.ShouldNotBeNull();

    // Print
    WriteLine(output);
}
public void KeepIdentity_ShouldSetTheFlag_WhenFlagIsTrue()
{
    // Arrange
    var input = new BulkCopyRequest();

    // Act
    BulkCopyCommandOptionsBuilder output = new BulkCopyCommandOptionsBuilder(input).KeepIdentity();

    // Assert
    output.ShouldNotBeNull();
    input.ShouldNotBeNull();
    input.Options.ShouldNotBeNull();
    input.Options.KeepIdentity.ShouldNotBeNull();
    input.Options.KeepIdentity.ShouldBe(true);

    // Print
    WriteLine(output);
}
public void CheckConstraints_ShouldDisableTheFlag_WhenFlagIsFalse()
{
    // Arrange
    var input = new BulkCopyRequest();

    // Act
    BulkCopyCommandOptionsBuilder output = new BulkCopyCommandOptionsBuilder(input).CheckConstraints(false);

    // Assert
    output.ShouldNotBeNull();
    input.ShouldNotBeNull();
    input.Options.ShouldNotBeNull();
    input.Options.CheckConstraints.ShouldNotBeNull();
    input.Options.CheckConstraints.ShouldBe(false);

    // Print
    WriteLine(output);
}
public void AllowEncryptedValueModifications_ShouldSetTheFlag_WhenFlagIsTrue()
{
    // Arrange
    var input = new BulkCopyRequest();

    // Act
    BulkCopyCommandOptionsBuilder output = new BulkCopyCommandOptionsBuilder(input).AllowEncryptedValueModifications();

    // Assert
    output.ShouldNotBeNull();
    input.ShouldNotBeNull();
    input.Options.ShouldNotBeNull();
    input.Options.AllowEncryptedValueModifications.ShouldNotBeNull();
    input.Options.AllowEncryptedValueModifications.ShouldBe(true);

    // Print
    WriteLine(output);
}
public void UseInternalTransaction_ShouldSetTheFlag_WhenFlagIsTrue()
{
    // Arrange
    var input = new BulkCopyRequest();

    // Act
    BulkCopyCommandOptionsBuilder output = new BulkCopyCommandOptionsBuilder(input).UseInternalTransaction();

    // Assert
    output.ShouldNotBeNull();
    input.ShouldNotBeNull();
    input.Options.ShouldNotBeNull();
    input.Options.UseInternalTransaction.ShouldNotBeNull();
    input.Options.UseInternalTransaction.ShouldBe(true);

    // Print
    WriteLine(output);
}
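// Usage sketch (not from the original source): chains the builder methods exercised by the
// tests above, assuming each method returns the builder instance as the assertions suggest.
var request = new BulkCopyRequest();

BulkCopyCommandOptionsBuilder builder = new BulkCopyCommandOptionsBuilder(request)
    .KeepIdentity()
    .CheckConstraints(false)
    .AllowEncryptedValueModifications()
    .UseInternalTransaction();

// request.Options now carries the accumulated flag values.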
/// <summary>
/// Performs a number of async copy object operations in parallel; all public methods should
/// call this.
/// </summary>
/// <param name="client">The S3 client to use.</param>
/// <param name="request">The bulk copy request to process.</param>
/// <param name="deleteSource">If true, each source object is deleted after a successful copy (i.e. the operation is a move).</param>
/// <returns>The bulk copy response.</returns>
private static async Task<BulkCopyResponse> CoreBulkCopyAsync(this IAmazonS3 client, BulkCopyRequest request, bool deleteSource)
{
    ParameterTests.NonNull(request, "request");
    ParameterTests.OutOfRange(request.PartSize >= Constants.MINIMUM_MULTIPART_PART_SIZE, "partSize", $"The part size must be at least {Constants.MINIMUM_MULTIPART_PART_SIZE} bytes.");
    ParameterTests.OutOfRange(request.PartSize <= Constants.MAXIMUM_MULTIPART_PART_SIZE, "partSize", $"The part size cannot exceed {Constants.MAXIMUM_MULTIPART_PART_SIZE} bytes.");

    // Make sure there are no requests that have the same source and destination.
    IEnumerable<CopyObjectRequest> errors = request.Requests
        .Where(x => x.SourceKey == x.DestinationKey &&
                    x.SourceBucket != null &&
                    x.SourceBucket.Equals(x.DestinationBucket, StringComparison.OrdinalIgnoreCase));

    if (errors.Any())
    {
        throw new SourceDestinationSameException("The bulk copy/move operation contained requests that had the same source and destination and could cause the accidental loss of data.", errors);
    }

    List<CopyObjectRequestResponse> responses = new List<CopyObjectRequestResponse>();
    List<FailedCopyRequest> failures = new List<FailedCopyRequest>();

    // Don't copy objects that have the same source and destination. Object keys are
    // case sensitive, but bucket names are not (they are all lower case).
    IEnumerable<CopyObjectRequest> filtered = request.Requests
        .Where(x => !(x.SourceKey == x.DestinationKey &&
                      x.SourceBucket != null &&
                      x.SourceBucket.Equals(x.DestinationBucket, StringComparison.OrdinalIgnoreCase)));

    int counter = 0;

    foreach (List<CopyObjectRequest> chunk in filtered.Chunk(request.MaxConcurrency))
    {
        Debug.WriteLine($"Processing request chunk {++counter}.");

        List<Task<CopyObjectRequestResponse>> insideLoop = new List<Task<CopyObjectRequestResponse>>();

        foreach (CopyObjectRequest req in chunk)
        {
            try
            {
                if (request.PreferMultipart)
                {
                    insideLoop.Add(client.CopyOrMoveObjectAsync(req, request.PartSize, deleteSource, preferMultipartLogic));
                }
                else
                {
                    insideLoop.Add(client.CopyOrMoveObjectAsync(req, request.PartSize, deleteSource, standardMultipartLogic));
                }
            }
            catch (Exception e)
            {
                failures.Add(new FailedCopyRequest(req, e, FailureMode.COPY));
            }
        }

        try
        {
            IEnumerable<CopyObjectRequestResponse> responseChunk = await Task.WhenAll(insideLoop);
            responses.AddRange(responseChunk);
        }
        catch (Exception e)
        {
            failures.Add(new FailedCopyRequest(null, e, FailureMode.COPY));
        }
    }

    try
    {
        var dict = responses.ToDictionary(x => x.Request, x => x.Response);
        return new BulkCopyResponse(dict, failures);
    }
    catch (Exception e)
    {
        return null;
    }
}
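// Sketch (assumption): the loop above enumerates List<CopyObjectRequest> batches from
// filtered.Chunk(...), which suggests a custom chunking helper rather than the .NET 6
// Enumerable.Chunk that yields arrays. A minimal version of such a helper could look like this.
public static class EnumerableChunkExtensions
{
    public static IEnumerable<List<T>> Chunk<T>(this IEnumerable<T> source, int size)
    {
        List<T> batch = new List<T>(size);

        foreach (T item in source)
        {
            batch.Add(item);

            if (batch.Count == size)
            {
                // Yield a full batch and start a new one.
                yield return batch;
                batch = new List<T>(size);
            }
        }

        if (batch.Count > 0)
        {
            // Yield the final, partially filled batch.
            yield return batch;
        }
    }
}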
/// <summary>
/// Initiates a parallel copy operation for all of the included requests. Uses a
/// 5 MiB part size for multipart uploads.
/// </summary>
/// <param name="client">The S3 client to use.</param>
/// <param name="request">The bulk copy request to process.</param>
/// <returns>The copy object responses.</returns>
public static async Task<BulkCopyResponse> BulkCopyAsync(this IAmazonS3 client, BulkCopyRequest request)
{
    return await CoreBulkCopyAsync(client, request, false);
}
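// Usage sketch (not from the original source): calls the request-based overload directly.
// PartSize, PreferMultipart, and MaxConcurrency are the settings read by CoreBulkCopyAsync;
// whether MaxConcurrency has a public setter is an assumption, and the values are placeholders.
BulkCopyRequest bulkRequest = new BulkCopyRequest(copyRequests)
{
    PartSize = 16 * 1024 * 1024,
    PreferMultipart = true,
    MaxConcurrency = 10
};

BulkCopyResponse bulkResponse = await s3.BulkCopyAsync(bulkRequest);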
public OracleBulkCopyPreProcessor(BulkCopyRequest bulkCopyRequest)
{
    _bulkCopyRequest = bulkCopyRequest;
}
public override async Task<BulkCopyResult> BulkCopyAsync(BulkCopyRequest request, CancellationToken cancellationToken)
{
    return await new SqlServerBulkCopyCommand(ConnectionProvider).ExecuteAsync(request, cancellationToken);
}
public override BulkCopyResult BulkCopy(BulkCopyRequest request)
{
    return new SqlServerBulkCopyCommand(ConnectionProvider).Execute(request);
}
public BulkCopyResponse BulkCopy(BulkCopyRequest request)
{
    var sourceParam = new Dictionary<string, object>();
    var destParam = new Dictionary<string, object>();

    try
    {
        if (request.BatchSize <= 0)
        {
            request.BatchSize = 100;
        }

        _eventArgs = new BulkCopyProgressEventArgs
        {
            TotalWork = 0,
            CompletedWork = 0,
            Result = string.Empty
        };

        sourceParam.Add("ScriptType", new[] { ScriptType.Select });
        sourceParam.Add("SkipTables", request.Skip);
        _sourceDb = new DataSource(request, Position.Source, sourceParam);

        destParam.Add("ScriptType", new[] { ScriptType.Recovery, ScriptType.Truncate, ScriptType.DisableConstraint });
        destParam.Add("SkipTables", request.Skip);
        _destinationDb = new DataSource(request, Position.Destination, destParam);

        ReportStatus("Initializing process...");

        if (request.CreateObjects)
        {
            Parallel.Invoke(
                () => _sourceDb.Initialize(),
                () => _sourceDb.BuildObjects());

            _destinationDb.Initialize();

            ReportStatus("Executing schema.sql");
            _destinationDb.ExecScript(Runtime.Path + "schema.sql");

            ReportStatus("Executing tables.sql");
            _destinationDb.ExecScript(Runtime.Path + "tables.sql");

            _destinationDb.SetSourceScript(_sourceDb.GetSourceScript());
        }
        else
        {
            Parallel.Invoke(
                () => _sourceDb.Initialize(),
                () => _destinationDb.Initialize());
        }

        ReportStatus("Analyzing data...");

        var ds = _sourceDb.GetRowCount();
        var query = ds.Tables[0].AsEnumerable();
        var tables = query.Select(dr => dr.Field<string>("tablename")).ToArray();

        _eventArgs.TotalWork = (uint)tables.Length;

        var currentIndex = 1;
        var skip = _sourceDb.GetSkipList();

        if (!request.ResumeOnError && skip.Count > 0)
        {
            foreach (var kvp in skip)
            {
                ReportStatus($"Error occurred while processing table [{kvp.Key}] --{kvp.Value} ...");
            }

            return null;
        }

        foreach (var table in tables)
        {
            long rowsToBeStaged;

            try
            {
                rowsToBeStaged = query.Where(dr => dr.Field<string>("tablename").ToLower() == table.ToLower())
                    .Select(dr => dr.Field<long>("rowcount"))
                    .First();
            }
            catch
            {
                rowsToBeStaged = 0;
            }

            ReportStatus("Sending: [" + table + "]...");

            if (rowsToBeStaged == 0 || skip.ContainsKey(table) || (request.Skip != null && request.Skip.Contains(table)))
            {
                if (skip.ContainsKey(table) || (request.Skip != null && request.Skip.Contains(table)))
                {
                    ReportStatus("Skipping table [" + table + "] ...");
                }
                else
                {
                    ReportStatus($"Transferring: [{table}] rows [{0}/{0}]...");
                }

                _eventArgs.CompletedWork += 1;
                continue;
            }

            if (rowsToBeStaged > 0)
            {
                currentIndex = (int)Math.Floor(rowsToBeStaged / (decimal)request.BatchSize);
                currentIndex = currentIndex > 0 ? currentIndex : 1;

                if (rowsToBeStaged != currentIndex * request.BatchSize && rowsToBeStaged > request.BatchSize)
                {
                    currentIndex++;
                }
            }

            for (var i = 1; i <= currentIndex; i++)
            {
                try
                {
                    var biLocalProvider = _sourceDb.GetProvider(table, i);
                    var biRemoteProvider = _destinationDb.GetProvider(table, i);

                    var agent = new BulkCopyAgent();
                    agent.ChangesSelected += agent_ChangesSelected;
                    agent.ChangesApplied += agent_ChangesApplied;
                    agent.LocalProvider = biLocalProvider;
                    agent.RemoteProvider = biRemoteProvider;

                    var writerows = agent.WriteToServer();

                    _changeTotal += Convert.ToUInt32(writerows);
                    _changeFailed += 0;
                    _changeApplied += Convert.ToUInt32(writerows);
                }
                catch (Exception ex)
                {
                    ReportStatus(
                        $"Failed Transferring: [{table}] rows [{(rowsToBeStaged < i * request.BatchSize ? rowsToBeStaged : i * request.BatchSize)}/{rowsToBeStaged}]...\nError:{ex}");

                    if (request.ResumeOnError)
                    {
                        break;
                    }
                    else
                    {
                        throw;
                    }
                }

                ReportStatus(
                    $"Transferring: [{table}] rows [{(rowsToBeStaged < i * request.BatchSize ? rowsToBeStaged : i * request.BatchSize)}/{rowsToBeStaged}]...");
            }

            _eventArgs.CompletedWork += 1;
        }

        _eventArgs.Result = "";
        RaiseSessionProgress(_eventArgs);

        var status = new Dictionary<string, Dictionary<string, string>> { { "Skipped", skip } };
        _response = new BulkCopyResponse(_startTime, DateTime.Now, _changeTotal, _changeApplied, _changeFailed, status);
    }
    catch (Exception ex)
    {
        WriteLine(ex.ToString());
        throw;
    }
    finally
    {
        _sourceDb.Finalize(null);

        destParam.Add("EnableConstraintCheck", true);

        if (request.CreateObjects)
        {
            ReportStatus("Executing indexes.sql");
            _destinationDb.ExecScript(Runtime.Path + "indexes.sql");

            ReportStatus("Executing views.sql");
            _destinationDb.ExecScript(Runtime.Path + "views.sql");

            ReportStatus("Executing procs.sql");
            _destinationDb.ExecScript(Runtime.Path + "procs.sql");

            ReportStatus("Executing fks.sql");
            _destinationDb.ExecScript(Runtime.Path + "fks.sql");
        }

        _destinationDb.Finalize(destParam);
    }

    return _response;
}
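// Usage sketch (not from the original source): the connection strings and table names are
// placeholders. LocalConnectionString, RemoteConnectionString, BatchSize, CreateObjects,
// ResumeOnError, and Skip are the members referenced by BulkCopy and DataSource above;
// `engine` stands in for whatever class exposes this BulkCopy method.
var migrationRequest = new BulkCopyRequest
{
    LocalConnectionString = "Data Source=source-server;Initial Catalog=SourceDb;Integrated Security=True",
    RemoteConnectionString = "Data Source=dest-server;Initial Catalog=DestDb;Integrated Security=True",
    BatchSize = 500,
    CreateObjects = true,
    ResumeOnError = true,
    Skip = new[] { "AuditLog" } // assumed to be a string collection of table names to skip
};

BulkCopyResponse result = engine.BulkCopy(migrationRequest);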
/// <summary>
/// Inserts a set of records from a <see cref="DataTable"/> into a database table in a single transaction.
/// </summary>
/// <param name="request">The data needed to execute the bulk copy command.</param>
/// <param name="cancellationToken">The CancellationToken from the caller.</param>
/// <returns>The result of the command.</returns>
public abstract Task<BulkCopyResult> BulkCopyAsync(BulkCopyRequest request, CancellationToken cancellationToken);
/// <summary>
/// Inserts a set of records from a <see cref="DataTable"/> into a database table in a single transaction.
/// </summary>
/// <param name="request">The data needed to execute the bulk copy command.</param>
/// <returns>The result of the command.</returns>
public abstract BulkCopyResult BulkCopy(BulkCopyRequest request);
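// Usage sketch (not from the original source): `command` is a concrete subclass such as the
// SQL Server implementation shown earlier. SourceData and DestinationTableName are assumed
// property names, since the request's shape is not shown in these snippets.
DataTable table = new DataTable("Orders");
table.Columns.Add("Id", typeof(int));
table.Columns.Add("Total", typeof(decimal));
table.Rows.Add(1, 19.99m);

var copyRequest = new BulkCopyRequest
{
    SourceData = table,                   // assumed property name
    DestinationTableName = "dbo.Orders"   // assumed property name
};

BulkCopyResult syncResult = command.BulkCopy(copyRequest);
// Or asynchronously:
// BulkCopyResult asyncResult = await command.BulkCopyAsync(copyRequest, CancellationToken.None);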