private void DownloadUntilFinished(PermissionWindowCacheDownloader downloader, IDataLoadEventListener listener,
    GracefulCancellationToken cancellationToken)
{
    try
    {
        while (true)
        {
            cancellationToken.ThrowIfCancellationRequested();

            var result = RetryMode
                ? downloader.RetryDownload(listener, cancellationToken)
                : downloader.Download(listener, cancellationToken);

            switch (result)
            {
                case RetrievalResult.NotPermitted:
                    if (TerminateIfOutsidePermissionWindow)
                    {
                        listener.OnNotify(this, new NotifyEventArgs(ProgressEventType.Information,
                            "Download not permitted at this time so exiting"));
                        return;
                    }

                    listener.OnNotify(this, new NotifyEventArgs(ProgressEventType.Information,
                        "Download not permitted at this time, sleeping for 60 seconds"));

                    // Sleep for a while, but keep one eye open for cancellation requests
                    const int sleepTime = 60000;
                    const int cancellationCheckInterval = 1000;
                    var elapsedTime = 0;

                    while (elapsedTime < sleepTime)
                    {
                        Task.Delay(cancellationCheckInterval).Wait();
                        cancellationToken.ThrowIfCancellationRequested();
                        elapsedTime += cancellationCheckInterval;
                    }

                    break;

                case RetrievalResult.Complete:
                    listener.OnNotify(this, new NotifyEventArgs(ProgressEventType.Information,
                        "Download completed successfully."));
                    return;

                default:
                    listener.OnNotify(this, new NotifyEventArgs(ProgressEventType.Information,
                        "Download ended: " + result));
                    return;
            }
        }
    }
    catch (OperationCanceledException)
    {
        listener.OnNotify(this, new NotifyEventArgs(ProgressEventType.Warning,
            "Cache download cancelled: " + downloader));
    }
}
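// A minimal sketch of an alternative to the polling sleep above, assuming the
// GracefulCancellationToken's AbortToken (used elsewhere in this section) is the token
// that should interrupt the wait: Task.Delay accepts a CancellationToken directly, so a
// cancellation request wakes the sleep immediately rather than after up to one second.
// SleepWithCancellation is a hypothetical helper, not part of the original class.
private static void SleepWithCancellation(int milliseconds, GracefulCancellationToken cancellationToken)
{
    try
    {
        // the delay completes early (by faulting) if the token is cancelled
        Task.Delay(milliseconds, cancellationToken.AbortToken).Wait();
    }
    catch (AggregateException ex) when (ex.InnerException is TaskCanceledException)
    {
        // surface the cancellation the same way ThrowIfCancellationRequested would
        throw new OperationCanceledException("Sleep interrupted by cancellation", ex.InnerException);
    }
}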
public void Execute(IDataLoadJob job, IEnumerable<MigrationColumnSet> toMigrate, IDataLoadInfo dataLoadInfo,
    GracefulCancellationToken cancellationToken)
{
    _dataLoadInfo = dataLoadInfo;

    // Column set for each table we are migrating
    foreach (var columnsToMigrate in toMigrate)
    {
        var inserts = 0;
        var updates = 0;

        var tableLoadInfo = dataLoadInfo.CreateTableLoadInfo("",
            columnsToMigrate.DestinationTable.GetFullyQualifiedName(),
            new[] { new DataSource(columnsToMigrate.SourceTable.GetFullyQualifiedName(), DateTime.Now) }, 0);

        try
        {
            MigrateTable(job, columnsToMigrate, dataLoadInfo.ID, cancellationToken, ref inserts, ref updates);
            OnTableMigrationCompleteHandler(columnsToMigrate.DestinationTable.GetFullyQualifiedName(), inserts, updates);

            tableLoadInfo.Inserts = inserts;
            tableLoadInfo.Updates = updates;
            tableLoadInfo.Notes = "Part of Transaction";
        }
        finally
        {
            tableLoadInfo.CloseAndArchive();
        }

        cancellationToken.ThrowIfCancellationRequested();
    }
}
public void Execute(IEnumerable<IDataFlowPipelineEngine> engines, GracefulCancellationToken cancellationToken,
    IDataLoadEventListener listener)
{
    // Execute one pass through a pipeline before moving to the next. Continue until completion.
    var engineList = engines.ToList();

    listener.OnNotify(this, new NotifyEventArgs(ProgressEventType.Information,
        "Round robin executor has " + engineList.Count + " pipeline(s) to run."));

    var allComplete = false;
    while (!allComplete)
    {
        allComplete = true;
        foreach (var engine in engineList)
        {
            cancellationToken.ThrowIfCancellationRequested();

            // assigned to temporary variable here to make the logic a bit more explicit
            var hasMoreData = engine.ExecuteSinglePass(cancellationToken);
            allComplete = !hasMoreData && allComplete;
        }
    }

    listener.OnNotify(this, new NotifyEventArgs(ProgressEventType.Information,
        "Round robin executor is finished, all pipelines have run to completion."));
}
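// A self-contained sketch of the completion logic above, using Func<bool> stand-ins for
// ExecuteSinglePass (true = more data remains). It shows why allComplete only survives a
// round in which every engine reports it has no more data, and that the executor keeps
// polling engines that have already finished, so ExecuteSinglePass must be safe to call
// after completion. The pass counts here are illustrative only.
var engines = new List<Func<bool>>();
int a = 3, b = 1;
engines.Add(() => --a > 0); // finishes on its third pass
engines.Add(() => --b > 0); // finishes on its first pass

var allComplete = false;
while (!allComplete)
{
    allComplete = true;
    foreach (var engine in engines)
    {
        var hasMoreData = engine();
        allComplete = !hasMoreData && allComplete;
    }
}
// exits after three rounds: the round robin cycles until the slowest engine finishes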
public override void MigrateTable(IDataLoadJob job, MigrationColumnSet columnsToMigrate, int dataLoadInfoID,
    GracefulCancellationToken cancellationToken, ref int inserts, ref int updates)
{
    var server = columnsToMigrate.DestinationTable.Database.Server;

    //see CrossDatabaseMergeCommandTest
    /* ------------MIGRATE NEW RECORDS (novel by primary key)--------
     *
     * INSERT INTO CrossDatabaseMergeCommandTo..ToTable (Name,Age,Postcode,hic_dataLoadRunID)
     * SELECT
     * [CrossDatabaseMergeCommandFrom]..CrossDatabaseMergeCommandTo_ToTable_STAGING.Name,
     * [CrossDatabaseMergeCommandFrom]..CrossDatabaseMergeCommandTo_ToTable_STAGING.Age,
     * [CrossDatabaseMergeCommandFrom]..CrossDatabaseMergeCommandTo_ToTable_STAGING.Postcode,
     * 1
     * FROM
     * [CrossDatabaseMergeCommandFrom]..CrossDatabaseMergeCommandTo_ToTable_STAGING
     * left join
     * CrossDatabaseMergeCommandTo..ToTable
     * on
     * [CrossDatabaseMergeCommandFrom]..CrossDatabaseMergeCommandTo_ToTable_STAGING.Age = CrossDatabaseMergeCommandTo..ToTable.Age
     * AND
     * [CrossDatabaseMergeCommandFrom]..CrossDatabaseMergeCommandTo_ToTable_STAGING.Name = CrossDatabaseMergeCommandTo..ToTable.Name
     * WHERE
     * CrossDatabaseMergeCommandTo..ToTable.Age is null
     */

    var sbInsert = new StringBuilder();
    var syntax = server.GetQuerySyntaxHelper();

    sbInsert.AppendLine(string.Format("INSERT INTO {0} ({1}",
        columnsToMigrate.DestinationTable.GetFullyQualifiedName(),
        string.Join(",", columnsToMigrate.FieldsToUpdate.Select(c => syntax.EnsureWrapped(c.GetRuntimeName())))));

    //if we are not ignoring the trigger then we should record the data load run ID
    if (!job.LoadMetadata.IgnoreTrigger)
        sbInsert.AppendLine($",{syntax.EnsureWrapped(SpecialFieldNames.DataLoadRunID)}");

    sbInsert.AppendLine(")");
    sbInsert.AppendLine("SELECT");

    // Add the columns we are migrating
    sbInsert.AppendLine(string.Join("," + Environment.NewLine,
        columnsToMigrate.FieldsToUpdate.Select(c => c.GetFullyQualifiedName())));

    // If we are using the trigger also add the run ID e.g. ",50"
    if (!job.LoadMetadata.IgnoreTrigger)
        sbInsert.AppendLine("," + dataLoadInfoID);

    sbInsert.AppendLine("FROM");
    sbInsert.AppendLine(columnsToMigrate.SourceTable.GetFullyQualifiedName());
    sbInsert.AppendLine("LEFT JOIN");
    sbInsert.AppendLine(columnsToMigrate.DestinationTable.GetFullyQualifiedName());
    sbInsert.AppendLine("ON");
    sbInsert.AppendLine(string.Join(" AND " + Environment.NewLine,
        columnsToMigrate.PrimaryKeys.Select(pk =>
            string.Format("{0}.{1}={2}.{1}",
                columnsToMigrate.SourceTable.GetFullyQualifiedName(),
                syntax.EnsureWrapped(pk.GetRuntimeName()),
                columnsToMigrate.DestinationTable.GetFullyQualifiedName()))));

    sbInsert.AppendLine("WHERE");
    sbInsert.AppendLine(string.Format("{0}.{1} IS NULL",
        columnsToMigrate.DestinationTable.GetFullyQualifiedName(),
        syntax.EnsureWrapped(columnsToMigrate.PrimaryKeys.First().GetRuntimeName())));

    //right at the end of the SELECT
    if (columnsToMigrate.DestinationTable.Database.Server.DatabaseType == DatabaseType.MySql)
        sbInsert.Append(" FOR UPDATE");

    var insertSql = sbInsert.ToString();

    var cmd = server.GetCommand(insertSql, _managedConnection);
    cmd.CommandTimeout = Timeout;

    job.OnNotify(this, new NotifyEventArgs(ProgressEventType.Information,
        "INSERT query: " + Environment.NewLine + insertSql));

    cancellationToken.ThrowIfCancellationRequested();

    try
    {
        inserts = cmd.ExecuteNonQuery();

        var sqlLines = new List<CustomLine>();

        var toSet = columnsToMigrate.FieldsToUpdate.Where(c => !c.IsPrimaryKey)
            .Select(c => string.Format("t1.{0} = t2.{0}", syntax.EnsureWrapped(c.GetRuntimeName()))).ToArray();

        if (!toSet.Any())
        {
            job.OnNotify(this, new NotifyEventArgs(ProgressEventType.Warning,
                "Table " + columnsToMigrate.DestinationTable +
                " is entirely composed of PrimaryKey columns or hic_ columns so UPDATE will NOT take place"));
            return;
        }

        var toDiff = columnsToMigrate.FieldsToDiff.Where(c => !c.IsPrimaryKey).ToArray();

        if (!toDiff.Any())
        {
            job.OnNotify(this, new NotifyEventArgs(ProgressEventType.Warning,
                "Table " + columnsToMigrate.DestinationTable +
                " is entirely composed of PrimaryKey columns, hic_ columns or other non DIFF columns so UPDATE will NOT take place"));
            return;
        }

        //t1.Name = t2.Name, t1.Age = t2.Age etc
        sqlLines.Add(new CustomLine(string.Join(",", toSet), QueryComponent.SET));

        //also update the hic_dataLoadRunID field
        if (!job.LoadMetadata.IgnoreTrigger)
            sqlLines.Add(new CustomLine(
                string.Format("t1.{0}={1}", syntax.EnsureWrapped(SpecialFieldNames.DataLoadRunID), dataLoadInfoID),
                QueryComponent.SET));

        //t1.Name <> t2.Name OR t1.Age <> t2.Age etc
        sqlLines.Add(new CustomLine(string.Join(" OR ", toDiff.Select(c => GetORLine(c, syntax))), QueryComponent.WHERE));

        //the join on primary keys
        sqlLines.AddRange(columnsToMigrate.PrimaryKeys.Select(p =>
            new CustomLine(string.Format("t1.{0} = t2.{0}", syntax.EnsureWrapped(p.GetRuntimeName())),
                QueryComponent.JoinInfoJoin)));

        var updateHelper = columnsToMigrate.DestinationTable.Database.Server.GetQuerySyntaxHelper().UpdateHelper;

        var updateQuery = updateHelper.BuildUpdate(
            columnsToMigrate.DestinationTable,
            columnsToMigrate.SourceTable,
            sqlLines);

        job.OnNotify(this, new NotifyEventArgs(ProgressEventType.Information,
            "Update query:" + Environment.NewLine + updateQuery));

        var updateCmd = server.GetCommand(updateQuery, _managedConnection);
        updateCmd.CommandTimeout = Timeout;

        cancellationToken.ThrowIfCancellationRequested();

        try
        {
            updates = updateCmd.ExecuteNonQuery();
        }
        catch (Exception e)
        {
            job.OnNotify(this, new NotifyEventArgs(ProgressEventType.Error,
                "Did not successfully perform the update queries: " + updateQuery, e));
            throw new Exception("Did not successfully perform the update queries: " + updateQuery + " - " + e);
        }
    }
    catch (OperationCanceledException)
    {
        throw; // have to catch and rethrow this because of the catch-all below
    }
    catch (Exception e)
    {
        job.OnNotify(this, new NotifyEventArgs(ProgressEventType.Error,
            "Failed to migrate " + columnsToMigrate.SourceTable + " to " + columnsToMigrate.DestinationTable, e));
        throw new Exception("Failed to migrate " + columnsToMigrate.SourceTable + " to " +
                            columnsToMigrate.DestinationTable + ": " + e);
    }
}
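// GetORLine is called above but not shown in this section. A minimal sketch consistent
// with the "t1.Name <> t2.Name OR ..." comment, assuming a null-safe difference test is
// intended (the actual implementation may differ):
private static string GetORLine(DiscoveredColumn c, IQuerySyntaxHelper syntax)
{
    var col = syntax.EnsureWrapped(c.GetRuntimeName());

    // two values differ when they are unequal, or when exactly one side is NULL
    return string.Format(
        "(t1.{0} <> t2.{0} OR (t1.{0} IS NULL AND t2.{0} IS NOT NULL) OR (t1.{0} IS NOT NULL AND t2.{0} IS NULL))",
        col);
}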
public DataTable GetChunk(IDataLoadEventListener job, GracefulCancellationToken cancellationToken)
{
    if (_reader == null)
    {
        _con = DatabaseCommandHelper.GetConnection(_builder);
        _con.Open();

        job.OnNotify(this, new NotifyEventArgs(ProgressEventType.Information,
            "Running SQL:" + Environment.NewLine + Sql));

        cmd = DatabaseCommandHelper.GetCommand(Sql, _con);
        cmd.CommandTimeout = _timeout;
        CommandAdjuster?.Invoke(cmd);

        _reader = cmd.ExecuteReaderAsync(cancellationToken.AbortToken).Result;
        _numberOfColumns = _reader.FieldCount;
    }

    int readThisBatch = 0;
    timer.Start();
    try
    {
        DataTable chunk = GetChunkSchema(_reader);

        while (_reader.HasRows && _reader.Read())
        {
            cancellationToken.ThrowIfCancellationRequested();
            AddRowToDataTable(chunk, _reader);
            readThisBatch++;

            //we reached batch limit
            if (readThisBatch == BatchSize)
                return chunk;
        }

        //if data was read
        if (readThisBatch > 0)
            return chunk;

        //data is exhausted

        //if data was exhausted on first read and we are allowing empty result sets
        if (firstChunk && AllowEmptyResultSets)
            return chunk; //return the empty chunk

        //data exhausted
        return null;
    }
    catch (Exception e)
    {
        job.OnNotify(this, new NotifyEventArgs(ProgressEventType.Error, "Source read failed", e));
        throw;
    }
    finally
    {
        firstChunk = false;
        timer.Stop();
        job.OnProgress(this, new ProgressEventArgs(_taskBeingPerformed,
            new ProgressMeasurement(TotalRowsRead, ProgressType.Records), timer.Elapsed));
    }
}
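// A minimal sketch of the consumption contract implied by GetChunk: call it repeatedly,
// receiving up to BatchSize rows per DataTable, until it returns null to signal the
// result set is exhausted. 'source', 'listener', 'token' and ProcessChunk are assumed
// names standing in for an instance of this class, its event listener, a cancellation
// token and a hypothetical downstream handler.
DataTable chunk;
while ((chunk = source.GetChunk(listener, token)) != null)
{
    ProcessChunk(chunk); // handle up to BatchSize rows
}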