private static async Task<bool> ImportTableDelta(string SqlInstance, string TableName)
{
    Stopwatch sw = Stopwatch.StartNew();

    using (SqlConnection repoConnection = SqlWatchRepo.Connection())
    {
        await repoConnection.OpenAsync();

        string snapshotTime = "1970-01-01";

        using (SqlCommand cmdGetLastSnapshotTime = new SqlCommand(
            $"select [snapshot_time]=isnull(convert(varchar(23),max([snapshot_time]),121),'{ snapshotTime }') " +
            $"from { TableName } (nolock)", repoConnection))
        {
            Stopwatch ls = Stopwatch.StartNew();
            snapshotTime = (await cmdGetLastSnapshotTime.ExecuteScalarAsync()).ToString();
            t3 += ls.Elapsed.TotalMilliseconds;
        }

        //Logger tables must have a valid header record. Despite the dependency order, it may happen that we will try
        //to import new logger records after we have already imported the header. This could happen if the import is
        //running for a long time and enough time has passed between the header and the logger records that new data
        //was inserted at source. We have to make sure we're only importing logger records up to the max snapshot time
        //in the repository. For this to happen, we need to know the snapshot_type_id for each table.
        using (SqlConnection remoteConnection = SqlWatchRemote.Connection(SqlInstance))
        {
            await remoteConnection.OpenAsync();

            using (SqlCommand remoteCommand = new SqlCommand(
                $"select * " +
                $"from { TableName } " +
                $"where [snapshot_time] > '{ snapshotTime }' " +
                $"and [snapshot_time] <= '{ SqlWatchRepo.LastHeaderSnapshotDate(SqlInstance, SqlWatchRepo.TableSnapshotType(TableName)) }'",
                remoteConnection))
            {
                remoteCommand.CommandTimeout = Config.DataCopyExecTimeout;

                Stopwatch bk2 = Stopwatch.StartNew();

                using (SqlDataReader reader = await remoteCommand.ExecuteReaderAsync())
                {
                    using (SqlBulkCopy sqlBulkCopy = new SqlBulkCopy(repoConnection, SqlBulkCopyOptions.KeepIdentity, null))
                    {
                        sqlBulkCopy.DestinationTableName = TableName;
                        sqlBulkCopy.BulkCopyTimeout = Config.DataCopyExecTimeout;

                        if (Config.StreamData)
                        {
                            sqlBulkCopy.EnableStreaming = true;
                        }

                        //Count copied rows via the SqlRowsCopied callback. Enumerating the reader up front
                        //(e.g. reader.Cast<object>().Count()) would consume it and leave nothing to bulk copy.
                        //NotifyAfter = 1 trades a per-row callback for an exact count.
                        long rowCount = 0;
                        sqlBulkCopy.NotifyAfter = 1;
                        sqlBulkCopy.SqlRowsCopied += (sender, e) => rowCount = e.RowsCopied;

                        Logger.LogVerbose($"Writing remote data from [{ SqlInstance }].{ TableName }");

                        try
                        {
                            await sqlBulkCopy.WriteToServerAsync(reader);
                            t4 += bk2.Elapsed.TotalMilliseconds;

                            if (rowCount == 1)
                            {
                                Logger.LogVerbose($"Bulk copied { rowCount } row from [{ SqlInstance }].{ TableName } in { sw.Elapsed.TotalMilliseconds }ms");
                            }
                            else
                            {
                                Logger.LogVerbose($"Bulk copied { rowCount } rows from [{ SqlInstance }].{ TableName } in { sw.Elapsed.TotalMilliseconds }ms");
                            }

                            Logger.LogSuccess($"Imported { TableName } from { SqlInstance } in { sw.Elapsed.TotalMilliseconds }ms");
                        }
                        catch (Exception e)
                        {
                            Logger.LogError($"Failed to Bulk Copy data from [{ SqlInstance }].{ TableName }");
                            Logger.LogError(e.ToString());
                        }
                    }
                }
            }
        }
    }

    return true;
}
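//Illustrative sketch only: one way a caller could fan out per-table delta imports for a single
//remote instance. SqlWatchRepo.RemoteTables() is a hypothetical accessor over the table list
//populated by GetRemoteTables(); the real per-table orchestration lives in SqlWatchRemote.Import
//and is not shown in this file. ImportTableDelta already bounds each logger table by the last
//imported header snapshot, so strict ordering is not re-enforced here.
private static async Task ImportAllTableDeltas(string SqlInstance)
{
    List<Task<bool>> tableImports = new List<Task<bool>>();

    foreach (string tableName in SqlWatchRepo.RemoteTables())
    {
        //Kick off each table import concurrently:
        tableImports.Add(ImportTableDelta(SqlInstance, tableName));
    }

    await Task.WhenAll(tableImports);
}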
static void Main(string[] args)
{
    #region prerun config
    Stopwatch sw = Stopwatch.StartNew();

    if (Config.MinThreads != 0)
    {
        ThreadPool.SetMinThreads(Config.MinThreads, Config.MinThreads);
    }

    if (Config.MaxThreads != 0)
    {
        ThreadPool.SetMaxThreads(Config.MaxThreads, Config.MaxThreads);
    }

    Trace.Listeners.Clear();
    Tools.RotateLogFile(Config.LogFile);

    TextWriterTraceListener twtl = new TextWriterTraceListener(Config.LogFile);
    twtl.Name = "TextLogger";
    twtl.TraceOutputOptions = TraceOptions.ThreadId | TraceOptions.DateTime;

    if (Config.PrintToConsole == true)
    {
        ConsoleTraceListener ctl = new ConsoleTraceListener(false);
        ctl.TraceOutputOptions = TraceOptions.DateTime;
        Trace.Listeners.Add(ctl);
    }

    if (Config.PrintToLogFile == true)
    {
        Trace.Listeners.Add(twtl);
    }

    Trace.AutoFlush = true;

    var version = Assembly.GetExecutingAssembly().GetName().Version;
    var buildDate = new DateTime(2000, 1, 1).AddDays(version.Build).AddSeconds(version.Revision * 2);
    var displayableVersion = $"{version} ({buildDate})";
    #endregion

    if (args.Length == 0)
    {
        #region importdata
        string message = $"{ Config.ApplicationTitle } ";
        message += $"\r\n{ Config.tsplaceholder } { Config.ApplicationDescription }";
        message += $"\r\n{ Config.tsplaceholder } { Config.ApplicationCopyright }";
        message += $"\r\n{ Config.tsplaceholder } Version: { displayableVersion }";

        Logger.LogMessage(message);

        //print all config, masking secrets:
        message = "Application Configuration";

        foreach (string s in ConfigurationManager.AppSettings.AllKeys)
        {
            if (s.Contains("Secret") && ConfigurationManager.AppSettings[s] != "")
            {
                message += $"\r\n{ Config.tsplaceholder } { s } : ***** ";
            }
            else
            {
                message += $"\r\n{ Config.tsplaceholder } { s } : { ConfigurationManager.AppSettings[s] } ";
            }
        }

        Logger.LogVerbose(message);

        bool isOnlineResult = false;
        Task tIsOnline = Task.Run(async () => { isOnlineResult = await SqlWatchRepo.IsOnline(); });
        Task.WaitAll(tIsOnline);

        Stopwatch sdt = Stopwatch.StartNew();

        Task tRemoteTables = Task.Run(() => { SqlWatchRepo.GetRemoteTables(); });
        Task tSnapshotTypes = Task.Run(async () => { await SqlWatchRepo.GetTableSnapshoTypes(); });

        // populate Servers DataTable, SqlAdapters
        Task tRemoteServers = Task.Run(() => { SqlWatchRepo.GetRemoteServers(); });

        // wait until we have got all the reference data:
        Task.WaitAll(tRemoteServers, tRemoteTables, tSnapshotTypes);
        double t0 = sdt.Elapsed.TotalMilliseconds;

        List<Task> tasks = new List<Task>();

        // import remote servers:
        SqlWatchRepo.RemoteServers().ForEach(delegate (String SqlInstance)
        {
            Task task = Task.Run(async () => { await SqlWatchRemote.Import(SqlInstance); });
            tasks.Add(task);
        });

        Task.WaitAll(tasks.ToArray());

        Logger.LogMessage("Import completed in " + sw.Elapsed.TotalMilliseconds + "ms");
        Logger.LogVerbose($"Total time spent on populating reference DataTables: { t0 }ms");
        Logger.LogVerbose($"Total time spent on Bulk Copy Full load: { SqlWatchRemote.t1 }ms");
        Logger.LogVerbose($"Total time spent on Merge: { SqlWatchRemote.t2 }ms");
        Logger.LogVerbose($"Total time spent on Bulk Copy Delta load: { SqlWatchRemote.t4 }ms");
        Logger.LogVerbose($"Total time spent on querying Central Repo for the last snapshot: { SqlWatchRemote.t3 }ms");
        Logger.LogVerbose($"Total time spent on everything else: { sw.Elapsed.TotalMilliseconds - (t0 + SqlWatchRemote.t1 + SqlWatchRemote.t2 + SqlWatchRemote.t4 + SqlWatchRemote.t3) }ms");

        Environment.ExitCode = Config.hasErrors ? 1 : 0;
        #endregion
    }
    else
    {
        #region interactive
        Parser.Default.ParseArguments<Options>(args)
            .WithParsed<Options>(options =>
            {
                if (options.Add)
                {
                    SqlWatchRemote.Add(
                        options.RemoteSqlWatchInstance,
                        options.RemoteSqlWatchDatabase,
                        options.RemoteHostname,
                        options.RemoteSqlPort,
                        options.RemoteSqlUser,
                        options.RemoteSqlPassword
                        );
                }

                if (options.Update)
                {
                    SqlWatchRemote.Update(
                        options.RemoteSqlWatchInstance,
                        options.RemoteSqlUser,
                        options.RemoteSqlPassword
                        );
                }
            });
        #endregion
    }
}
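//A minimal sketch of the Options class consumed by Parser.Default.ParseArguments<Options> above.
//The property names match their usage in Main; the [Option] long names, help texts and the string
//type of RemoteSqlPort are assumptions (the CommandLineParser package API itself is real).
internal class Options
{
    [Option("add", HelpText = "Add a remote SQLWATCH instance to the central repository.")]
    public bool Add { get; set; }

    [Option("update", HelpText = "Update connection details of an existing remote instance.")]
    public bool Update { get; set; }

    [Option("instance", HelpText = "Remote SQLWATCH SQL Server instance name.")]
    public string RemoteSqlWatchInstance { get; set; }

    [Option("database", HelpText = "Database holding SQLWATCH on the remote instance.")]
    public string RemoteSqlWatchDatabase { get; set; }

    [Option("hostname", HelpText = "Remote host name, if different from the instance name.")]
    public string RemoteHostname { get; set; }

    [Option("port", HelpText = "Remote SQL Server port.")]
    public string RemoteSqlPort { get; set; }

    [Option("user", HelpText = "SQL login used to connect to the remote instance.")]
    public string RemoteSqlUser { get; set; }

    [Option("password", HelpText = "Password for the SQL login.")]
    public string RemoteSqlPassword { get; set; }
}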
private static async Task<bool> ImportTableFull(string SqlInstance, string TableName)
{
    Stopwatch tt = Stopwatch.StartNew();

    using (SqlConnection repoConnection = SqlWatchRepo.Connection())
    {
        await repoConnection.OpenAsync();

        string PkColumns = SqlWatchRepo.TablePrimaryKey(TableName);
        string sql = $"select top 0 * into [#{ TableName }] from { TableName };";

        if (PkColumns != "")
        {
            sql += $"alter table [#{ TableName }] add primary key ({ PkColumns }); ";
        }

        using (SqlCommand repoCommand = new SqlCommand(sql, repoConnection))
        {
            Logger.LogVerbose($"Preparing landing table for [{ SqlInstance }].{ TableName }");
            try
            {
                await repoCommand.ExecuteNonQueryAsync();
            }
            catch (SqlException e)
            {
                Logger.LogError($"Failed to prepare table for [{ SqlInstance }].{ TableName }", e.Errors[0].Message);
                return false;
            }
        }

        using (SqlConnection remoteConnection = SqlWatchRemote.Connection(SqlInstance))
        {
            await remoteConnection.OpenAsync();

            //Logger tables must have a valid header record. Despite the dependency order, it may happen that we will try
            //to import new logger records after we have already imported the header. This could happen if the import is
            //running for a long time and enough time has passed between the header and the logger records that new data
            //was inserted at source. We have to make sure we're only importing logger records up to the max snapshot time
            //in the repository. For this to happen, we need to know the snapshot_type_id for each table.
            string lastSeenInRepo = "";

            if (Config.respectDateLastSeen == true)
            {
                if (SqlWatchRepo.Table.HasColumnLastSeen(TableName) == true)
                {
                    sql = $"select isnull(max(date_last_seen),'1970-01-01') from { TableName }";
                    using (SqlCommand cmdGetRepoLastSeen = new SqlCommand(sql, repoConnection))
                    {
                        lastSeenInRepo = (await cmdGetRepoLastSeen.ExecuteScalarAsync()).ToString();
                    }
                }
            }

            if (TableName.Contains("sqlwatch_logger"))
            {
                sql = $"select * from { TableName } where snapshot_time <= '{ SqlWatchRepo.LastHeaderSnapshotDate(SqlInstance, SqlWatchRepo.TableSnapshotType(TableName)) }'";
                if (lastSeenInRepo != "")
                {
                    sql += $" and date_last_seen > '{ lastSeenInRepo }'";
                }
            }
            else
            {
                sql = $"select * from { TableName }";
                if (lastSeenInRepo != "")
                {
                    sql += $" where date_last_seen > '{ lastSeenInRepo }'";
                }
            }

            using (SqlCommand cmdGetData = new SqlCommand(sql, remoteConnection))
            {
                cmdGetData.CommandTimeout = Config.DataCopyExecTimeout;

                //import data into the landing (#) table
                try
                {
                    Stopwatch bk1 = Stopwatch.StartNew();

                    using (SqlDataReader reader = await cmdGetData.ExecuteReaderAsync())
                    {
                        Logger.LogVerbose($"Preparing to Bulk Copy remote data from [{ SqlInstance }].{ TableName } to landing table #{ TableName }");

                        using (SqlBulkCopy sqlBulkCopy = new SqlBulkCopy(repoConnection, SqlBulkCopyOptions.KeepIdentity, null))
                        {
                            sqlBulkCopy.DestinationTableName = $"[#{ TableName }]";
                            sqlBulkCopy.BulkCopyTimeout = Config.DataCopyExecTimeout;

                            if (Config.StreamData)
                            {
                                sqlBulkCopy.EnableStreaming = true;
                            }

                            try
                            {
                                Logger.LogVerbose($"Copying remote data from [{ SqlInstance }].{ TableName } to landing table #{ TableName } using BulkCopy.");
                                await sqlBulkCopy.WriteToServerAsync(reader);
                                t1 += bk1.Elapsed.TotalMilliseconds;
                                Logger.LogVerbose($"Bulk Copied remote data from [{ SqlInstance }].{ TableName } to landing table #{ TableName } in { bk1.Elapsed.TotalMilliseconds }ms. Awaiting Merge.");
                            }
                            catch (SqlException e)
                            {
                                Logger.LogError($"Failed to Bulk Copy data from [{ SqlInstance }].{ TableName }", e.Errors[0].Message);
                                return false;
                            }
                        }
                    }
                }
                catch (SqlException e)
                {
                    Logger.LogError($"Failed to populate DataReader with remote Data from [{ SqlInstance }].{ TableName }", e.Errors[0].Message, sql);
                    return false;
                }
            }

            sql = "";

            if (SqlWatchRepo.TableHasIdentity(TableName) == true)
            {
                sql += $"\nset identity_insert { TableName } on;";
            }

            string allColumns = SqlWatchRepo.TableColumns(TableName);

            sql += $@"
;merge { TableName } as target
using [#{ TableName }] as source
on ({ SqlWatchRepo.TableMergeJoins(TableName) })
when not matched
then insert ({ allColumns })
values (source.{ allColumns.Replace(",", ",source.") })";

            string updateColumns = SqlWatchRepo.TableMergeUpdateColumns(TableName);

            if (updateColumns != "")
            {
                sql += $@"
when matched
then update set { updateColumns }";
            }

            sql += ";";

            if (SqlWatchRepo.TableHasIdentity(TableName) == true)
            {
                sql += $"\nset identity_insert { TableName } off;";
            }

            using (SqlCommand cmdMergeTable = new SqlCommand(sql, repoConnection))
            {
                cmdMergeTable.CommandTimeout = Config.DataCopyExecTimeout;

                Logger.LogVerbose($"Merging [{ SqlInstance }].{ TableName }");

                try
                {
                    Stopwatch mg = Stopwatch.StartNew();
                    int nRows = await cmdMergeTable.ExecuteNonQueryAsync();
                    t2 += mg.Elapsed.TotalMilliseconds;

                    if (nRows == 1)
                    {
                        Logger.LogVerbose($"Merged { nRows } row from [{ SqlInstance }].{ TableName } in { mg.Elapsed.TotalMilliseconds }ms");
                    }
                    else
                    {
                        Logger.LogVerbose($"Merged { nRows } rows from [{ SqlInstance }].{ TableName } in { mg.Elapsed.TotalMilliseconds }ms");
                    }

                    Logger.LogSuccess($"Imported { TableName } from { SqlInstance } in { tt.Elapsed.TotalMilliseconds }ms");
                    return true;
                }
                catch (SqlException e)
                {
                    Logger.LogError($"Failed to merge table [{ SqlInstance }].{ TableName }", e.Errors[0].Message, sql);

                    //dump the landing (#) table to a physical table to help debugging
                    sql = $"select * into [_DUMP_{ string.Format("{0:yyyyMMddHHmmssfff}", DateTime.Now) }_{ SqlInstance }.{ TableName }] from [#{ TableName }]";

                    using (SqlCommand cmdDumpData = new SqlCommand(sql, repoConnection))
                    {
                        try
                        {
                            cmdDumpData.ExecuteNonQuery();
                        }
                        catch (SqlException x)
                        {
                            Logger.LogError("Failed to dump data into a table for debugging", x.Errors[0].Message, sql);
                            return false;
                        }
                    }

                    return false;
                }
            }
        }
    }
}