/// <summary>
/// Copies change tables from the relay to the slave server
/// </summary>
/// <param name="tables">Array of table config objects</param>
/// <param name="sourceCTDB">Source CT database</param>
/// <param name="destCTDB">Dest CT database</param>
/// <param name="CTID">CT batch ID this is for</param>
/// <param name="existingCTTables">List of existing CT tables</param>
/// <param name="isConsolidated">Whether source CT table is consolidated</param>
private void CopyChangeTables(IEnumerable<TableConf> tables, string sourceCTDB, string destCTDB, Int64 CTID, List<ChangeTable> existingCTTables, bool isConsolidated = false) {
    if (Config.Slave == Config.RelayServer && sourceCTDB == destCTDB) {
        logger.Log("Skipping download because slave is equal to relay.", LogLevel.Debug);
        return;
    }

    var actions = new List<Action>();
    foreach (TableConf t in tables) {
        if (Config.SlaveType == SqlFlavor.Vertica) {
            // NOTE: bug fix for ticket #1699344
            // Currently we are testing this fix for Vertica only.
            // Skip tables that have no existing CT table in any batch.
            ChangeTable changeTable = existingCTTables
                .Where(ctbl => String.Compare(ctbl.name, t.Name, StringComparison.OrdinalIgnoreCase) == 0)
                .OrderBy(ctbl => ctbl.CTID)
                .LastOrDefault();
            if (changeTable == null) {
                continue;
            }
        }
        IDataCopy dataCopy = DataCopyFactory.GetInstance(Config.RelayType, Config.SlaveType, sourceDataUtils, destDataUtils, logger);
        var ct = new ChangeTable(t.Name, CTID, t.SchemaName, Config.Slave);
        string sourceCTTable = isConsolidated ? ct.consolidatedName : ct.ctName;
        string destCTTable = ct.ctName;
        TableConf tLocal = t;
        Action act = () => {
            try {
                // the bulk copy timeout comes from Config.DataCopyTimeout
                logger.Log(new { message = "Copying table to slave", Table = tLocal.SchemaName + "." + sourceCTTable }, LogLevel.Trace);
                var sw = Stopwatch.StartNew();
                dataCopy.CopyTable(sourceCTDB, sourceCTTable, tLocal.SchemaName, destCTDB, Config.DataCopyTimeout, destCTTable, tLocal.Name);
                logger.Log(new { message = "CopyTable: " + sw.Elapsed, Table = tLocal.SchemaName + "." + sourceCTTable }, LogLevel.Trace);
            } catch (DoesNotExistException) {
                // this is a totally normal and expected case since we only publish change tables when data actually changed
                logger.Log("No changes to pull for table " + tLocal.SchemaName + "." + sourceCTTable + " because it does not exist", LogLevel.Debug);
            } catch (Exception e) {
                HandleException(e, tLocal);
            }
        };
        actions.Add(act);
    }

    logger.Log("Parallel invocation of " + actions.Count + " changetable downloads", LogLevel.Trace);
    var options = new ParallelOptions();
    options.MaxDegreeOfParallelism = Config.MaxThreads;
    Parallel.Invoke(options, actions.ToArray());
}
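/// <summary>
/// Copies a single table's change table for the given CT batch from the master to the relay.
/// Errors are rethrown only when the table is configured with StopOnError; otherwise they are
/// logged and swallowed so the remaining tables can still be published.
/// </summary>
/// <param name="table">Table config object</param>
/// <param name="sourceCTDB">Source CT database</param>
/// <param name="destCTDB">Dest CT database</param>
/// <param name="CTID">CT batch ID this is for</param>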
protected void PublishChangeTable(TableConf table, string sourceCTDB, string destCTDB, Int64 CTID) {
    IDataCopy dataCopy = DataCopyFactory.GetInstance((SqlFlavor)Config.MasterType, (SqlFlavor)Config.RelayType, sourceDataUtils, destDataUtils, logger);
    logger.Log("Publishing changes for table " + table.SchemaName + "." + table.Name, LogLevel.Trace);
    try {
        dataCopy.CopyTable(sourceCTDB, table.ToCTName(CTID), table.SchemaName, destCTDB, Config.DataCopyTimeout, originalTableName: table.Name);
        logger.Log("Publishing changes succeeded for " + table.SchemaName + "." + table.Name, LogLevel.Trace);
    } catch (Exception e) {
        if (table.StopOnError) {
            throw;
        } else {
            logger.Log("Copying change data for table " + table.SchemaName + "." + table.Name + " failed with error: " + e.Message, LogLevel.Error);
        }
    }
}
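/// <summary>
/// For each configured table, copies the newest per-batch CT table on the relay into that table's
/// consolidated CT table, merges the older batches into it, and removes duplicate primary key rows.
/// Returns the newest change table for each table that had captured changes; tables with no
/// captured changes have any stale consolidated CT table dropped instead.
/// </summary>
/// <param name="tables">Existing per-batch change tables on the relay</param>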
private IEnumerable<ChangeTable> ConsolidateBatches(IList<ChangeTable> tables) {
    var lu = new Dictionary<string, List<ChangeTable>>();
    var actions = new List<Action>();
    foreach (var changeTable in tables) {
        if (!lu.ContainsKey(changeTable.name)) {
            lu[changeTable.name] = new List<ChangeTable>();
        }
        lu[changeTable.name].Add(changeTable);
    }

    var consolidatedTables = new List<ChangeTable>();
    foreach (var table in Config.Tables) {
        if (!lu.ContainsKey(table.Name)) {
            // ugly case sensitivity hack
            var kvp = lu.FirstOrDefault(l => String.Compare(l.Key, table.Name, StringComparison.OrdinalIgnoreCase) == 0);
            if (kvp.Key != null) {
                lu[table.Name] = lu[kvp.Key];
            } else {
                logger.Log("No changes captured for " + table.Name, LogLevel.Info);
                var ct = new ChangeTable(table.Name, null, table.SchemaName, Config.Slave);
                string sourceConsolidatedCTTable = ct.consolidatedName;
                logger.Log("Dropping (if exists) consolidated CT table " + sourceConsolidatedCTTable, LogLevel.Info);
                sourceDataUtils.DropTableIfExists(Config.RelayDB, sourceConsolidatedCTTable, table.SchemaName);
                continue;
            }
        }
        var lastChangeTable = lu[table.Name].OrderByDescending(c => c.CTID).First();
        consolidatedTables.Add(lastChangeTable);
        TableConf tLocal = table;
        IDataCopy dataCopy = DataCopyFactory.GetInstance(Config.RelayType, Config.RelayType, sourceDataUtils, sourceDataUtils, logger);
        Action act = () => {
            try {
                logger.Log("Copying " + lastChangeTable.ctName, LogLevel.Debug);
                dataCopy.CopyTable(Config.RelayDB, lastChangeTable.ctName, tLocal.SchemaName, Config.RelayDB, Config.DataCopyTimeout, lastChangeTable.consolidatedName, tLocal.Name);
                // skip the newest change table because dataCopy.CopyTable above already copied it
                foreach (var changeTable in lu[lastChangeTable.name].OrderByDescending(c => c.CTID).Skip(1)) {
                    logger.Log("Consolidating " + changeTable.ctName, LogLevel.Debug);
                    sourceDataUtils.Consolidate(changeTable.ctName, changeTable.consolidatedName, Config.RelayDB, tLocal.SchemaName);
                }
                sourceDataUtils.RemoveDuplicatePrimaryKeyChangeRows(tLocal, lastChangeTable.consolidatedName, Config.RelayDB);
            } catch (Exception e) {
                HandleException(e, tLocal);
            }
        };
        actions.Add(act);
    }

    logger.Log("Parallel invocation of " + actions.Count + " changetable consolidations", LogLevel.Trace);
    var options = new ParallelOptions();
    options.MaxDegreeOfParallelism = Config.MaxThreads;
    Parallel.Invoke(options, actions.ToArray());
    return consolidatedTables;
}
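// The sketch below is illustrative only and is not part of this class: it shows one plausible way
// the three methods above could be sequenced for a batch, inferred from their signatures here.
// The names masterCTDB, slaveCTDB, ctid, and existingCTTables are hypothetical placeholders; the
// real orchestration lives in the callers and may differ.
//
//   // Master side: push each table's per-batch CT table up to the relay.
//   foreach (TableConf t in Config.Tables) {
//       PublishChangeTable(t, masterCTDB, Config.RelayDB, ctid);
//   }
//
//   // Slave side: collapse the per-batch CT tables on the relay into consolidated CT tables,
//   // then pull the consolidated tables down to the slave in one pass.
//   List<ChangeTable> consolidated = ConsolidateBatches(existingCTTables).ToList();
//   CopyChangeTables(Config.Tables, Config.RelayDB, slaveCTDB, ctid, consolidated, isConsolidated: true);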