private void SyncHistoryTables(string slaveCTDB, List<ChangeTable> existingCTTables, bool isConsolidated) {
    var actions = new List<Action>();
    foreach (var t in existingCTTables) {
        var s = Config.Tables.First(tc => tc.Name.Equals(t.name, StringComparison.InvariantCultureIgnoreCase));
        if (!s.RecordHistoryTable) {
            logger.Log(new { message = "Skipping writing history table because it is not configured", Table = t.name }, LogLevel.Debug);
            continue;
        }
        ChangeTable tLocal = t;
        Action act = () => {
            logger.Log(new { message = "Writing history table", table = tLocal.name }, LogLevel.Debug);
            try {
                destDataUtils.CopyIntoHistoryTable(tLocal, slaveCTDB, isConsolidated);
                logger.Log(new { message = "Successfully wrote history", Table = tLocal.name }, LogLevel.Debug);
            } catch (Exception e) {
                HandleException(e, s);
            }
        };
        actions.Add(act);
    }
    logger.Log("Parallel invocation of " + actions.Count + " history table syncs", LogLevel.Trace);
    var options = new ParallelOptions();
    options.MaxDegreeOfParallelism = Config.MaxThreads;
    Parallel.Invoke(options, actions.ToArray());
}
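// A minimal, self-contained sketch (nothing project-specific assumed) of the throttled Parallel.Invoke
// pattern used above and in the other copy/consolidate methods below: build a list of Action delegates,
// then run them with a capped degree of parallelism. The value 4 stands in for Config.MaxThreads.
using System;
using System.Collections.Generic;
using System.Threading.Tasks;

static class ThrottledInvokeDemo {
    static void Main() {
        var actions = new List<Action>();
        for (int i = 0; i < 10; i++) {
            int local = i; // capture a per-iteration copy, mirroring the tLocal pattern above
            actions.Add(() => Console.WriteLine($"work item {local} on thread {Environment.CurrentManagedThreadId}"));
        }
        var options = new ParallelOptions { MaxDegreeOfParallelism = 4 };
        Parallel.Invoke(options, actions.ToArray());
    }
}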
/// <summary>
/// Change table expression.
/// </summary>
/// <example>
/// Database.ChangeTable("Roles", t => {
///     t.ChangeColumn("Description").Text; // Changes description field data type.
///     t.RemoveColumn("Name");             // Removes name field.
///     t.Integer("Priority");              // Adds priority integer field.
/// });
/// </example>
/// <param name="transformationProvider">The transformation provider.</param>
/// <param name="tableName">Name of the table.</param>
/// <param name="tableDefinition">The table change expressions.</param>
public static void ChangeTable(this ITransformationProvider transformationProvider, String tableName, Action<ChangeTable> tableDefinition) {
    var table = new ChangeTable(tableName);
    tableDefinition(table);
    table.Migrate(transformationProvider);
}
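// A hedged usage sketch: the <example> above calls the extension through a Database member of type
// ITransformationProvider, which migration classes in this style typically expose. The class name,
// base class, and column names below are hypothetical.
public class ChangeRolesTable /* : Migration -- assumed framework base class */ {
    protected ITransformationProvider Database; // assumed to be supplied by the migration framework

    public void Up() {
        // Equivalent to the documented example: apply several changes to Roles in one expression.
        Database.ChangeTable("Roles", t => {
            t.RemoveColumn("Name"); // drop the Name column
            t.Integer("Priority");  // add an integer Priority column
        });
    }
}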
public void AlterTable(ChangeTable changeTable, string IdColumnName) {
    using (MySqlConnection connection = WebApiConfig.Connection())
    // Identifiers (table and column names) cannot be parameterized, but the id value can be,
    // so bind it as @Id rather than interpolating it into the SQL string.
    using (MySqlCommand command = new MySqlCommand(
        $"UPDATE `{changeTable.TableName}` SET `{changeTable.ColumnName}` = @Value WHERE `{IdColumnName}` = @Id;", connection)) {
        connection.Open();
        command.Parameters.AddWithValue("@Value", changeTable.Value);
        command.Parameters.AddWithValue("@Id", changeTable.Id);
        command.ExecuteNonQuery();
    }
}
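// A hedged usage sketch for AlterTable above. It assumes ChangeTable here is a plain DTO with settable
// TableName, ColumnName, Id, and Value properties (the members the method reads); the table/column
// names, the id value, and the "repository" variable are all hypothetical.
public void UpdateOrderStatusExample() {
    var change = new ChangeTable {
        TableName = "Orders",  // hypothetical table
        ColumnName = "Status", // hypothetical column to update
        Id = 42,               // key value of the row to change
        Value = "Shipped"      // new value to write
    };
    repository.AlterTable(change, "OrderId"); // "repository" stands in for whatever class hosts AlterTable
}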
/// <summary>
/// Copies change tables from the relay to the slave server
/// </summary>
/// <param name="tables">Array of table config objects</param>
/// <param name="sourceCTDB">Source CT database</param>
/// <param name="destCTDB">Dest CT database</param>
/// <param name="CTID">CT batch ID this is for</param>
/// <param name="existingCTTables">List of existing CT tables</param>
/// <param name="isConsolidated">Whether source CT table is consolidated</param>
private void CopyChangeTables(IEnumerable<TableConf> tables, string sourceCTDB, string destCTDB, Int64 CTID, List<ChangeTable> existingCTTables, bool isConsolidated = false) {
    if (Config.Slave == Config.RelayServer && sourceCTDB == destCTDB) {
        logger.Log("Skipping download because slave is equal to relay.", LogLevel.Debug);
        return;
    }
    var actions = new List<Action>();
    foreach (TableConf t in tables) {
        if (Config.SlaveType == SqlFlavor.Vertica) {
            // NOTE: bug fix for ticket # 1699344
            // Currently we are testing this fix for Vertica only
            ChangeTable changeTable = existingCTTables
                .Where(ctbl => String.Compare(ctbl.name, t.Name, StringComparison.OrdinalIgnoreCase) == 0)
                .OrderBy(ctbl => ctbl.CTID)
                .LastOrDefault();
            if (changeTable == null) {
                continue;
            }
        }
        IDataCopy dataCopy = DataCopyFactory.GetInstance(Config.RelayType, Config.SlaveType, sourceDataUtils, destDataUtils, logger);
        var ct = new ChangeTable(t.Name, CTID, t.SchemaName, Config.Slave);
        string sourceCTTable = isConsolidated ? ct.consolidatedName : ct.ctName;
        string destCTTable = ct.ctName;
        TableConf tLocal = t;
        Action act = () => {
            try {
                //hard coding timeout at 1 hour for bulk copy
                logger.Log(new { message = "Copying table to slave", Table = tLocal.SchemaName + "." + sourceCTTable }, LogLevel.Trace);
                var sw = Stopwatch.StartNew();
                dataCopy.CopyTable(sourceCTDB, sourceCTTable, tLocal.SchemaName, destCTDB, Config.DataCopyTimeout, destCTTable, tLocal.Name);
                logger.Log(new { message = "CopyTable: " + sw.Elapsed, Table = tLocal.SchemaName + "." + sourceCTTable }, LogLevel.Trace);
            } catch (DoesNotExistException) {
                //this is a totally normal and expected case since we only publish changetables when data actually changed
                logger.Log("No changes to pull for table " + tLocal.SchemaName + "." + sourceCTTable + " because it does not exist ", LogLevel.Debug);
            } catch (Exception e) {
                HandleException(e, tLocal);
            }
        };
        actions.Add(act);
    }
    logger.Log("Parallel invocation of " + actions.Count + " changetable downloads", LogLevel.Trace);
    var options = new ParallelOptions();
    options.MaxDegreeOfParallelism = Config.MaxThreads;
    Parallel.Invoke(options, actions.ToArray());
}
private void SetFieldListsSlave(string dbName, IEnumerable<TableConf> tables, ChangeTrackingBatch batch, List<ChangeTable> existingCTTables) {
    //map each table to the last appropriate CT table, ditching tableconfs with no corresponding CT tables
    var tableCTName = new Dictionary<TableConf, string>();
    foreach (var table in tables) {
        ChangeTable changeTable = existingCTTables
            .Where(ct => String.Compare(ct.name, table.Name, StringComparison.OrdinalIgnoreCase) == 0)
            .OrderBy(ct => ct.CTID)
            .LastOrDefault();
        if (changeTable == null) {
            continue;
        }
        long lastCTIDWithChanges = changeTable.CTID.Value;
        tableCTName[table] = table.ToCTName(lastCTIDWithChanges);
    }
    Dictionary<TableConf, IList<TColumn>> allColumnsByTable = sourceDataUtils.GetAllFields(dbName, tableCTName);
    //even though GetAllFields returns whether it's part of the PK, the PK info will always say false on the relay
    //since the relay doesn't necessarily define primary keys
    Dictionary<TableConf, IList<string>> primaryKeysByTable = sourceDataUtils.GetAllPrimaryKeys(dbName, tableCTName.Keys, batch);
    //tableCTName.Keys instead of tables because we've already filtered out tables that don't have change tables
    //note: allColumnsByTable.Keys or primaryKeysByTable.Keys would work just as well
    foreach (var table in tableCTName.Keys) {
        IEnumerable<TColumn> columns;
        try {
            //hacky: these columns exist in the CT tables but we are not actually interested in them here
            columns = allColumnsByTable[table].Where(c => c.name != "SYS_CHANGE_VERSION" && c.name != "SYS_CHANGE_OPERATION");
        } catch (KeyNotFoundException) {
            var e = new Exception("Column list for table " + tableCTName[table] + " not found in " + dbName);
            HandleException(e, table);
            //if we handled the exception by just logging an error, this table is still broken so we need to continue
            continue;
        }
        IList<string> pks;
        try {
            pks = primaryKeysByTable[table];
        } catch (KeyNotFoundException) {
            var e = new Exception("Primary keys for table " + table.FullName + " not found in " + dbName + ".dbo.tblCTTableInfo_" + batch.CTID);
            HandleException(e, table);
            //if we handled the exception by just logging an error, this table is still broken so we need to continue
            continue;
        }
        foreach (var pk in pks) {
            columns.First(c => c.name == pk).isPk = true;
        }
        SetFieldList(table, columns);
    }
}
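// A minimal, stand-alone sketch (with a hypothetical Column class standing in for TColumn) showing why
// mutating through columns.First(...) in SetFieldListsSlave works: Where() is lazy and yields the same
// object references as the source list, so setting isPk on the filtered view updates the originals.
using System;
using System.Collections.Generic;
using System.Linq;

class Column { public string name; public bool isPk; }

static class PkMarkingDemo {
    static void Main() {
        var all = new List<Column> {
            new Column { name = "Id" }, new Column { name = "Value" }, new Column { name = "SYS_CHANGE_VERSION" }
        };
        IEnumerable<Column> columns = all.Where(c => c.name != "SYS_CHANGE_VERSION");
        foreach (var pk in new[] { "Id" }) {
            columns.First(c => c.name == pk).isPk = true;
        }
        Console.WriteLine(all.First(c => c.name == "Id").isPk); // True: the underlying object was mutated
    }
}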
public void CopyIntoHistoryTable(ChangeTable t, string dbName, bool isConsolidated) {
    string sql;
    var fields = GetFieldList(dbName, t.ctName, t.schemaName);
    string insertColumns = "CTHistID, " + string.Join(",", fields.Select(col => col.name));
    string selectColumns = "CAST(" + t.CTID + " AS BIGINT) AS CTHistID, " + string.Join(",", fields.Select(col => col.name));
    if (CheckTableExists(dbName, t.historyName, t.schemaName)) {
        logger.Log("table " + t.historyName + " already exists; inserting into it", LogLevel.Trace);
        sql = string.Format("INSERT INTO {0} ({1}) SELECT {2} FROM {3}", t.historyName, insertColumns, selectColumns, t.ctName);
        logger.Log(sql, LogLevel.Debug);
    } else {
        logger.Log("table " + t.historyName + " does not exist; creating it", LogLevel.Trace);
        sql = string.Format("CREATE TABLE {0} AS SELECT {1} FROM {2}", t.historyName, selectColumns, t.ctName);
        logger.Log(sql, LogLevel.Debug);
    }
    var cmd = new OleDbCommand(sql);
    SqlNonQuery(dbName, cmd);
}
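// For illustration only: a stand-alone snippet reproducing the SQL that CopyIntoHistoryTable builds,
// using hypothetical values. tblOrders_history / tblCTOrders_101 and the column list are made up; the
// real values come from the ChangeTable's historyName, ctName, and CTID and from GetFieldList.
using System;

static class HistorySqlDemo {
    static void Main() {
        string historyName = "tblOrders_history"; // hypothetical t.historyName
        string ctName = "tblCTOrders_101";        // hypothetical t.ctName
        long ctid = 101;                          // hypothetical t.CTID
        string cols = "OrderId,Status";           // hypothetical field list
        string insertColumns = "CTHistID, " + cols;
        string selectColumns = "CAST(" + ctid + " AS BIGINT) AS CTHistID, " + cols;
        // Branch 1: the history table already exists, so rows are appended
        Console.WriteLine(string.Format("INSERT INTO {0} ({1}) SELECT {2} FROM {3}", historyName, insertColumns, selectColumns, ctName));
        // Branch 2: the history table is created directly from the CT table
        Console.WriteLine(string.Format("CREATE TABLE {0} AS SELECT {1} FROM {2}", historyName, selectColumns, ctName));
    }
}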
public BaseResponse SaveChange(ChangeTable model, string userId) {
    var response = new BaseResponse();
    model.TimeStamp = DateTime.UtcNow;
    model.SearchTitle = model.Title.Replace(" ", "_");
    model.ViewCounter = 0;
    _context.Change.Add(model);
    var userChange = new UserChangeTable();
    int userInternalId = _context.UserTable.Where(x => x.AspNetUserId == userId).Single().Id;
    userChange.UserId = userInternalId;
    try {
        _context.SaveChanges();
        //TODO: hack here; use a single SaveChanges call and key off AspNetUserId everywhere so the internal user id lookup can be removed (more efficient)
        userChange.ChangeId = model.Id;
        _context.UserChangesTable.Add(userChange);
        _context.SaveChanges();
        response.Success = true;
    } catch (Exception ex) {
        response.Success = false;
        response.Message = ex.Message;
    }
    return response;
}
public void CopyIntoHistoryTable(ChangeTable t, string dbName, bool isConsolidated) {
    string sql;
    if (CheckTableExists(dbName, t.historyName, t.schemaName)) {
        logger.Log("table " + t.historyName + " already exists; inserting into it", LogLevel.Trace);
        sql = string.Format("INSERT INTO {0} SELECT {1} AS CTHistID, * FROM {2}", t.historyName, t.CTID, t.ctName);
        logger.Log(sql, LogLevel.Debug);
    } else {
        logger.Log("table " + t.historyName + " does not exist; creating it", LogLevel.Trace);
        sql = string.Format("CREATE TABLE {0} AS SELECT {1} AS CTHistID, * FROM {2}", t.historyName, t.CTID, t.ctName);
        logger.Log(sql, LogLevel.Debug);
    }
    var cmd = new OleDbCommand(sql);
    SqlNonQuery(dbName, cmd);
}
/// <summary>
/// For the specified list of tables, populate a list of which CT tables exist
/// </summary>
private List<ChangeTable> PopulateTableList(IEnumerable<TableConf> tables, string dbName, IList<ChangeTrackingBatch> batches) {
    var tableList = new List<ChangeTable>();
    DataTable result = sourceDataUtils.GetTablesWithChanges(dbName, batches);
    foreach (DataRow row in result.Rows) {
        var changeTable = new ChangeTable(row.Field<string>("CtiTableName"), row.Field<long>("CTID"), row.Field<string>("CtiSchemaName"), Config.Slave);
        //only add the table if it's in our config
        if (tables.Where(t => t.Name == changeTable.name).Count() == 1) {
            tableList.Add(changeTable);
        }
    }
    return tableList;
}
/// <summary>
/// Copies change tables from the relay to the slave server
/// </summary>
/// <param name="tables">Array of table config objects</param>
/// <param name="sourceCTDB">Source CT database</param>
/// <param name="destCTDB">Dest CT database</param>
/// <param name="CTID">CT batch ID this is for</param>
private void CopyChangeTables(IEnumerable<TableConf> tables, string sourceCTDB, string destCTDB, Int64 CTID, bool isConsolidated = false) {
    if (Config.Slave == Config.RelayServer && sourceCTDB == destCTDB) {
        logger.Log("Skipping download because slave is equal to relay.", LogLevel.Debug);
        return;
    }
    var actions = new List<Action>();
    foreach (TableConf t in tables) {
        IDataCopy dataCopy = DataCopyFactory.GetInstance(Config.RelayType, Config.SlaveType, sourceDataUtils, destDataUtils, logger);
        var ct = new ChangeTable(t.Name, CTID, t.SchemaName, Config.Slave);
        string sourceCTTable = isConsolidated ? ct.consolidatedName : ct.ctName;
        string destCTTable = ct.ctName;
        TableConf tLocal = t;
        Action act = () => {
            try {
                //hard coding timeout at 1 hour for bulk copy
                logger.Log(new { message = "Copying table to slave", Table = tLocal.SchemaName + "." + sourceCTTable }, LogLevel.Trace);
                var sw = Stopwatch.StartNew();
                dataCopy.CopyTable(sourceCTDB, sourceCTTable, tLocal.SchemaName, destCTDB, Config.DataCopyTimeout, destCTTable, tLocal.Name);
                logger.Log(new { message = "CopyTable: " + sw.Elapsed, Table = tLocal.SchemaName + "." + sourceCTTable }, LogLevel.Trace);
            } catch (DoesNotExistException) {
                //this is a totally normal and expected case since we only publish changetables when data actually changed
                logger.Log("No changes to pull for table " + tLocal.SchemaName + "." + sourceCTTable + " because it does not exist ", LogLevel.Debug);
            } catch (Exception e) {
                HandleException(e, tLocal);
            }
        };
        actions.Add(act);
    }
    logger.Log("Parallel invocation of " + actions.Count + " changetable downloads", LogLevel.Trace);
    var options = new ParallelOptions();
    options.MaxDegreeOfParallelism = Config.MaxThreads;
    Parallel.Invoke(options, actions.ToArray());
}
public void CreateHistoryTable(ChangeTable t, string slaveCTDB) {
    throw new NotImplementedException();
}
public void CopyIntoHistoryTable(ChangeTable t, string slaveCTDB, bool isConsolidated) {
    throw new NotImplementedException();
}
/// <summary>
/// For the specified list of tables, populate a list of which CT tables exist
/// </summary>
private List<ChangeTable> PopulateTableList(IEnumerable<TableConf> tables, string dbName, IList<ChangeTrackingBatch> batches) {
    var tableList = new List<ChangeTable>();
    DataTable result = sourceDataUtils.GetTablesWithChanges(dbName, batches);
    foreach (DataRow row in result.Rows) {
        var changeTable = new ChangeTable(row.Field<string>("CtiTableName"), row.Field<long>("CTID"), row.Field<string>("CtiSchemaName"), Config.Slave);
        //only add the table if it's in our config, using case insensitive comparison
        if (tables.Any(t => String.Compare(t.Name, changeTable.name, StringComparison.OrdinalIgnoreCase) == 0)) {
            tableList.Add(changeTable);
        }
    }
    return tableList;
}
private IEnumerable<ChangeTable> ConsolidateBatches(IList<ChangeTable> tables) {
    var lu = new Dictionary<string, List<ChangeTable>>();
    var actions = new List<Action>();
    foreach (var changeTable in tables) {
        if (!lu.ContainsKey(changeTable.name)) {
            lu[changeTable.name] = new List<ChangeTable>();
        }
        lu[changeTable.name].Add(changeTable);
    }
    var consolidatedTables = new List<ChangeTable>();
    foreach (var table in Config.Tables) {
        if (!lu.ContainsKey(table.Name)) {
            //ugly case sensitivity hack
            var kvp = lu.FirstOrDefault(l => String.Compare(l.Key, table.Name, StringComparison.OrdinalIgnoreCase) == 0);
            if (kvp.Key != null) {
                lu[table.Name] = lu[kvp.Key];
            } else {
                logger.Log("No changes captured for " + table.Name, LogLevel.Info);
                var ct = new ChangeTable(table.Name, null, table.SchemaName, Config.Slave);
                string sourceConsolidatedCTTable = ct.consolidatedName;
                logger.Log("Dropping (if exists) consolidated CT table " + sourceConsolidatedCTTable, LogLevel.Info);
                sourceDataUtils.DropTableIfExists(Config.RelayDB, sourceConsolidatedCTTable, table.SchemaName);
                continue;
            }
        }
        var lastChangeTable = lu[table.Name].OrderByDescending(c => c.CTID).First();
        consolidatedTables.Add(lastChangeTable);
        TableConf tLocal = table;
        IDataCopy dataCopy = DataCopyFactory.GetInstance(Config.RelayType, Config.RelayType, sourceDataUtils, sourceDataUtils, logger);
        Action act = () => {
            try {
                logger.Log("Copying " + lastChangeTable.ctName, LogLevel.Debug);
                dataCopy.CopyTable(Config.RelayDB, lastChangeTable.ctName, tLocal.SchemaName, Config.RelayDB, Config.DataCopyTimeout, lastChangeTable.consolidatedName, tLocal.Name);
                //skip the first one because dataCopy.CopyTable already copied it
                foreach (var changeTable in lu[lastChangeTable.name].OrderByDescending(c => c.CTID).Skip(1)) {
                    logger.Log("Consolidating " + changeTable.ctName, LogLevel.Debug);
                    sourceDataUtils.Consolidate(changeTable.ctName, changeTable.consolidatedName, Config.RelayDB, tLocal.SchemaName);
                }
                sourceDataUtils.RemoveDuplicatePrimaryKeyChangeRows(tLocal, lastChangeTable.consolidatedName, Config.RelayDB);
            } catch (Exception e) {
                HandleException(e, tLocal);
            }
        };
        actions.Add(act);
    }
    logger.Log("Parallel invocation of " + actions.Count + " changetable consolidations", LogLevel.Trace);
    var options = new ParallelOptions();
    options.MaxDegreeOfParallelism = Config.MaxThreads;
    Parallel.Invoke(options, actions.ToArray());
    return consolidatedTables;
}
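// A minimal, self-contained sketch (not part of the project) of the ordering pattern ConsolidateBatches
// relies on: batches are bucketed by table name, the entry with the highest CTID seeds the consolidated
// copy, and the remaining batches (Skip(1)) are folded into it. BatchRow is a stand-in for ChangeTable;
// the table names and CTIDs below are hypothetical.
using System;
using System.Collections.Generic;
using System.Linq;

record BatchRow(string Name, long? CTID);

static class ConsolidationOrderDemo {
    static void Main() {
        var batches = new List<BatchRow> {
            new("dbo.Orders", 101), new("dbo.Orders", 103), new("dbo.Orders", 102),
            new("dbo.Users", 103)
        };
        foreach (var group in batches.GroupBy(b => b.Name)) {
            var ordered = group.OrderByDescending(b => b.CTID).ToList();
            Console.WriteLine($"{group.Key}: seed copy from CTID {ordered.First().CTID}");
            foreach (var older in ordered.Skip(1)) {
                Console.WriteLine($"  consolidate CTID {older.CTID} into the consolidated table");
            }
        }
    }
}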