public IEnumerable<XElement> GetElements(int tenant, string[] configs, IDataWriteOperator writer)
{
    processedTables.Clear();
    var xml = new List<XElement>();
    var connectionKeys = new Dictionary<string, string>();
    foreach (var connectionString in GetConnectionStrings(configs))
    {
        // Do not back up a database twice: when a connection shares a provider and
        // connection string with one already saved, emit a "ref" attribute pointing
        // at the first occurrence so restore can resolve the duplicate.
        var node = new XElement(connectionString.Name);
        xml.Add(node);

        var connectionKey = connectionString.ProviderName + connectionString.ConnectionString;
        if (connectionKeys.ContainsKey(connectionKey))
        {
            node.Add(new XAttribute("ref", connectionKeys[connectionKey]));
        }
        else
        {
            connectionKeys.Add(connectionKey, connectionString.Name);
            node.Add(BackupDatabase(tenant, connectionString, writer));
        }
    }
    return xml;
}
private void DoBackupStorage(IDataWriteOperator writer, List<IGrouping<string, BackupFileInfo>> fileGroups)
{
    Logger.Debug("begin backup storage");
    foreach (var group in fileGroups)
    {
        var filesProcessed = 0;
        var filesCount = group.Count();
        var storage = StorageFactory.GetStorage(ConfigPath, TenantId.ToString(), group.Key);
        foreach (var file in group)
        {
            ActionInvoker.Try(state =>
            {
                var f = (BackupFileInfo)state;
                using (var fileStream = storage.GetReadStream(f.Domain, f.Path))
                {
                    var tmp = Path.GetTempFileName();
                    try
                    {
                        using (var tmpFile = File.OpenWrite(tmp))
                        {
                            fileStream.CopyTo(tmpFile);
                        }
                        writer.WriteEntry(KeyHelper.GetFileZipKey(file), tmp);
                    }
                    finally
                    {
                        if (File.Exists(tmp))
                        {
                            File.Delete(tmp);
                        }
                    }
                }
            },
            file,
            5,
            error => Logger.Warn("can't backup file ({0}:{1}): {2}", file.Module, file.Path, error));

            SetCurrentStepProgress((int)(++filesProcessed * 100 / (double)filesCount));
        }
    }

    var restoreInfoXml = new XElement(
        "storage_restore",
        fileGroups
            .SelectMany(group => group.Select(file => (object)file.ToXElement()))
            .ToArray());

    var tmpPath = Path.GetTempFileName();
    using (var tmpFile = File.OpenWrite(tmpPath))
    {
        restoreInfoXml.WriteTo(tmpFile);
    }
    writer.WriteEntry(KeyHelper.GetStorageRestoreInfoZipKey(), tmpPath);
    File.Delete(tmpPath);

    Logger.Debug("end backup storage");
}
public IEnumerable<XElement> GetElements(int tenant, string[] configs, IDataWriteOperator writer)
{
    var xml = new List<XElement>();
    var connectionKeys = new Dictionary<string, string>();
    foreach (ConnectionStringSettings connectionString in GetConnectionStrings(configs))
    {
        // Do not back up a database twice: when a connection shares a provider and
        // connection string with one already saved, emit a "ref" attribute pointing
        // at the first occurrence so restore can resolve the duplicate.
        var node = new XElement(connectionString.Name);
        xml.Add(node);

        var connectionKey = connectionString.ProviderName + connectionString.ConnectionString;
        if (connectionKeys.ContainsKey(connectionKey))
        {
            node.Add(new XAttribute("ref", connectionKeys[connectionKey]));
        }
        else
        {
            connectionKeys.Add(connectionKey, connectionString.Name);
            OnProgressChanged("Saving database " + connectionString.Name, -1);
            node.Add(BackupDatabase(tenant, connectionString, writer));
            OnProgressChanged("OK", 100);
        }
    }
    return xml;
}
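// Illustration (not from the source): with two connection strings that resolve to
// the same provider + connection string, the element list produced above would look
// like the following, where the second entry carries only a "ref" back to the first.
// The names "core" and "mail" are hypothetical connection-string names:
//
//   <core>
//     ...table elements returned by BackupDatabase...
//   </core>
//   <mail ref="core" />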
public IEnumerable<XElement> GetElements(int tenant, string[] configs, IDataWriteOperator writer)
{
    InvokeProgressChanged("Saving files...", 0);

    var config = GetWebConfig(configs);
    var files = ComposeFiles(tenant, config);

    var elements = new List<XElement>();
    var backupKeys = new List<string>();
    var counter = 0;
    var totalCount = (double)files.Count();
    foreach (var file in files)
    {
        var backupPath = GetBackupPath(file);
        if (!backupKeys.Contains(backupPath))
        {
            var storage = StorageFactory.GetStorage(config, tenant.ToString(), file.Module);
            var zipStream = writer.BeginWriteEntry(backupPath);
            var errors = 0;
            var offset = 0;
            while (true)
            {
                try
                {
                    // offset survives across retry attempts, so a failed read resumes
                    // from the last successfully copied byte instead of restarting
                    // the entry from the beginning.
                    using (var stream = storage.GetReadStream(file.Domain, file.Path, offset))
                    {
                        var buffer = new byte[2048];
                        var readed = 0;
                        while (0 < (readed = stream.Read(buffer, 0, buffer.Length)))
                        {
                            zipStream.Write(buffer, 0, readed);
                            offset += readed;
                        }
                    }
                    break;
                }
                catch (Exception error)
                {
                    errors++;
                    if (20 < errors)
                    {
                        log.ErrorFormat("Can not backup file {0}: {1}", file.Path, error);
                        break;
                    }
                }
            }
            writer.EndWriteEntry();
            elements.Add(file.ToXElement());
            backupKeys.Add(backupPath);
            log.DebugFormat("Backup file {0}", file.Path);
        }
        InvokeProgressChanged("Saving file " + file.Path, counter++ / totalCount * 100);
    }
    return elements;
}
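// The writer contract assumed by every method in this section, reconstructed from
// the call sites (a sketch inferred from usage, not the declaration from the source
// tree; whether it is IDisposable is an assumption). Entries are added either from
// a finished file on disk or by streaming into an entry opened with BeginWriteEntry
// and sealed with EndWriteEntry:
public interface IDataWriteOperator : IDisposable
{
    void WriteEntry(string key, string file);    // add an entry from a file on disk
    void WriteEntry(string key, Stream stream);  // add an entry from an open stream
    Stream BeginWriteEntry(string key);          // open an entry for streaming writes
    void EndWriteEntry();                        // close the entry opened by BeginWriteEntry
}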
public IEnumerable<XElement> GetElements(int tenant, string[] configs, IDataWriteOperator writer)
{
    InvokeProgressChanged("Saving files...", 0);

    var config = GetWebConfig(configs);
    var files = ComposeFiles(tenant, config);

    var elements = new List<XElement>();
    var backupKeys = new List<string>();
    var counter = 0;
    var totalCount = (double)files.Count();
    foreach (var file in files)
    {
        var backupPath = GetBackupPath(file);
        if (!backupKeys.Contains(backupPath))
        {
            var storage = StorageFactory.GetStorage(config, tenant.ToString(), file.Module);
            var errors = 0;
            while (true)
            {
                try
                {
                    using (var stream = storage.GetReadStream(file.Domain, file.Path))
                    {
                        var tmpPath = Path.GetTempFileName();
                        using (var tmpFile = File.OpenWrite(tmpPath))
                        {
                            stream.CopyTo(tmpFile);
                        }
                        writer.WriteEntry(backupPath, tmpPath);
                        File.Delete(tmpPath);
                    }
                    break;
                }
                catch (Exception error)
                {
                    errors++;
                    if (20 < errors)
                    {
                        log.ErrorFormat("Can not backup file {0}: {1}", file.Path, error);
                        break;
                    }
                }
            }
            elements.Add(file.ToXElement());
            backupKeys.Add(backupPath);
            log.DebugFormat("Backup file {0}", file.Path);
        }
        InvokeProgressChanged("Saving file " + file.Path, counter++ / totalCount * 100);
    }
    return elements;
}
private void DoBackupModule(IDataWriteOperator writer, DbFactory dbFactory, IModuleSpecifics module)
{
    Logger.Debug("begin saving data for module {0}", module.ModuleName);

    var tablesToProcess = module.Tables
        .Where(t => !IgnoredTables.Contains(t.Name) && t.InsertMethod != InsertMethod.None)
        .ToList();
    int tablesCount = tablesToProcess.Count;
    int tablesProcessed = 0;

    using (var connection = dbFactory.OpenConnection())
    {
        foreach (var table in tablesToProcess)
        {
            Logger.Debug("begin load table {0}", table.Name);
            using (var data = new DataTable(table.Name))
            {
                ActionInvoker.Try(
                    state =>
                    {
                        data.Clear();
                        var t = (TableInfo)state;
                        var dataAdapter = dbFactory.CreateDataAdapter();
                        dataAdapter.SelectCommand = module.CreateSelectCommand(connection.Fix(), TenantId, t).WithTimeout(600);
                        ((DbDataAdapter)dataAdapter).Fill(data);
                    },
                    table,
                    maxAttempts: 5,
                    onFailure: error => { throw ThrowHelper.CantBackupTable(table.Name, error); },
                    onAttemptFailure: error => Logger.Warn("backup attempt failure: {0}", error));

                foreach (var col in data.Columns.Cast<DataColumn>().Where(col => col.DataType == typeof(DateTime)))
                {
                    col.DateTimeMode = DataSetDateTime.Unspecified;
                }
                module.PrepareData(data);

                Logger.Debug("end load table {0}", table.Name);
                Logger.Debug("begin saving table {0}", table.Name);

                var tmp = Path.GetTempFileName();
                using (var file = File.OpenWrite(tmp))
                {
                    data.WriteXml(file, XmlWriteMode.WriteSchema);
                    data.Clear();
                }
                writer.WriteEntry(KeyHelper.GetTableZipKey(module, data.TableName), tmp);
                File.Delete(tmp);

                Logger.Debug("end saving table {0}", table.Name);
            }
            SetCurrentStepProgress((int)((++tablesProcessed * 100) / (double)tablesCount));
        }
    }

    Logger.Debug("end saving data for module {0}", module.ModuleName);
}
private void ArchiveDir(IDataWriteOperator writer, string subDir) { Logger.DebugFormat("archive dir start {0}", subDir); foreach (var enumerateFile in Directory.EnumerateFiles(subDir, "*", SearchOption.AllDirectories)) { writer.WriteEntry(enumerateFile.Substring(subDir.Length), enumerateFile); File.Delete(enumerateFile); SetStepCompleted(); } Logger.DebugFormat("archive dir end {0}", subDir); }
private void DoBackupStorage(IDataWriteOperator writer)
{
    InvokeInfo("begin backup storage", Tenant.TenantAlias);

    var fileGroups = GetFilesToProcess().GroupBy(file => file.Module).ToList();
    int groupsProcessed = 0;
    foreach (var group in fileGroups)
    {
        IDataStore storage = StorageFactory.GetStorage(ConfigPath, Tenant.TenantId.ToString(), group.Key, null, null);
        foreach (BackupFileInfo file in group)
        {
            Stream stream = writer.BeginWriteEntry(KeyHelper.GetFileZipKey(file));
            int offset = 0;
            ActionInvoker.Try(state =>
            {
                var f = (BackupFileInfo)state;
                // offset is captured by the lambda, so a retry resumes reading
                // from where the previous attempt failed instead of restarting.
                using (var fileStream = storage.GetReadStream(f.Domain, f.Path, offset))
                {
                    var buffer = new byte[2048];
                    int readed;
                    while ((readed = fileStream.Read(buffer, 0, buffer.Length)) > 0)
                    {
                        stream.Write(buffer, 0, readed);
                        offset += readed;
                    }
                }
            },
            file,
            5,
            error => InvokeWarning("can't backup file ({0}:{1}): {2}", file.Module, file.Path, error));
            writer.EndWriteEntry();
        }
        SetStepProgress((int)(++groupsProcessed * 100 / (double)fileGroups.Count));
    }

    if (fileGroups.Count == 0)
    {
        SetStepCompleted();
    }

    var restoreInfoXml = new XElement(
        "storage_restore",
        fileGroups
            .SelectMany(group => group.Select(file => (object)file.ToXElement()))
            .ToArray());
    Stream restoreInfoStream = writer.BeginWriteEntry(KeyHelper.GetStorageRestoreInfoZipKey());
    restoreInfoXml.WriteTo(restoreInfoStream);
    writer.EndWriteEntry();

    InvokeInfo("end backup storage", Tenant.TenantAlias);
}
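// ActionInvoker.Try appears throughout with two call shapes: positional
// (action, state, maxAttempts, onFailure) and named (maxAttempts:, onFailure:,
// onAttemptFailure:). A minimal retry-helper sketch consistent with both call
// sites -- an inference from usage, not the declaration from the source tree:
static class ActionInvoker
{
    public static void Try(
        Action<object> action,
        object state,
        int maxAttempts,
        Action<Exception> onFailure = null,
        Action<Exception> onAttemptFailure = null)
    {
        for (var attempt = 1; attempt <= maxAttempts; attempt++)
        {
            try
            {
                action(state);
                return;
            }
            catch (Exception error)
            {
                if (attempt < maxAttempts)
                {
                    // A non-final attempt failed: report and retry.
                    if (onAttemptFailure != null) onAttemptFailure(error);
                }
                else
                {
                    // All attempts exhausted: hand the last error to the caller.
                    if (onFailure != null) onFailure(error);
                }
            }
        }
    }
}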
private void DoDumpStorage(IDataWriteOperator writer, IReadOnlyList<BackupFileInfo> files)
{
    Logger.Debug("begin backup storage");

    var dir = Path.GetDirectoryName(BackupFilePath);
    var subDir = Path.Combine(dir, Path.GetFileNameWithoutExtension(BackupFilePath));
    for (var i = 0; i < files.Count; i += TasksLimit)
    {
        var storageDir = Path.Combine(subDir, KeyHelper.GetStorage());
        if (!Directory.Exists(storageDir))
        {
            Directory.CreateDirectory(storageDir);
        }

        var tasks = new List<Task>(TasksLimit);
        for (var j = 0; j < TasksLimit && i + j < files.Count; j++)
        {
            var t = files[i + j];
            tasks.Add(Task.Run(() => DoDumpFile(t, storageDir)));
        }
        Task.WaitAll(tasks.ToArray());

        ArchiveDir(writer, subDir);
        Directory.Delete(storageDir, true);
    }

    var restoreInfoXml = new XElement("storage_restore", files.Select(file => (object)file.ToXElement()).ToArray());
    var tmpPath = Path.Combine(subDir, KeyHelper.GetStorageRestoreInfoZipKey());
    Directory.CreateDirectory(Path.GetDirectoryName(tmpPath));
    using (var tmpFile = File.OpenWrite(tmpPath))
    {
        restoreInfoXml.WriteTo(tmpFile);
    }
    writer.WriteEntry(KeyHelper.GetStorageRestoreInfoZipKey(), tmpPath);
    File.Delete(tmpPath);
    SetStepCompleted();

    Directory.Delete(subDir, true);
    Logger.Debug("end backup storage");
}
private void ArchiveDir(IDataWriteOperator writer, string subDir) { Logger.DebugFormat("archive dir start {0}", subDir); foreach (var enumerateFile in Directory.EnumerateFiles(subDir, "*", SearchOption.AllDirectories)) { var f = enumerateFile; if (!WorkContext.IsMono && enumerateFile.Length > MaxLength) { f = @"\\?\" + f; } writer.WriteEntry(enumerateFile.Substring(subDir.Length), f); File.Delete(f); SetStepCompleted(); } Logger.DebugFormat("archive dir end {0}", subDir); }
private void ArchiveDir(IDataWriteOperator writer, string subDir) { Logger.DebugFormat("archive dir start {0}", subDir); foreach (var enumerateFile in Directory.EnumerateFiles(subDir, "*", SearchOption.AllDirectories)) { var f = enumerateFile; if (!WorkContext.IsMono && enumerateFile.Length > MaxLength) { f = @"\\?\" + f; } using (var tmpFile = new FileStream(f, FileMode.OpenOrCreate, FileAccess.ReadWrite, FileShare.Read, 4096, FileOptions.DeleteOnClose)) { writer.WriteEntry(enumerateFile.Substring(subDir.Length), tmpFile); } SetStepCompleted(); } Logger.DebugFormat("archive dir end {0}", subDir); }
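// Note on the ArchiveDir variants above: the last one replaces the explicit
// File.Delete with FileOptions.DeleteOnClose, so the operating system removes each
// dumped file when the stream is disposed even if WriteEntry throws, whereas the
// earlier variants delete only after WriteEntry returns successfully.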
private void DoBackupModule(IDataWriteOperator writer, DbFactory dbFactory, IModuleSpecifics module)
{
    InvokeInfo("begin saving data for module {0}", module.ModuleName);

    int tablesCount = module.Tables.Count();
    int tablesProcessed = 0;
    using (var connection = dbFactory.OpenConnection(module.ConnectionStringName))
    {
        foreach (var table in module.Tables)
        {
            InvokeInfo("begin saving table {0}", table.Name);
            using (var data = new DataTable(table.Name))
            {
                ActionInvoker.Try(
                    state =>
                    {
                        data.Clear();
                        var t = (TableInfo)state;
                        var dataAdapter = dbFactory.CreateDataAdapter(module.ConnectionStringName);
                        dataAdapter.SelectCommand = module.CreateSelectCommand(connection.Fix(), Tenant.TenantId, t).WithTimeout(600);
                        ((DbDataAdapter)dataAdapter).Fill(data);
                    },
                    table,
                    maxAttempts: 5,
                    onFailure: error => { throw ThrowHelper.CantBackupTable(table.Name, error); },
                    onAttemptFailure: error => InvokeWarning("backup attempt failure: {0}", error));

                foreach (var col in data.Columns.Cast<DataColumn>().Where(col => col.DataType == typeof(DateTime)))
                {
                    col.DateTimeMode = DataSetDateTime.Unspecified;
                }

                var stream = writer.BeginWriteEntry(KeyHelper.GetTableZipKey(module, data.TableName));
                data.WriteXml(stream, XmlWriteMode.WriteSchema);
                writer.EndWriteEntry();
                data.Clear();
            }
            SetStepProgress((int)((++tablesProcessed * 100) / (double)tablesCount));
        }
    }

    InvokeInfo("end saving data for module {0}", module.ModuleName);
}
private List<XElement> BackupDatabase(int tenant, ConnectionStringSettings connectionString, IDataWriteOperator writer)
{
    var xml = new List<XElement>();
    var errors = 0;
    var timeout = TimeSpan.FromSeconds(1);
    using (var dbHelper = new DbHelper(tenant, connectionString))
    {
        var tables = dbHelper.GetTables();
        for (int i = 0; i < tables.Count; i++)
        {
            var table = tables[i];
            OnProgressChanged(table, (int)(i / (double)tables.Count * 100));
            xml.Add(new XElement(table));

            var stream = writer.BeginWriteEntry(string.Format("{0}\\{1}\\{2}", Name, connectionString.Name, table).ToLower());

            DataTable dataTable = null;
            while (true)
            {
                try
                {
                    dataTable = dbHelper.GetTable(table);
                    break;
                }
                catch
                {
                    errors++;
                    if (20 < errors)
                    {
                        throw;
                    }
                    Thread.Sleep(timeout);
                }
            }

            foreach (DataColumn c in dataTable.Columns)
            {
                if (c.DataType == typeof(DateTime))
                {
                    c.DateTimeMode = DataSetDateTime.Unspecified;
                }
            }
            dataTable.WriteXml(stream, XmlWriteMode.WriteSchema);
            writer.EndWriteEntry();
        }
    }
    return xml;
}
private void DumpTableData(string t, string dir, int count, string dbName, IDataWriteOperator writer)
{
    try
    {
        if (count == 0)
        {
            Logger.DebugFormat("dump table data stop {0}", t);
            SetStepCompleted(2);
            return;
        }

        Logger.DebugFormat("dump table data start {0}", t);

        var searchWithPrimary = false;
        string primaryIndex;
        int primaryIndexStep = 0;
        int primaryIndexStart = 0;
        List<string> columns;
        using (var dbManager = DbManager.FromHttpContext(dbName, 100000))
        {
            var columnsData = dbManager.ExecuteList(string.Format("SHOW COLUMNS FROM `{0}`;", t));
            columns = columnsData
                .Select(r => "`" + Convert.ToString(r[0]) + "`")
                .ToList();

            primaryIndex = dbManager
                .ExecuteList(
                    new SqlQuery("information_schema.`COLUMNS`")
                        .Select("COLUMN_NAME")
                        .Where("TABLE_SCHEMA", dbManager.Connection.Database)
                        .Where("TABLE_NAME", t)
                        .Where("COLUMN_KEY", "PRI")
                        .Where("DATA_TYPE", "int"))
                .ConvertAll(r => Convert.ToString(r[0]))
                .FirstOrDefault();

            var isLeft = dbManager.ExecuteList(string.Format("SHOW INDEXES FROM {0} WHERE COLUMN_NAME='{1}' AND seq_in_index=1", t, primaryIndex));
            searchWithPrimary = isLeft.Count == 1;

            if (searchWithPrimary)
            {
                var minMax = dbManager
                    .ExecuteList(new SqlQuery(t).SelectMax(primaryIndex).SelectMin(primaryIndex))
                    .ConvertAll(r => new Tuple<int, int>(Convert.ToInt32(r[0]), Convert.ToInt32(r[1])))
                    .FirstOrDefault();
                primaryIndexStart = minMax.Item2;
                primaryIndexStep = (minMax.Item1 - minMax.Item2) / count;
                if (primaryIndexStep < Limit)
                {
                    primaryIndexStep = Limit;
                }
            }
        }

        var path = Path.Combine(dir, t);
        var offset = 0;
        do
        {
            List<object[]> result;
            if (searchWithPrimary)
            {
                result = GetDataWithPrimary(t, columns, primaryIndex, primaryIndexStart, primaryIndexStep, dbName);
                primaryIndexStart += primaryIndexStep;
            }
            else
            {
                result = GetData(t, columns, offset, dbName);
            }
            offset += Limit;

            var resultCount = result.Count;
            if (resultCount == 0)
            {
                break;
            }
            SaveToFile(path, t, columns, result);
        } while (true);

        SetStepCompleted();
        Logger.DebugFormat("dump table data stop {0}", t);
    }
    catch (Exception e)
    {
        Logger.Error(e);
        throw;
    }
}
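// When the table has a single-column integer primary key that leads an index,
// DumpTableData above pages by key range rather than LIMIT/OFFSET, which stays fast
// on large tables because each window is an index seek. GetDataWithPrimary itself is
// not shown in this section; a hypothetical sketch of the query it presumably issues
// (the window bounds and helper body are assumptions, not the source implementation):
private List<object[]> GetDataWithPrimary(string table, List<string> columns, string primary, int start, int step, string dbName)
{
    using (var dbManager = DbManager.FromHttpContext(dbName, 100000))
    {
        // Select one window of rows: start <= primary < start + step.
        var query = string.Format(
            "SELECT {0} FROM `{1}` WHERE {2} >= {3} AND {2} < {4};",
            string.Join(",", columns), table, primary, start, start + step);
        return dbManager.ExecuteList(query);
    }
}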
public IEnumerable<XElement> GetElements(int tenant, string[] configs, IDataWriteOperator writer) { return null; }
private List<XElement> BackupDatabase(int tenant, ConnectionStringSettings connectionString, IDataWriteOperator writer)
{
    var xml = new List<XElement>();
    var errors = 0;
    var timeout = TimeSpan.FromSeconds(1);
    var tables = dbHelper.GetTables();
    for (var i = 0; i < tables.Count; i++)
    {
        var table = tables[i];
        OnProgressChanged(table, (int)(i / (double)tables.Count * 100));
        if (processedTables.Contains(table, StringComparer.InvariantCultureIgnoreCase))
        {
            continue;
        }
        xml.Add(new XElement(table));

        DataTable dataTable = null;
        while (true)
        {
            try
            {
                dataTable = dbHelper.GetTable(table, tenant);
                break;
            }
            catch
            {
                errors++;
                if (20 < errors)
                {
                    throw;
                }
                Thread.Sleep(timeout);
            }
        }

        foreach (DataColumn c in dataTable.Columns)
        {
            if (c.DataType == typeof(DateTime))
            {
                c.DateTimeMode = DataSetDateTime.Unspecified;
            }
        }

        var tmp = Path.GetTempFileName();
        using (var file = File.OpenWrite(tmp))
        {
            dataTable.WriteXml(file, XmlWriteMode.WriteSchema);
        }
        writer.WriteEntry(string.Format("{0}\\{1}\\{2}", Name, connectionString.Name, table).ToLower(), tmp);
        File.Delete(tmp);

        processedTables.Add(table);
    }
    return xml;
}
private void DoDump(IDataWriteOperator writer)
{
    var tmp = Path.GetTempFileName();
    File.AppendAllText(tmp, true.ToString());
    writer.WriteEntry(KeyHelper.GetDumpKey(), tmp);

    List<string> tables;
    var files = new List<BackupFileInfo>();
    using (var dbManager = new DbManager("default", 100000))
    {
        tables = dbManager.ExecuteList("show tables;").Select(r => Convert.ToString(r[0])).ToList();
    }

    var stepscount = tables.Count * 4; // (schema + data) * (dump + zip)
    if (ProcessStorage)
    {
        var tenants = CoreContext.TenantManager.GetTenants(false).Select(r => r.TenantId);
        foreach (var t in tenants)
        {
            files.AddRange(GetFiles(t));
        }
        stepscount += files.Count * 2 + 1;
        Logger.Debug("files:" + files.Count);
    }
    SetStepsCount(stepscount);

    var excluded = ModuleProvider.AllModules
        .Where(r => IgnoredModules.Contains(r.ModuleName))
        .SelectMany(r => r.Tables)
        .Select(r => r.Name)
        .ToList();
    excluded.AddRange(IgnoredTables);
    excluded.Add("res_");

    var dir = Path.GetDirectoryName(BackupFilePath);
    var subDir = Path.Combine(dir, Path.GetFileNameWithoutExtension(BackupFilePath));
    var schemeDir = Path.Combine(subDir, KeyHelper.GetDatabaseSchema());
    var dataDir = Path.Combine(subDir, KeyHelper.GetDatabaseData());
    if (!Directory.Exists(schemeDir))
    {
        Directory.CreateDirectory(schemeDir);
    }
    if (!Directory.Exists(dataDir))
    {
        Directory.CreateDirectory(dataDir);
    }

    // Sort tables by ascending row count so tables of similar size land in the
    // same parallel batch below.
    var dict = tables.ToDictionary(t => t, SelectCount);
    tables.Sort((pair1, pair2) => dict[pair1].CompareTo(dict[pair2]));

    for (var i = 0; i < tables.Count; i += TasksLimit)
    {
        var tasks = new List<Task>(TasksLimit * 2);
        for (var j = 0; j < TasksLimit && i + j < tables.Count; j++)
        {
            var t = tables[i + j];
            tasks.Add(Task.Run(() => DumpTableScheme(t, schemeDir)));
            if (!excluded.Any(t.StartsWith))
            {
                tasks.Add(Task.Run(() => DumpTableData(t, dataDir, dict[t])));
            }
            else
            {
                SetStepCompleted(2);
            }
        }
        Task.WaitAll(tasks.ToArray());

        ArchiveDir(writer, subDir);
    }

    Logger.DebugFormat("dir remove start {0}", subDir);
    Directory.Delete(subDir, true);
    Logger.DebugFormat("dir remove end {0}", subDir);

    if (ProcessStorage)
    {
        DoDumpStorage(writer, files);
    }
}
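// Step-accounting example with hypothetical numbers: for 200 tables and 5,000
// storage files, SetStepsCount receives 200 * 4 + 5000 * 2 + 1 = 10,801 steps --
// four per table (schema dump + zip, data dump + zip), two per file, plus one
// final step, presumably for the storage restore-info entry.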
private void DoDump(IDataWriteOperator writer, string dbName, List<string> tables)
{
    var excluded = ModuleProvider.AllModules
        .Where(r => IgnoredModules.Contains(r.ModuleName))
        .SelectMany(r => r.Tables)
        .Select(r => r.Name)
        .ToList();
    excluded.AddRange(IgnoredTables);
    excluded.Add("res_");

    var dir = Path.GetDirectoryName(BackupFilePath);
    var subDir = Path.Combine(dir, Path.GetFileNameWithoutExtension(BackupFilePath));
    string schemeDir;
    string dataDir;
    if (dbName == "default")
    {
        schemeDir = Path.Combine(subDir, KeyHelper.GetDatabaseSchema());
        dataDir = Path.Combine(subDir, KeyHelper.GetDatabaseData());
    }
    else
    {
        schemeDir = Path.Combine(subDir, dbName, KeyHelper.GetDatabaseSchema());
        dataDir = Path.Combine(subDir, dbName, KeyHelper.GetDatabaseData());
    }
    if (!Directory.Exists(schemeDir))
    {
        Directory.CreateDirectory(schemeDir);
    }
    if (!Directory.Exists(dataDir))
    {
        Directory.CreateDirectory(dataDir);
    }

    var dict = new Dictionary<string, int>();
    foreach (var table in tables)
    {
        dict.Add(table, SelectCount(table, dbName));
    }
    tables.Sort((pair1, pair2) => dict[pair1].CompareTo(dict[pair2]));

    for (var i = 0; i < tables.Count; i += TasksLimit)
    {
        var tasks = new List<Task>(TasksLimit * 2);
        for (var j = 0; j < TasksLimit && i + j < tables.Count; j++)
        {
            var t = tables[i + j];
            tasks.Add(Task.Run(() => DumpTableScheme(t, schemeDir, dbName)));
            if (!excluded.Any(t.StartsWith))
            {
                tasks.Add(Task.Run(() => DumpTableData(t, dataDir, dict[t], dbName, writer)));
            }
            else
            {
                SetStepCompleted(2);
            }
        }
        Task.WaitAll(tasks.ToArray());

        ArchiveDir(writer, subDir);
    }
}
private void DoBackupStorage(IDataWriteOperator writer, DbFactory dbFactory)
{
    Logger.Debug("begin backup storage");

    var files = GetFilesToProcess();

    // Exclude files that belong to backups already stored for this tenant, so the
    // new backup does not pull in earlier backup archives.
    var exclude = new List<string>();
    using (var db = dbFactory.OpenConnection())
    using (var command = db.CreateCommand())
    {
        command.CommandText = "select storage_path from backup_backup where tenant_id = " + TenantId + " and storage_type = 0 and storage_path is not null";
        using (var reader = command.ExecuteReader())
        {
            while (reader.Read())
            {
                exclude.Add(reader.GetString(0));
            }
        }
    }
    files = files.Where(f => !exclude.Any(e => f.Path.Contains(string.Format("/file_{0}/", e))));

    var fileGroups = files.GroupBy(file => file.Module).ToList();
    var groupsProcessed = 0;
    foreach (var group in fileGroups)
    {
        var storage = StorageFactory.GetStorage(ConfigPath, TenantId.ToString(), group.Key);
        foreach (var file in group)
        {
            ActionInvoker.Try(state =>
            {
                var f = (BackupFileInfo)state;
                using (var fileStream = storage.GetReadStream(f.Domain, f.Path))
                {
                    var tmp = Path.GetTempFileName();
                    try
                    {
                        using (var tmpFile = File.OpenWrite(tmp))
                        {
                            fileStream.CopyTo(tmpFile);
                        }
                        writer.WriteEntry(KeyHelper.GetFileZipKey(file), tmp);
                    }
                    finally
                    {
                        if (File.Exists(tmp))
                        {
                            File.Delete(tmp);
                        }
                    }
                }
            },
            file,
            5,
            error => Logger.Warn("can't backup file ({0}:{1}): {2}", file.Module, file.Path, error));
        }
        SetCurrentStepProgress((int)(++groupsProcessed * 100 / (double)fileGroups.Count));
    }

    if (fileGroups.Count == 0)
    {
        SetStepCompleted();
    }

    var restoreInfoXml = new XElement(
        "storage_restore",
        fileGroups
            .SelectMany(group => group.Select(file => (object)file.ToXElement()))
            .ToArray());
    var tmpPath = Path.GetTempFileName();
    using (var tmpFile = File.OpenWrite(tmpPath))
    {
        restoreInfoXml.WriteTo(tmpFile);
    }
    writer.WriteEntry(KeyHelper.GetStorageRestoreInfoZipKey(), tmpPath);
    File.Delete(tmpPath);

    Logger.Debug("end backup storage");
}
private void DoDump(IDataWriteOperator writer)
{
    var databases = new Dictionary<string, List<string>>();
    using (var dbManager = DbManager.FromHttpContext("default", 100000))
    {
        dbManager.ExecuteList("select id, connection_string from mail_server_server").ForEach(r =>
        {
            var dbName = GetDbName((int)r[0], JsonConvert.DeserializeObject<Dictionary<string, object>>(Convert.ToString(r[1]))["DbConnection"].ToString());
            using (var dbManager1 = DbManager.FromHttpContext(dbName, 100000))
            {
                var tables = dbManager1.ExecuteList("show tables;").Select(res => Convert.ToString(res[0])).ToList();
                databases.Add(dbName, tables);
            }
        });
    }
    using (var dbManager = DbManager.FromHttpContext("default", 100000))
    {
        var tables = dbManager.ExecuteList("show tables;").Select(res => Convert.ToString(res[0])).ToList();
        databases.Add("default", tables);
    }

    using (var stream = new MemoryStream(Encoding.UTF8.GetBytes(true.ToString())))
    {
        writer.WriteEntry(KeyHelper.GetDumpKey(), stream);
    }

    var files = new List<BackupFileInfo>();
    var stepscount = 0;
    foreach (var db in databases)
    {
        stepscount += db.Value.Count * 4; // (schema + data) * (dump + zip)
    }
    if (ProcessStorage)
    {
        var tenants = CoreContext.TenantManager.GetTenants(false).Select(r => r.TenantId);
        foreach (var t in tenants)
        {
            files.AddRange(GetFiles(t));
        }
        stepscount += files.Count * 2 + 1;
        Logger.Debug("files:" + files.Count);
    }
    SetStepsCount(stepscount);

    foreach (var db in databases)
    {
        DoDump(writer, db.Key, db.Value);
    }

    var dir = Path.GetDirectoryName(BackupFilePath);
    var subDir = Path.Combine(dir, Path.GetFileNameWithoutExtension(BackupFilePath));
    Logger.DebugFormat("dir remove start {0}", subDir);
    Directory.Delete(subDir, true);
    Logger.DebugFormat("dir remove end {0}", subDir);

    if (ProcessStorage)
    {
        DoDumpStorage(writer, files);
    }
}