private void DoBackupModule(IDataWriteOperator writer, DbFactory dbFactory, IModuleSpecifics module)
{
    Logger.Debug("begin saving data for module {0}", module.ModuleName);

    var tablesToProcess = module.Tables.Where(t => !IgnoredTables.Contains(t.Name) && t.InsertMethod != InsertMethod.None).ToList();
    var tablesCount = tablesToProcess.Count;
    var tablesProcessed = 0;

    using (var connection = dbFactory.OpenConnection())
    {
        foreach (var table in tablesToProcess)
        {
            Logger.Debug("begin load table {0}", table.Name);
            using (var data = new DataTable(table.Name))
            {
                ActionInvoker.Try(
                    state =>
                    {
                        data.Clear();
                        int counts;
                        var offset = 0;
                        do
                        {
                            // Load the table page by page until a short (last) page is returned.
                            var t = (TableInfo)state;
                            var dataAdapter = dbFactory.CreateDataAdapter();
                            dataAdapter.SelectCommand = module.CreateSelectCommand(connection.Fix(), TenantId, t, Limit, offset).WithTimeout(600);
                            counts = ((DbDataAdapter)dataAdapter).Fill(data);
                            offset += Limit;
                        } while (counts == Limit);
                    },
                    table,
                    maxAttempts: 5,
                    onFailure: error => { throw ThrowHelper.CantBackupTable(table.Name, error); },
                    onAttemptFailure: error => Logger.Warn("backup attempt failure: {0}", error));

                // Serialize DateTime columns without time zone conversion.
                foreach (var col in data.Columns.Cast<DataColumn>().Where(col => col.DataType == typeof(DateTime)))
                {
                    col.DateTimeMode = DataSetDateTime.Unspecified;
                }

                module.PrepareData(data);

                Logger.Debug("end load table {0}", table.Name);
                Logger.Debug("begin saving table {0}", table.Name);

                var tmp = Path.GetTempFileName();
                using (var file = File.OpenWrite(tmp))
                {
                    data.WriteXml(file, XmlWriteMode.WriteSchema);
                    data.Clear();
                }

                writer.WriteEntry(KeyHelper.GetTableZipKey(module, data.TableName), tmp);
                File.Delete(tmp);

                Logger.Debug("end saving table {0}", table.Name);
            }

            SetCurrentStepProgress((int)((++tablesProcessed * 100) / (double)tablesCount));
        }
    }

    Logger.Debug("end saving data for module {0}", module.ModuleName);
}
private void DoDump(IDataWriteOperator writer)
{
    // Mark the archive as a full dump.
    var tmp = Path.GetTempFileName();
    File.AppendAllText(tmp, true.ToString());
    writer.WriteEntry(KeyHelper.GetDumpKey(), tmp);

    List<string> tables;
    var files = new List<BackupFileInfo>();
    using (var dbManager = new DbManager("default", 100000))
    {
        tables = dbManager.ExecuteList("show tables;").Select(r => Convert.ToString(r[0])).ToList();
    }

    var stepscount = tables.Count * 4; // (schema + data) * (dump + zip)
    if (ProcessStorage)
    {
        var tenants = CoreContext.TenantManager.GetTenants(false).Select(r => r.TenantId);
        foreach (var t in tenants)
        {
            files.AddRange(GetFiles(t));
        }
        stepscount += files.Count * 2 + 1;
        Logger.Debug("files:" + files.Count);
    }
    SetStepsCount(stepscount);

    var excluded = ModuleProvider.AllModules.Where(r => IgnoredModules.Contains(r.ModuleName)).SelectMany(r => r.Tables).Select(r => r.Name).ToList();
    excluded.AddRange(IgnoredTables);
    excluded.Add("res_");

    var dir = Path.GetDirectoryName(BackupFilePath);
    var subDir = Path.Combine(dir, Path.GetFileNameWithoutExtension(BackupFilePath));
    var schemeDir = Path.Combine(subDir, KeyHelper.GetDatabaseSchema());
    var dataDir = Path.Combine(subDir, KeyHelper.GetDatabaseData());

    if (!Directory.Exists(schemeDir))
    {
        Directory.CreateDirectory(schemeDir);
    }
    if (!Directory.Exists(dataDir))
    {
        Directory.CreateDirectory(dataDir);
    }

    // Process tables in ascending row-count order.
    var dict = tables.ToDictionary(t => t, SelectCount);
    tables.Sort((pair1, pair2) => dict[pair1].CompareTo(dict[pair2]));

    for (var i = 0; i < tables.Count; i += TasksLimit)
    {
        var tasks = new List<Task>(TasksLimit * 2);
        for (var j = 0; j < TasksLimit && i + j < tables.Count; j++)
        {
            var t = tables[i + j];
            tasks.Add(Task.Run(() => DumpTableScheme(t, schemeDir)));
            if (!excluded.Any(t.StartsWith))
            {
                tasks.Add(Task.Run(() => DumpTableData(t, dataDir, dict[t])));
            }
            else
            {
                SetStepCompleted(2);
            }
        }
        Task.WaitAll(tasks.ToArray());

        ArchiveDir(writer, subDir);
    }

    Logger.DebugFormat("dir remove start {0}", subDir);
    Directory.Delete(subDir, true);
    Logger.DebugFormat("dir remove end {0}", subDir);

    if (ProcessStorage)
    {
        DoDumpStorage(writer, files);
    }
}
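// SelectCount is referenced above (and again in the per-database DoDump overload) but is not part of
// this listing. A minimal sketch of a row-count helper built only on the DbManager.ExecuteList call
// already used in this file; the method shape and the plain COUNT(*) query are assumptions, and the
// real implementation may use a cheaper estimate instead.
private static int SelectCountSketch(string table)
{
    using (var dbManager = new DbManager("default", 100000))
    {
        // A single-row, single-column result: the number of rows in the table.
        var rows = dbManager.ExecuteList(string.Format("select count(*) from {0};", table));
        return Convert.ToInt32(rows[0][0]);
    }
}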
private void DoBackupStorage(IDataWriteOperator writer, List<IGrouping<string, BackupFileInfo>> fileGroups)
{
    Logger.Debug("begin backup storage");

    foreach (var group in fileGroups)
    {
        var filesProcessed = 0;
        var filesCount = group.Count();

        foreach (var file in group)
        {
            var storage = StorageFactory.GetStorage(ConfigPath, TenantId.ToString(), group.Key);
            var file1 = file;
            ActionInvoker.Try(state =>
            {
                var f = (BackupFileInfo)state;
                using (var fileStream = storage.GetReadStream(f.Domain, f.Path))
                {
                    var tmp = Path.GetTempFileName();
                    try
                    {
                        using (var tmpFile = File.OpenWrite(tmp))
                        {
                            fileStream.CopyTo(tmpFile);
                        }
                        writer.WriteEntry(file1.GetZipKey(), tmp);
                    }
                    finally
                    {
                        if (File.Exists(tmp))
                        {
                            File.Delete(tmp);
                        }
                    }
                }
            },
            file,
            5,
            error => Logger.WarnFormat("can't backup file ({0}:{1}): {2}", file1.Module, file1.Path, error));

            SetCurrentStepProgress((int)(++filesProcessed * 100 / (double)filesCount));
        }
    }

    var restoreInfoXml = new XElement(
        "storage_restore",
        fileGroups
            .SelectMany(group => group.Select(file => (object)file.ToXElement()))
            .ToArray());

    var tmpPath = Path.GetTempFileName();
    using (var tmpFile = File.OpenWrite(tmpPath))
    {
        restoreInfoXml.WriteTo(tmpFile);
    }
    writer.WriteEntry(KeyHelper.GetStorageRestoreInfoZipKey(), tmpPath);
    File.Delete(tmpPath);

    Logger.Debug("end backup storage");
}
private void RestoreFromDump(IDataReadOperator dataReader)
{
    var keyBase = KeyHelper.GetDatabaseSchema();
    var keys = dataReader.GetEntries(keyBase).Select(r => Path.GetFileName(r)).ToList();

    var upgrades = new List<string>();
    var upgradesPath = Path.GetFullPath(Path.Combine(Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location), UpgradesPath));
    if (!string.IsNullOrEmpty(upgradesPath) && Directory.Exists(upgradesPath))
    {
        upgrades = Directory.GetFiles(upgradesPath).ToList();
    }

    var stepscount = keys.Count * 2 + upgrades.Count;
    if (ProcessStorage)
    {
        var storageModules = StorageFactory.GetModuleList(ConfigPath).Where(IsStorageModuleAllowed);
        var tenants = CoreContext.TenantManager.GetTenants(false);
        stepscount += storageModules.Count() * tenants.Count;
        SetStepsCount(stepscount + 1);
        DoDeleteStorage(storageModules, tenants);
    }
    else
    {
        SetStepsCount(stepscount);
    }

    for (var i = 0; i < keys.Count; i += TasksLimit)
    {
        var tasks = new List<Task>(TasksLimit * 2);
        for (var j = 0; j < TasksLimit && i + j < keys.Count; j++)
        {
            // Restore the schema entry first, then the data entry for the same table.
            var key1 = Path.Combine(KeyHelper.GetDatabaseSchema(), keys[i + j]);
            tasks.Add(RestoreFromDumpFile(dataReader, key1)
                .ContinueWith(r => RestoreFromDumpFile(dataReader, KeyHelper.GetDatabaseData(key1.Substring(keyBase.Length + 1)))));
        }
        Task.WaitAll(tasks.ToArray());
    }

    var comparer = new SqlComparer();
    foreach (var u in upgrades.OrderBy(Path.GetFileName, comparer))
    {
        using (var s = File.OpenRead(u))
        {
            if (u.Contains(".upgrade."))
            {
                RunMysqlFile(s, null).Wait();
            }
            else if (u.Contains(".data") || u.Contains(".upgrade"))
            {
                RunMysqlProcedure(s).Wait();
            }
            else
            {
                RunMysqlFile(s).Wait();
            }
        }
        SetStepCompleted();
    }
}
public override void RunJob()
{
    Logger.Debug("begin restore portal");
    Logger.Debug("begin restore data");

    using (var dataReader = new ZipReadOperator(BackupFilePath))
    {
        using (var entry = dataReader.GetEntry(KeyHelper.GetDumpKey()))
        {
            Dump = entry != null && CoreContext.Configuration.Standalone;
        }

        var dbFactory = new DbFactory(ConfigPath);
        if (Dump)
        {
            RestoreFromDump(dataReader);
        }
        else
        {
            var modulesToProcess = GetModulesToProcess().ToList();
            SetStepsCount(ProcessStorage ? modulesToProcess.Count + 1 : modulesToProcess.Count);

            foreach (var module in modulesToProcess)
            {
                var restoreTask = new RestoreDbModuleTask(Logger, module, dataReader, _columnMapper, dbFactory, ReplaceDate, Dump);
                restoreTask.ProgressChanged += (sender, args) => SetCurrentStepProgress(args.Progress);
                foreach (var tableName in IgnoredTables)
                {
                    restoreTask.IgnoreTable(tableName);
                }
                restoreTask.RunJob();
            }
        }

        Logger.Debug("end restore data");

        if (ProcessStorage)
        {
            if (CoreContext.Configuration.Standalone)
            {
                Logger.Debug("clear cache");
                AscCache.ClearCache();
            }
            DoRestoreStorage(dataReader);
        }
        if (UnblockPortalAfterCompleted)
        {
            SetTenantActive(dbFactory, _columnMapper.GetTenantMapping());
        }
    }

    if (CoreContext.Configuration.Standalone)
    {
        Logger.Debug("refresh license");
        try
        {
            LicenseReader.RejectLicense();
        }
        catch (Exception ex)
        {
            Logger.Error(ex);
        }

        Logger.Debug("clear cache");
        AscCache.ClearCache();
    }

    Logger.Debug("end restore portal");
}
private void RestoreFromDump(IDataReadOperator dataReader, DbFactory dbFactory)
{
    var keyBase = KeyHelper.GetDatabaseSchema();
    var keys = dataReader.Entries.Where(r => r.StartsWith(keyBase)).ToList();

    var upgrades = new List<string>();
    if (!string.IsNullOrEmpty(UpgradesPath) && Directory.Exists(UpgradesPath))
    {
        upgrades = Directory.GetFiles(UpgradesPath).ToList();
    }

    var stepscount = keys.Count * 2 + upgrades.Count;
    SetStepsCount(ProcessStorage ? stepscount + 1 : stepscount);

    for (var i = 0; i < keys.Count; i += TasksLimit)
    {
        var tasks = new List<Task>(TasksLimit * 2);
        for (var j = 0; j < TasksLimit && i + j < keys.Count; j++)
        {
            var key1 = keys[i + j];
            tasks.Add(RestoreFromDumpFile(dataReader, key1)
                .ContinueWith(r => RestoreFromDumpFile(dataReader, KeyHelper.GetDatabaseData(key1.Substring(keyBase.Length + 1)))));
        }
        Task.WaitAll(tasks.ToArray());
    }

    var comparer = new SqlComparer();
    foreach (var u in upgrades.OrderBy(Path.GetFileName, comparer))
    {
        RunMysqlFile(dbFactory, u, true);
        SetStepCompleted();
    }
}
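// The RunMysqlFile helper called above is not part of this listing. A minimal sketch of one way to
// execute a .sql upgrade or dump file, assuming the MySql.Data client library is available and the
// script fits in memory; the method name, signature, and connection handling here are illustrative
// assumptions, not the actual implementation.
// requires: using MySql.Data.MySqlClient; using System.IO;
private static void RunMysqlFileSketch(string connectionString, string filePath)
{
    using (var connection = new MySqlConnection(connectionString))
    {
        connection.Open();
        // MySqlScript handles multi-statement files, including custom delimiters used by procedures.
        var script = new MySqlScript(connection, File.ReadAllText(filePath));
        script.Execute();
    }
}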
private void RestoreTable(IDbConnection connection, TableInfo tableInfo, ref int transactionsCommited, ref int rowsInserted)
{
    using (var stream = _reader.GetEntry(KeyHelper.GetTableZipKey(_module, tableInfo.Name)))
    {
        var lowImportanceRelations = _module
            .TableRelations
            .Where(r => string.Equals(r.ParentTable, tableInfo.Name, StringComparison.InvariantCultureIgnoreCase))
            .Where(r => r.Importance == RelationImportance.Low && !r.IsSelfRelation())
            .Select(r => Tuple.Create(r, _module.Tables.Single(t => t.Name == r.ChildTable)))
            .ToList();

        // Skip the chunks already committed on a previous attempt and replay the rest
        // in transactions of TransactionLength rows.
        foreach (IEnumerable<DataRowInfo> rows in GetRows(tableInfo, stream)
            .Skip(transactionsCommited * TransactionLength)
            .MakeParts(TransactionLength))
        {
            using (var transaction = connection.BeginTransaction())
            {
                int rowsSuccess = 0;
                foreach (DataRowInfo row in rows)
                {
                    if (_replaceDate)
                    {
                        foreach (var column in tableInfo.DateColumns)
                        {
                            _columnMapper.SetDateMapping(tableInfo.Name, column, row[column.Key]);
                        }
                    }

                    object oldIdValue = null;
                    object newIdValue = null;

                    if (tableInfo.HasIdColumn())
                    {
                        // Map the old id to a new one; generate a fresh id when no mapping exists yet.
                        oldIdValue = row[tableInfo.IdColumn];
                        newIdValue = _columnMapper.GetMapping(tableInfo.Name, tableInfo.IdColumn, oldIdValue);
                        if (newIdValue == null)
                        {
                            if (tableInfo.IdType == IdType.Guid)
                            {
                                newIdValue = Guid.NewGuid().ToString("D");
                            }
                            else if (tableInfo.IdType == IdType.Integer)
                            {
                                newIdValue = connection
                                    .CreateCommand(string.Format("select max({0}) from {1};", tableInfo.IdColumn, tableInfo.Name))
                                    .WithTimeout(120)
                                    .ExecuteScalar<int>() + 1;
                            }
                        }
                        if (newIdValue != null)
                        {
                            _columnMapper.SetMapping(tableInfo.Name, tableInfo.IdColumn, oldIdValue, newIdValue);
                        }
                    }

                    var insertCommand = _module.CreateInsertCommand(connection, _columnMapper, tableInfo, row);
                    if (insertCommand == null)
                    {
                        WarnCantInsertRow(row);
                        _columnMapper.Rollback();
                        continue;
                    }
                    insertCommand.WithTimeout(120).ExecuteNonQuery();
                    rowsSuccess++;

                    if (tableInfo.HasIdColumn() && tableInfo.IdType == IdType.Autoincrement)
                    {
                        var lastIdCommand = _factory.CreateLastInsertIdCommand(_module.ConnectionStringName);
                        lastIdCommand.Connection = connection;
                        newIdValue = Convert.ToInt32(lastIdCommand.ExecuteScalar());
                        _columnMapper.SetMapping(tableInfo.Name, tableInfo.IdColumn, oldIdValue, newIdValue);
                    }

                    _columnMapper.Commit();

                    foreach (Tuple<RelationInfo, TableInfo> relation in lowImportanceRelations)
                    {
                        if (!relation.Item2.HasTenantColumn())
                        {
                            InvokeWarning("Table {0} does not contain tenant id column. Can't apply low importance relations on such tables.", relation.Item2.Name);
                            continue;
                        }

                        object oldValue = row[relation.Item1.ParentColumn];
                        object newValue = _columnMapper.GetMapping(relation.Item1.ParentTable, relation.Item1.ParentColumn, oldValue);

                        connection.CreateCommand(string.Format("update {0} set {1} = {2} where {1} = {3} and {4} = {5}",
                            relation.Item1.ChildTable,
                            relation.Item1.ChildColumn,
                            newValue is string ? "'" + newValue + "'" : newValue,
                            oldValue is string ? "'" + oldValue + "'" : oldValue,
                            relation.Item2.TenantColumn,
                            _columnMapper.GetTenantMapping())).WithTimeout(120).ExecuteNonQuery();
                    }
                }

                transaction.Commit();
                transactionsCommited++;
                rowsInserted += rowsSuccess;
            }
        }
    }
}
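// The low-importance relation update above builds its SQL by string concatenation and quotes string
// values by hand. A hedged alternative sketch using a parameterized command over plain ADO.NET
// (IDbCommand / IDbDataParameter); the helper names and signatures are illustrative, not part of the
// original code. Table and column names still have to come from trusted metadata; only the values
// are parameterized.
// requires: using System; using System.Data;
private static void AddParameter(IDbCommand command, string name, object value)
{
    var parameter = command.CreateParameter();
    parameter.ParameterName = name;
    parameter.Value = value ?? DBNull.Value;
    command.Parameters.Add(parameter);
}

private static void UpdateChildRelationSketch(IDbConnection connection, string childTable, string childColumn,
    object oldValue, object newValue, string tenantColumn, object tenantId)
{
    using (var command = connection.CreateCommand())
    {
        command.CommandText = string.Format(
            "update {0} set {1} = @newValue where {1} = @oldValue and {2} = @tenantId",
            childTable, childColumn, tenantColumn);

        AddParameter(command, "@newValue", newValue);
        AddParameter(command, "@oldValue", oldValue);
        AddParameter(command, "@tenantId", tenantId);

        command.ExecuteNonQuery();
    }
}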
private void DoBackupStorage(IDataWriteOperator writer, DbFactory dbFactory)
{
    Logger.Debug("begin backup storage");

    var files = GetFilesToProcess();
    var exclude = new List<string>();

    // Collect storage paths of existing backups for this tenant and exclude the matching files.
    using (var db = dbFactory.OpenConnection())
    using (var command = db.CreateCommand())
    {
        command.CommandText = "select storage_path from backup_backup where tenant_id = " + TenantId + " and storage_type = 0 and storage_path is not null";
        using (var reader = command.ExecuteReader())
        {
            while (reader.Read())
            {
                exclude.Add(reader.GetString(0));
            }
        }
    }
    files = files.Where(f => !exclude.Any(e => f.Path.Contains(string.Format("/file_{0}/", e))));

    var fileGroups = files.GroupBy(file => file.Module).ToList();
    var groupsProcessed = 0;
    foreach (var group in fileGroups)
    {
        var storage = StorageFactory.GetStorage(ConfigPath, TenantId.ToString(), group.Key);
        foreach (var file in group)
        {
            ActionInvoker.Try(state =>
            {
                var f = (BackupFileInfo)state;
                using (var fileStream = storage.GetReadStream(f.Domain, f.Path))
                {
                    var tmp = Path.GetTempFileName();
                    try
                    {
                        using (var tmpFile = File.OpenWrite(tmp))
                        {
                            fileStream.CopyTo(tmpFile);
                        }
                        writer.WriteEntry(KeyHelper.GetFileZipKey(file), tmp);
                    }
                    finally
                    {
                        if (File.Exists(tmp))
                        {
                            File.Delete(tmp);
                        }
                    }
                }
            },
            file,
            5,
            error => Logger.Warn("can't backup file ({0}:{1}): {2}", file.Module, file.Path, error));
        }
        SetCurrentStepProgress((int)(++groupsProcessed * 100 / (double)fileGroups.Count));
    }

    if (fileGroups.Count == 0)
    {
        SetStepCompleted();
    }

    var restoreInfoXml = new XElement(
        "storage_restore",
        fileGroups
            .SelectMany(group => group.Select(file => (object)file.ToXElement()))
            .ToArray());

    var tmpPath = Path.GetTempFileName();
    using (var tmpFile = File.OpenWrite(tmpPath))
    {
        restoreInfoXml.WriteTo(tmpFile);
    }
    writer.WriteEntry(KeyHelper.GetStorageRestoreInfoZipKey(), tmpPath);
    File.Delete(tmpPath);

    Logger.Debug("end backup storage");
}
private void DoRestoreStorage(IDataReadOperator dataReader)
{
    Logger.Debug("begin restore storage");

    var fileGroups = GetFilesToProcess(dataReader).GroupBy(file => file.Module).ToList();
    var groupsProcessed = 0;
    foreach (var group in fileGroups)
    {
        var storage = StorageFactory.GetStorage(ConfigPath, _columnMapper.GetTenantMapping().ToString(), group.Key);
        var quotaController = storage.QuotaController;
        try
        {
            // Disable quota checks while files are being written back, then restore the controller.
            storage.SetQuotaController(null);

            foreach (var file in group)
            {
                var adjustedPath = file.Path;
                var module = ModuleProvider.GetByStorageModule(file.Module, file.Domain);
                if (module == null || module.TryAdjustFilePath(_columnMapper, ref adjustedPath))
                {
                    using (var stream = dataReader.GetEntry(KeyHelper.GetFileZipKey(file)))
                    {
                        try
                        {
                            storage.Save(file.Domain, adjustedPath, module != null ? module.PrepareData(KeyHelper.GetFileZipKey(file), stream, _columnMapper) : stream);
                        }
                        catch (Exception error)
                        {
                            Logger.Warn("can't restore file ({0}:{1}): {2}", file.Module, file.Path, error);
                        }
                    }
                }
            }
        }
        finally
        {
            if (quotaController != null)
            {
                storage.SetQuotaController(quotaController);
            }
        }

        SetCurrentStepProgress((int)(++groupsProcessed * 100 / (double)fileGroups.Count));
    }

    if (fileGroups.Count == 0)
    {
        SetStepCompleted();
    }

    Logger.Debug("end restore storage");
}
private void RestoreFromDump(IDataReadOperator dataReader)
{
    var keyBase = KeyHelper.GetDatabaseSchema();
    var keys = dataReader.GetEntries(keyBase).Select(r => Path.GetFileName(r)).ToList();
    var dbs = dataReader.GetDirectories("").Where(r => Path.GetFileName(r).StartsWith("mailservice")).Select(r => Path.GetFileName(r)).ToList();

    var upgrades = new List<string>();
    var upgradesPath = Path.GetFullPath(Path.Combine(Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location), UpgradesPath));
    if (!string.IsNullOrEmpty(upgradesPath) && Directory.Exists(upgradesPath))
    {
        upgrades = Directory.GetFiles(upgradesPath).ToList();
    }

    var stepscount = keys.Count * 2 + upgrades.Count;

    var databases = new Dictionary<string, List<string>>();
    foreach (var db in dbs)
    {
        var keys1 = dataReader.GetEntries(db + "/" + keyBase).Select(k => Path.GetFileName(k)).ToList();
        stepscount += keys1.Count * 2;
        databases.Add(db, keys1);
    }

    if (ProcessStorage)
    {
        var storageModules = StorageFactory.GetModuleList(ConfigPath).Where(IsStorageModuleAllowed);
        var tenants = CoreContext.TenantManager.GetTenants(false);
        stepscount += storageModules.Count() * tenants.Count;
        SetStepsCount(stepscount + 1);
        DoDeleteStorage(storageModules, tenants);
    }
    else
    {
        SetStepsCount(stepscount);
    }

    // Restore the main database first.
    for (var i = 0; i < keys.Count; i += TasksLimit)
    {
        var tasks = new List<Task>(TasksLimit * 2);
        for (var j = 0; j < TasksLimit && i + j < keys.Count; j++)
        {
            var key1 = Path.Combine(KeyHelper.GetDatabaseSchema(), keys[i + j]);
            var key2 = Path.Combine(KeyHelper.GetDatabaseData(), keys[i + j]);
            tasks.Add(RestoreFromDumpFile(dataReader, key1, key2));
        }
        Task.WaitAll(tasks.ToArray());
    }

    // Register the mail server databases listed in mail_server_server, then restore them as well.
    using (var dbManager = DbManager.FromHttpContext("default", 100000))
    {
        dbManager.ExecuteList("select id, connection_string from mail_server_server").ForEach(r =>
        {
            RegisterDatabase((int)r[0], JsonConvert.DeserializeObject<Dictionary<string, object>>(Convert.ToString(r[1]))["DbConnection"].ToString());
        });
    }

    foreach (var database in databases)
    {
        for (var i = 0; i < database.Value.Count; i += TasksLimit)
        {
            var tasks = new List<Task>(TasksLimit * 2);
            for (var j = 0; j < TasksLimit && i + j < database.Value.Count; j++)
            {
                var key1 = Path.Combine(database.Key, KeyHelper.GetDatabaseSchema(), database.Value[i + j]);
                var key2 = Path.Combine(database.Key, KeyHelper.GetDatabaseData(), database.Value[i + j]);
                tasks.Add(RestoreFromDumpFile(dataReader, key1, key2, database.Key));
            }
            Task.WaitAll(tasks.ToArray());
        }
    }

    var comparer = new SqlComparer();
    foreach (var u in upgrades.OrderBy(Path.GetFileName, comparer))
    {
        using (var s = File.OpenRead(u))
        {
            if (u.Contains(".upgrade."))
            {
                RunMysqlFile(s, null).Wait();
            }
            else if (u.Contains(".data") || u.Contains(".upgrade"))
            {
                RunMysqlProcedure(s).Wait();
            }
            else
            {
                RunMysqlFile(s, "default").Wait();
            }
        }
        SetStepCompleted();
    }
}
private void DoDump(IDataWriteOperator writer, string dbName, List<string> tables)
{
    var excluded = ModuleProvider.AllModules.Where(r => IgnoredModules.Contains(r.ModuleName)).SelectMany(r => r.Tables).Select(r => r.Name).ToList();
    excluded.AddRange(IgnoredTables);
    excluded.Add("res_");

    var dir = Path.GetDirectoryName(BackupFilePath);
    var subDir = Path.Combine(dir, Path.GetFileNameWithoutExtension(BackupFilePath));

    string schemeDir;
    string dataDir;
    if (dbName == "default")
    {
        schemeDir = Path.Combine(subDir, KeyHelper.GetDatabaseSchema());
        dataDir = Path.Combine(subDir, KeyHelper.GetDatabaseData());
    }
    else
    {
        schemeDir = Path.Combine(subDir, dbName, KeyHelper.GetDatabaseSchema());
        dataDir = Path.Combine(subDir, dbName, KeyHelper.GetDatabaseData());
    }

    if (!Directory.Exists(schemeDir))
    {
        Directory.CreateDirectory(schemeDir);
    }
    if (!Directory.Exists(dataDir))
    {
        Directory.CreateDirectory(dataDir);
    }

    var dict = new Dictionary<string, int>();
    foreach (var table in tables)
    {
        dict.Add(table, SelectCount(table, dbName));
    }
    tables.Sort((pair1, pair2) => dict[pair1].CompareTo(dict[pair2]));

    for (var i = 0; i < tables.Count; i += TasksLimit)
    {
        var tasks = new List<Task>(TasksLimit * 2);
        for (var j = 0; j < TasksLimit && i + j < tables.Count; j++)
        {
            var t = tables[i + j];
            tasks.Add(Task.Run(() => DumpTableScheme(t, schemeDir, dbName)));
            if (!excluded.Any(t.StartsWith))
            {
                tasks.Add(Task.Run(() => DumpTableData(t, dataDir, dict[t], dbName, writer)));
            }
            else
            {
                SetStepCompleted(2);
            }
        }
        Task.WaitAll(tasks.ToArray());

        ArchiveDir(writer, subDir);
    }
}
private void DoDump(IDataWriteOperator writer)
{
    var databases = new Dictionary<string, List<string>>();

    // Collect the table list of every mail server database registered in mail_server_server.
    using (var dbManager = DbManager.FromHttpContext("default", 100000))
    {
        dbManager.ExecuteList("select id, connection_string from mail_server_server").ForEach(r =>
        {
            var dbName = GetDbName((int)r[0], JsonConvert.DeserializeObject<Dictionary<string, object>>(Convert.ToString(r[1]))["DbConnection"].ToString());
            using (var dbManager1 = DbManager.FromHttpContext(dbName, 100000))
            {
                var tables = dbManager1.ExecuteList("show tables;").Select(res => Convert.ToString(res[0])).ToList();
                databases.Add(dbName, tables);
            }
        });
    }

    // And the table list of the main ("default") database.
    using (var dbManager = DbManager.FromHttpContext("default", 100000))
    {
        var tables = dbManager.ExecuteList("show tables;").Select(res => Convert.ToString(res[0])).ToList();
        databases.Add("default", tables);
    }

    // Mark the archive as a full dump.
    using (var stream = new MemoryStream(Encoding.UTF8.GetBytes(true.ToString())))
    {
        writer.WriteEntry(KeyHelper.GetDumpKey(), stream);
    }

    var files = new List<BackupFileInfo>();
    var stepscount = 0;
    foreach (var db in databases)
    {
        stepscount += db.Value.Count * 4; // (schema + data) * (dump + zip)
    }
    if (ProcessStorage)
    {
        var tenants = CoreContext.TenantManager.GetTenants(false).Select(r => r.TenantId);
        foreach (var t in tenants)
        {
            files.AddRange(GetFiles(t));
        }
        stepscount += files.Count * 2 + 1;
        Logger.Debug("files:" + files.Count);
    }
    SetStepsCount(stepscount);

    foreach (var db in databases)
    {
        DoDump(writer, db.Key, db.Value);
    }

    var dir = Path.GetDirectoryName(BackupFilePath);
    var subDir = Path.Combine(dir, Path.GetFileNameWithoutExtension(BackupFilePath));
    Logger.DebugFormat("dir remove start {0}", subDir);
    Directory.Delete(subDir, true);
    Logger.DebugFormat("dir remove end {0}", subDir);

    if (ProcessStorage)
    {
        DoDumpStorage(writer, files);
    }
}
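// GetDbName and RegisterDatabase are not part of this listing. The mail_server_server rows store a
// JSON payload whose "DbConnection" key holds a MySQL connection string; a minimal sketch of how a
// per-server dump name could be derived from it, assuming MySql.Data's MySqlConnectionStringBuilder.
// The method name and the exact naming scheme are illustrative assumptions; only the "mailservice"
// prefix is grounded in the directory filter used by RestoreFromDump above.
// requires: using MySql.Data.MySqlClient;
private static string GetDbNameSketch(int serverId, string dbConnectionString)
{
    var builder = new MySqlConnectionStringBuilder(dbConnectionString);
    // Key the dump subdirectory by server id so several mail server databases do not collide.
    return "mailservice_" + serverId + "_" + builder.Database;
}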