/// <summary>
/// Cmdlet entry point: kicks off a recovery job for the configured source and
/// destination databases, then reports the operation status and job state.
/// </summary>
protected override void ProcessRecord()
{
    RecoveryOperationStatus operationStatus = StartRecoveryJob(SourceDatabaseName, DestinationDatabaseName, Type, Path);
    PrintInfo(operationStatus);
    PrintJobState(operationStatus.JobIdentifier);
}
/// <summary>
/// Writes the identifier and status of a recovery operation to the cmdlet's
/// verbose stream.
/// </summary>
/// <param name="opStatus">Status object returned by the recovery job.</param>
protected void PrintInfo(RecoveryOperationStatus opStatus)
{
    // Cleanup: removed commented-out Console.WriteLine debugging leftovers.
    WriteVerbose("ID: " + opStatus.JobIdentifier);
    WriteVerbose("Status: " + opStatus.Status);
}
/// <summary>
/// Streams JSON documents from an export file in chunks of at most <c>ChunkSize</c>
/// documents. The whole file is deserialized as a <c>JSONDocument[]</c> and then
/// re-chunked for the caller.
/// </summary>
/// <param name="dataType">Requested import format (JSON for this utility).</param>
/// <param name="path">Full path of the export file to read.</param>
/// <returns>Lazy sequence of document chunks; a fresh list per chunk.</returns>
/// <exception cref="ArgumentException">Invalid path or wrong file extension.
/// NOTE: as this is an iterator, exceptions surface on first enumeration,
/// not at the call site (preserved from the original contract).</exception>
public override IEnumerable <List <JSONDocument> > Read(EXIMDataType dataType, string path)
{
    RecoveryOperationStatus state = base.ValidatePath(path, RecoveryJobType.Import);
    if (state.Status != RecoveryStatus.Success)
    {
        throw new ArgumentException("Invalid file path provided");
    }
    if (!ValidateExtension(path))
    {
        throw new ArgumentException("Invalid file extension");
    }
    List <JSONDocument> items = new List <JSONDocument>();
    using (Stream stream = new FileStream(path, FileMode.Open, FileAccess.Read))
    using (StreamReader reader = new StreamReader(stream))
    using (JsonReader jsonReader = new JsonTextReader(reader))
    {
        Newtonsoft.Json.JsonSerializer serializer = new Newtonsoft.Json.JsonSerializer();
        JSONDocument[] itemList = serializer.Deserialize <JSONDocument[]>(jsonReader);
        if (itemList != null)
        {
            foreach (JSONDocument item in itemList)
            {
                items.Add(item);
                // BUG FIX: the original grew chunks to ChunkSize + 2 before yielding,
                // and reused/cleared the same list after 'yield return', so a caller
                // holding the reference saw it emptied. Yield at ChunkSize and hand
                // out a fresh list per chunk instead.
                if (items.Count >= base.ChunkSize)
                {
                    yield return items;
                    items = new List <JSONDocument>();
                }
            }
            // Trailing partial chunk (data smaller than the chunk size).
            if (items.Count > 0)
            {
                yield return items;
            }
        }
        // Cleanup: removed redundant stream.Close(); the using blocks dispose the stream.
    }
}
/// <summary>
/// Cmdlet entry point: connects to the configured database, exports the target
/// collection (optionally filtered by Query) to a file, and reports the outcome.
/// </summary>
protected override void ProcessRecord()
{
    Database db = Client.NosDB.InitializeDatabase(ProviderUtil.GetConnectionString(_databaseName));
    try
    {
        Collection <JSONDocument> collection = db.GetCollection(_collectionName);
        RecoveryOperationStatus status = collection.Export(_databaseName, _collectionName, Query, _queryParam, Path, FileName, _dataType);
        if (status.Status == RecoveryStatus.Success)
        {
            WriteObject("Exported successfully");
        }
        else
        {
            WriteObject("failed to export data " + status.Message);
        }
    }
    finally
    {
        // BUG FIX: the database handle previously leaked when Export threw;
        // disposal now runs on every exit path.
        db.Dispose();
    }
}
/// <summary>
/// Serializes the given documents to an indented JSON export file under
/// <paramref name="path"/>. The file name defaults to
/// "&lt;database&gt;_&lt;collection&gt;&lt;ext&gt;" when none is supplied.
/// </summary>
/// <param name="dataType">Export format requested by the caller (JSON here).</param>
/// <param name="path">Destination folder; checked via base.ValidatePath.</param>
/// <param name="collection">Source collection name (default file naming).</param>
/// <param name="fileName">Optional explicit file name, without extension.</param>
/// <param name="database">Source database name (default file naming).</param>
/// <param name="docList">Documents to serialize.</param>
/// <returns>Validation status, downgraded to Failure if serialization throws.</returns>
public override RecoveryOperationStatus Write(EXIMDataType dataType, string path, string collection, string fileName, string database, List <IJSONDocument> docList)
{
    RecoveryOperationStatus state = base.ValidatePath(path, RecoveryJobType.Export);
    if (state.Status != RecoveryStatus.Success)
    {
        return state;
    }
    try
    {
        string file;
        if (!string.IsNullOrEmpty(fileName))
        {
            file = Path.Combine(path, fileName + _fileExtension);
        }
        else
        {
            // Default naming convention: <database>_<collection><ext>
            file = Path.Combine(path, database + "_" + collection + _fileExtension);
        }
        using (StreamWriter sw = new StreamWriter(file))
        using (JsonWriter jw = new JsonTextWriter(sw))
        {
            jw.Formatting = Formatting.Indented;
            Newtonsoft.Json.JsonSerializer serializer = new Newtonsoft.Json.JsonSerializer();
            serializer.Serialize(jw, docList);
            // Cleanup: removed redundant jw.Close(); the using blocks flush and dispose.
        }
    }
    catch (Exception exp)
    {
        // BUG FIX: the guard previously null-checked EXIMLogger but queried
        // RecoveryLogger.IsErrorEnabled — an NRE risk when RecoveryLogger is null.
        if (LoggerManager.Instance.EXIMLogger != null && LoggerManager.Instance.EXIMLogger.IsErrorEnabled)
        {
            LoggerManager.Instance.EXIMLogger.Error("JSONEXIMUtil.Export()", exp.ToString());
        }
        state.Status = RecoveryStatus.Failure;
        state.Message = exp.ToString();
    }
    return state;
}
/// <summary>
/// Starts the data job (DataBackup/DataRestore) registered for the given operation.
/// Per the original design note, the oplog job is only started after the data job
/// has completed, so only the data job is started here.
/// </summary>
/// <param name="opContext">Operation identifying the job; tolerated when null.</param>
/// <returns>Status initialized to Executing — reflects submission, not completion.</returns>
private RecoveryOperationStatus StartRecoveryJob(RecoveryOperation opContext)
{
    RecoveryOperationStatus status = new RecoveryOperationStatus(RecoveryStatus.Executing);
    status.Message = "Recovery Job has successfully started";
    // BUG FIX: the original read opContext.JobIdentifer before the null check,
    // throwing NullReferenceException for a null context.
    if (opContext == null)
    {
        return status;
    }
    status.JobIdentifier = opContext.JobIdentifer;
    try
    {
        if (_databaseJobMap.ContainsKey(opContext.JobIdentifer))
        {
            // Pick the data job out of the job list; the oplog job starts later.
            RecoveryJobBase job = _databaseJobMap[opContext.JobIdentifer].JobList.Where(x => (x.JobType == RecoveryJobType.DataBackup) || (x.JobType == RecoveryJobType.DataRestore)).First();
            job.Start();
            if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled)
            {
                LoggerManager.Instance.RecoveryLogger.Info("ShardRecoveryManager.StartRecoveryJob()", opContext.JobIdentifer + "_" + _databaseJobMap[opContext.JobIdentifer].JobType + " Started");
            }
            _databaseJobMap[opContext.JobIdentifer].RecoveryPersistenceManager.IsJobActive = true;
        }
    }
    catch (Exception exp)
    {
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
        {
            // BUG FIX: the map was indexed unguarded inside the catch; a missing key
            // would have thrown a second exception out of the handler.
            string jobType = _databaseJobMap.ContainsKey(opContext.JobIdentifer)
                ? _databaseJobMap[opContext.JobIdentifer].ExecutionState.JobType.ToString()
                : "<unknown>";
            LoggerManager.Instance.RecoveryLogger.Error("ShardRecoveryManager.StartRecoveryJob()", opContext.JobIdentifer + "_" + jobType + " : " + exp.ToString());
        }
        if (_databaseJobMap.ContainsKey(opContext.JobIdentifer))
        {
            _databaseJobMap[opContext.JobIdentifer].ExecutionState.Status = RecoveryStatus.Failure;
            _databaseJobMap[opContext.JobIdentifer].ExecutionState.Message = "Failure during Starting phase";
            CheckJobState(opContext.JobIdentifer, true);
        }
    }
    return status;
}
/// <summary>
/// Stops every recovery job registered in the database job map, closing backup
/// files for data jobs and marking each job inactive. Failures on one job are
/// logged and do not prevent cancellation of the remaining jobs.
/// </summary>
/// <returns>Status initialized to Success; JobIdentifier reflects the last job touched.</returns>
private RecoveryOperationStatus CancelAllJobs()
{
    RecoveryOperationStatus result = new RecoveryOperationStatus(RecoveryStatus.Success);
    foreach (RecoveryJobBase recoveryJob in _databaseJobMap.Values.SelectMany(info => info.JobList))
    {
        try
        {
            result.JobIdentifier = recoveryJob.JobIdentifier;
            try
            {
                recoveryJob.Stop();
            }
            catch (ThreadAbortException)
            {
                // Stop() may abort the worker thread; reset so this thread continues.
                Thread.ResetAbort();
            }
            // Data jobs own an open backup file that must be closed as cancelled.
            bool isDataJob = recoveryJob.JobType == RecoveryJobType.DataBackup ||
                             recoveryJob.JobType == RecoveryJobType.DataRestore;
            if (isDataJob)
            {
                _databaseJobMap[recoveryJob.JobIdentifier].RecoveryPersistenceManager.CloseBackupFile(recoveryJob.Database, RecoveryFileState.Cancelled);
            }
            _databaseJobMap[recoveryJob.JobIdentifier].RecoveryPersistenceManager.IsJobActive = false;
        }
        catch (Exception exp)
        {
            if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
            {
                LoggerManager.Instance.RecoveryLogger.Error("ShardRecoveryManager.CancelAllRecoveryJob()", recoveryJob.JobIdentifier + " : " + exp.ToString());
            }
            if (_databaseJobMap.ContainsKey(recoveryJob.JobIdentifier))
            {
                _databaseJobMap[recoveryJob.JobIdentifier].ExecutionState.Status = RecoveryStatus.Failure;
                _databaseJobMap[recoveryJob.JobIdentifier].ExecutionState.Message = "Failure during Cancel phase";
                CheckJobState(recoveryJob.JobIdentifier, true);
            }
        }
    }
    return result;
}
/// <summary>
/// Exports data for the specified collection; when no query is given, a
/// select-all query ("Select * from $collection$") is built so the entire
/// collection is dumped to file.
/// </summary>
/// <param name="database">Source database name (used for default file naming downstream).</param>
/// <param name="collection">Collection to export; must be non-empty.</param>
/// <param name="query">Optional query; defaults to select-all when empty.</param>
/// <param name="parameters">Query parameters; replaced with an empty list for the default query.</param>
/// <param name="path">Destination folder for the export file.</param>
/// <param name="filename">Optional explicit file name.</param>
/// <param name="dataType">Export format (CSV or JSON).</param>
/// <returns>Failure status (with message) on error; otherwise the writer's status.</returns>
public RecoveryOperationStatus Export(string database, string collection, string query, ICollection<IParameter> parameters, string path, string filename, EXIMDataType dataType)
{
    RecoveryOperationStatus state = new RecoveryOperationStatus(RecoveryStatus.Failure);
    try
    {
        if (string.IsNullOrEmpty(collection))
        {
            throw new ArgumentException("Invalid Collection provided");
        }
        // Cleanup: the original re-checked the collection name inside this branch
        // even though the outer check already guaranteed it — removed.
        if (string.IsNullOrEmpty(query))
        {
            query = "Select * from $" + collection + "$";
            parameters = new List<IParameter>();
        }
        // Materialize every matching document before writing the file.
        // NOTE(review): the reader is not disposed here — confirm whether
        // ICollectionReader holds resources that need explicit disposal.
        ICollectionReader reader = ExecuteReader(query, parameters);
        List<IJSONDocument> exportList = new List<IJSONDocument>();
        while (reader.ReadNext())
        {
            exportList.Add(reader.GetObject<IJSONDocument>());
        }
        // Hand off to the format-specific writer.
        state = Export(collection, exportList, path, filename, database, dataType);
    }
    catch (Exception exp)
    {
        // BUG FIX: the guard previously null-checked EXIMLogger but queried
        // RecoveryLogger.IsErrorEnabled; both sides now use EXIMLogger.
        if (LoggerManager.Instance.EXIMLogger != null && LoggerManager.Instance.EXIMLogger.IsErrorEnabled)
            LoggerManager.Instance.EXIMLogger.Error("Export()", exp.ToString());
        state.Message = exp.ToString();
    }
    return state;
}
/// <summary>
/// Writes the given documents to an export file using the writer that matches
/// the requested format (CSV or JSON).
/// </summary>
/// <param name="collection">Source collection name (used for default file naming).</param>
/// <param name="docList">Documents to write.</param>
/// <param name="path">Destination folder.</param>
/// <param name="fileName">Optional explicit file name.</param>
/// <param name="database">Source database name (used for default file naming).</param>
/// <param name="dataType">Export format selecting the concrete writer.</param>
/// <returns>The writer's status, or Failure when the format is unrecognized.</returns>
public RecoveryOperationStatus Export(string collection, List<IJSONDocument> docList, string path, string fileName, string database, EXIMDataType dataType)
{
    EXIMBase exportWriter = null;
    if (dataType == EXIMDataType.CSV)
    {
        exportWriter = new CSVEXIMUtil();
    }
    else if (dataType == EXIMDataType.JSON)
    {
        exportWriter = new JSONEXIMUtil();
    }
    if (exportWriter == null)
    {
        // Unknown format: report failure without attempting a write.
        return new RecoveryOperationStatus(RecoveryStatus.Failure);
    }
    return exportWriter.Write(dataType, path, collection, fileName, database, docList);
}
/// <summary>
/// Resumes a paused recovery job. Only the single-job (non-sharded) case is
/// implemented; sharded backups are still pending (see M_TODO below).
/// </summary>
/// <param name="opContext">Operation identifying the job; tolerated when null.</param>
/// <returns>Status defaulted to Failure. NOTE(review): the original never set it
/// to Success even after a successful Resume — preserved; confirm callers' expectations.</returns>
private RecoveryOperationStatus ResumeRecoveryJob(RecoveryOperation opContext)
{
    RecoveryOperationStatus status = new RecoveryOperationStatus(RecoveryStatus.Failure);
    // BUG FIX: the original read opContext.JobIdentifer before the null check,
    // throwing NullReferenceException for a null context.
    if (opContext == null)
    {
        return status;
    }
    status.JobIdentifier = opContext.JobIdentifer;
    if (_databaseJobMap.ContainsKey(opContext.JobIdentifer))
    {
        List <RecoveryJobBase> jobList = _databaseJobMap[opContext.JobIdentifer].JobList;
        if (jobList.Count == 1)
        {
            jobList[0].Resume();
        }
        else
        {
            // M_TODO: complete sharded backup
        }
    }
    return status;
}
/// <summary>
/// Explicitly cancels the running clustered recovery job with the given
/// identifier and removes it from the set of active jobs.
/// </summary>
/// <param name="identifier">Identifier of the clustered job to cancel.</param>
/// <returns>The cancel result from the job, or a Failure status with an
/// explanatory message when the identifier is unknown or an error occurs.</returns>
RecoveryOperationStatus IRecoveryManager.CancelRecoveryJob(string identifier)
{
    RecoveryOperationStatus result = new RecoveryOperationStatus(RecoveryStatus.Failure);
    result.Message = "Failure during cancellation"; // default until proven otherwise
    try
    {
        RecoveryConfiguration cancelConfig = new RecoveryConfiguration();
        cancelConfig.Identifier = identifier;
        result.JobIdentifier = identifier;
        if (!_runningClusteredJobMap.ContainsKey(identifier))
        {
            result.Message = "Invalid identifier provided";
        }
        else
        {
            if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled)
            {
                LoggerManager.Instance.RecoveryLogger.Info("RecoveryManager.CancelRecoveryJob()", "Explicit canceling initiated");
            }
            IClusteredRecoveryJob clusteredJob = (IClusteredRecoveryJob)_runningClusteredJobMap[identifier];
            result = clusteredJob.Cancel(cancelConfig, explicitCancel: true);
            // Drop the job from the active clustered-job map.
            RemoveRunningJob(identifier);
        }
    }
    catch (Exception ex)
    {
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
        {
            LoggerManager.Instance.RecoveryLogger.Error("RecoveryManager.Cancel()", ex.ToString());
        }
        result.Message = ex.Message;
    }
    return result;
}
/// <summary>
/// Refreshes the shared execution state for a job by pulling the latest
/// statistics from each registered sub-job, then notifies listeners via
/// CheckJobState.
/// </summary>
/// <param name="opContext">Operation identifying the job; tolerated when null.</param>
/// <returns>Status initialized to Success (statistics failures are recorded in
/// the job's ExecutionState, not in this return value — preserved behavior).</returns>
private RecoveryOperationStatus GetJobStatistics(RecoveryOperation opContext)
{
    RecoveryOperationStatus status = new RecoveryOperationStatus(RecoveryStatus.Success);
    // BUG FIX: the original read opContext.JobIdentifer (and used it in finally)
    // before the null check — NullReferenceException for a null context.
    if (opContext == null)
    {
        return status;
    }
    status.JobIdentifier = opContext.JobIdentifer;
    try
    {
        if (_databaseJobMap.ContainsKey(opContext.JobIdentifer))
        {
            foreach (RecoveryJobBase job in _databaseJobMap[opContext.JobIdentifer].JobList)
            {
                // Fold each sub-job's latest statistics into the shared execution state.
                _databaseJobMap[opContext.JobIdentifer].ExecutionState.UpdateEntityState((RecoveryJobStateBase)job.JobStatistics());
            }
        }
    }
    catch (Exception ex)
    {
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
        {
            // BUG FIX: the map was indexed unguarded inside the catch.
            string jobType = _databaseJobMap.ContainsKey(opContext.JobIdentifer)
                ? _databaseJobMap[opContext.JobIdentifer].ExecutionState.JobType.ToString()
                : "<unknown>";
            LoggerManager.Instance.RecoveryLogger.Error("ShardRecoveryManager.GetStatistics()", opContext.JobIdentifer + "_" + jobType + " : " + ex.ToString());
        }
        if (_databaseJobMap.ContainsKey(opContext.JobIdentifer))
        {
            _databaseJobMap[opContext.JobIdentifer].ExecutionState.Status = RecoveryStatus.Failure;
            _databaseJobMap[opContext.JobIdentifer].ExecutionState.Message = "Failure during get statistics call";
        }
    }
    finally
    {
        // Push the (possibly updated) state out to whoever is tracking the job.
        CheckJobState(opContext.JobIdentifer, true);
    }
    return status;
}
/// <summary>
/// Intended to cancel every running clustered recovery job.
/// NOTE(review): the actual Cancel call is commented out and the result array's
/// elements are never assigned, so this currently cancels nothing and returns an
/// array of null statuses — confirm whether this is unfinished work or a
/// deliberate stub before relying on it.
/// </summary>
/// <returns>Array sized to the number of running jobs; elements are never populated.</returns>
RecoveryOperationStatus[] IRecoveryManager.CancelAllRecoveryJobs()
{
    RecoveryOperationStatus[] state = new RecoveryOperationStatus[_runningClusteredJobMap.Count];
    try
    {
        foreach (string _key in _runningClusteredJobMap.Keys)
        {
            // The job is resolved but the cancellation itself is disabled below.
            IClusteredRecoveryJob _job = ((IClusteredRecoveryJob)_runningClusteredJobMap[_key]);
            //state = _job.Cancel(_key);
        }
    }
    catch (Exception ex)
    {
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
        {
            LoggerManager.Instance.RecoveryLogger.Error("RecoveryManager.CancelAll()", ex.ToString());
        }
        // state.Message = ex.Message;
    }
    return (state);
}
/// <summary>
/// Dispatches an incoming recovery operation to the matching handler on this
/// shard: submit (with prerequisite validation), start, cancel/end, pause,
/// resume, cancel-all, or statistics retrieval.
/// </summary>
/// <param name="opContext">Operation to execute, selected by its OpCode.</param>
/// <returns>Status produced by the selected handler; Failure for unknown op codes.</returns>
internal RecoveryOperationStatus RecoveryOperationReceived(RecoveryOperation opContext)
{
    RecoveryOperationStatus result = new RecoveryOperationStatus(RecoveryStatus.Failure);
    result.JobIdentifier = opContext.JobIdentifer;
    try
    {
        switch (opContext.OpCode)
        {
            case RecoveryOpCodes.SubmitBackupJob:
            case RecoveryOpCodes.SubmitRestoreJob:
                // Validate the node can host the job before accepting it.
                if (ValidatePrequisites(opContext).Status != RecoveryStatus.Success)
                {
                    result.Message = "Failed to submit task on node";
                }
                else
                {
                    result.Status = RecoveryStatus.Submitted;
                    result.Message = "Submitted successfuly";
                    SubmitRecoveryJob(opContext);
                }
                break;
            case RecoveryOpCodes.StartJob:
                result = StartRecoveryJob(opContext);
                break;
            case RecoveryOpCodes.EndJob:
            case RecoveryOpCodes.CancelJob:
                result = CancelRecoveryJob(opContext);
                break;
            case RecoveryOpCodes.PauseJob:
                result = PauseRecoveryJob(opContext);
                break;
            case RecoveryOpCodes.ResumeJob:
                result = ResumeRecoveryJob(opContext);
                break;
            case RecoveryOpCodes.CancelAllJobs:
                result = CancelAllJobs();
                break;
            case RecoveryOpCodes.SubmitShardBackupJob:
            case RecoveryOpCodes.SubmitShardRecoveryJob:
                // Shard-level submissions follow the same validate-then-submit shape.
                if (ValidatePrequisites(opContext).Status != RecoveryStatus.Success)
                {
                    result.Message = "Failed to submit task on node";
                }
                else
                {
                    result.Status = RecoveryStatus.Submitted;
                    result.Message = "Submitted successfuly";
                    SubmitShardRecoveryJob(opContext);
                }
                break;
            case RecoveryOpCodes.GetJobStatistics:
                result = GetJobStatistics(opContext);
                break;
        }
    }
    catch (Exception exp)
    {
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
        {
            LoggerManager.Instance.RecoveryLogger.Error("ShardRecoveryManager.Receive()", exp.ToString());
        }
    }
    return result;
}
/// <summary>
/// Imports documents from an export file into the current collection, chunk by
/// chunk. Documents that fail to insert are retried as replacements when
/// <paramref name="updateMode"/> is set; remaining failures mark the overall
/// status as Failure.
/// </summary>
/// <param name="database">Source database name (informational).</param>
/// <param name="collection">Target collection name (used in failure messages).</param>
/// <param name="path">Full path of the export file.</param>
/// <param name="updateMode">When true, failed inserts are retried as replace operations.</param>
/// <param name="dataType">File format selecting the concrete reader (CSV or JSON).</param>
/// <returns>Success unless any chunk ultimately fails or an exception occurs.</returns>
public RecoveryOperationStatus Import(string database, string collection, string path, bool updateMode, EXIMDataType dataType)
{
    RecoveryOperationStatus state = new RecoveryOperationStatus(RecoveryStatus.Success);
    EXIMBase eximBase = null;
    switch (dataType)
    {
        case EXIMDataType.CSV:
            eximBase = new CSVEXIMUtil();
            break;
        case EXIMDataType.JSON:
            eximBase = new JSONEXIMUtil();
            break;
    }
    if (eximBase == null)
    {
        // Unknown format: nothing to read (original fell through with Success).
        return state;
    }
    try
    {
        foreach (List<JSONDocument> docList in eximBase.Read(dataType, path))
        {
            // Guard clauses replace the original's five levels of nesting.
            if (docList == null || docList.Count == 0)
            {
                continue;
            }
            List<FailedDocument> failedDocs = InsertDocuments(docList);
            if (failedDocs == null || failedDocs.Count == 0)
            {
                continue;
            }
            if (updateMode)
            {
                // Retry each failed insert as a replace of the original document.
                List<JSONDocument> retryList = new List<JSONDocument>();
                foreach (FailedDocument failed in failedDocs)
                {
                    foreach (JSONDocument orgDoc in docList)
                    {
                        if (orgDoc.Key.Equals(failed.DocumentKey))
                        {
                            retryList.Add(orgDoc);
                        }
                    }
                }
                failedDocs = ReplaceDocuments(retryList);
                if (failedDocs == null || failedDocs.Count == 0)
                {
                    continue;
                }
            }
            // BUG FIX: failure message previously lacked spaces ("3failed to import
            // in collectionfoo"); logger guard previously mixed EXIMLogger with
            // RecoveryLogger.IsErrorEnabled.
            string failureMessage = failedDocs.Count + " failed to import in collection " + collection;
            if (LoggerManager.Instance.EXIMLogger != null && LoggerManager.Instance.EXIMLogger.IsErrorEnabled)
                LoggerManager.Instance.EXIMLogger.Error("Import()", failureMessage);
            state.Status = RecoveryStatus.Failure;
            state.Message = failureMessage;
        }
    }
    catch (Exception exp)
    {
        if (LoggerManager.Instance.EXIMLogger != null && LoggerManager.Instance.EXIMLogger.IsErrorEnabled)
            LoggerManager.Instance.EXIMLogger.Error("Import()", exp.ToString());
        state.Status = RecoveryStatus.Failure;
        state.Message = exp.Message + " For details kindly review the log file";
    }
    return state;
}
/// <summary>
/// Validates that this node can accept a submitted backup/restore job: the
/// recovery folder structure and backup file contents for restores, and target
/// folder creation plus free disk space for backups.
/// </summary>
/// <param name="opContext">Submit operation carrying the persistence configuration.</param>
/// <returns>Success when all prerequisites hold; Failure with a message otherwise.</returns>
private RecoveryOperationStatus ValidatePrequisites(RecoveryOperation opContext)
{
    RecoveryOperationStatus state = new RecoveryOperationStatus(RecoveryStatus.Success);
    state.JobIdentifier = opContext.JobIdentifer;
    try
    {
        // NOTE(review): the original contained a commented-out state-transfer check
        // here ("fail when node is in state transfer") — still pending completion.
        string path = string.Empty;
        string username = "";
        string password = "";
        Dictionary <string, Dictionary <string, string[]> > dbMap = new Dictionary <string, Dictionary <string, string[]> >();
        switch (opContext.OpCode)
        {
            case RecoveryOpCodes.SubmitBackupJob:
                SubmitBackupOpParams _bckpParam = (SubmitBackupOpParams)opContext.Parameter;
                path = _bckpParam.PersistenceConfiguration.FilePath;
                dbMap = _bckpParam.PersistenceConfiguration.DbCollectionMap;
                username = _bckpParam.PersistenceConfiguration.UserName;
                password = _bckpParam.PersistenceConfiguration.Password;
                break;
            case RecoveryOpCodes.SubmitRestoreJob:
                SubmitRestoreOpParams _resParam = (SubmitRestoreOpParams)opContext.Parameter;
                path = Path.Combine(_resParam.PersistenceConfiguration.FilePath);
                username = _resParam.PersistenceConfiguration.UserName;
                password = _resParam.PersistenceConfiguration.Password;
                dbMap = _resParam.PersistenceConfiguration.DbCollectionMap;
                state = ValidateRestoreSource(path, username, password, dbMap);
                if (state.Status == RecoveryStatus.Failure)
                {
                    return state;
                }
                break;
        }
        // Backup target folder may not exist yet: create it and verify disk space.
        if (!Directory.Exists(path))
        {
            Impersonation impersonation = null;
            try
            {
                if (RecoveryFolderStructure.PathIsNetworkPath(path))
                {
                    impersonation = new Impersonation(username, password);
                }
                Directory.CreateDirectory(path);
                if (dbMap.Count > 0)
                {
                    // BUG FIX: the original used 'size = database.Size' inside the
                    // loop, so only the LAST database's size was counted; the
                    // required space is the sum over all databases being backed up.
                    long size = 0;
                    foreach (string db in dbMap.Keys)
                    {
                        DatabaseStore database = _context.DatabasesManager.GetDatabase(db) as DatabaseStore;
                        size += database.Size;
                    }
                    ulong availableFreeSpace = Util.DirectoryUtil.GetDiskFreeSpace(path);
                    ulong requiredSize = (ulong)size;
                    if (availableFreeSpace > requiredSize)
                    {
                        state.Status = RecoveryStatus.Success;
                    }
                    else
                    {
                        state.Status = RecoveryStatus.Failure;
                        state.Message = "Available memory is less than the required memory for backup";
                        return state;
                    }
                }
            }
            catch (Exception ex)
            {
                state.Status = RecoveryStatus.Failure;
                state.Message = ex.Message;
                if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
                {
                    LoggerManager.Instance.RecoveryLogger.Error("ShardRecoveryManager.Validate()", ex.ToString());
                }
            }
            finally
            {
                // BUG FIX: the original leaked the impersonation context on the
                // insufficient-space return and on any exception.
                if (impersonation != null)
                {
                    impersonation.Dispose();
                }
            }
        }
    }
    catch (Exception exp)
    {
        state.Status = RecoveryStatus.Failure;
        state.Message = exp.ToString();
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
        {
            LoggerManager.Instance.RecoveryLogger.Error("ShardRecoveryManager.Validate()", opContext.JobIdentifer + " : " + exp.ToString());
        }
    }
    return state;
}

// Validates a restore source: the shard folder structure and the backup file
// header must name the database being restored. Extracted from ValidatePrequisites
// to keep the restore-specific checks in one place.
private RecoveryOperationStatus ValidateRestoreSource(string path, string username, string password, Dictionary <string, Dictionary <string, string[]> > dbMap)
{
    List <string> shardNameList = new List <string>();
    shardNameList.Add(_context.LocalShardName);
    RecoveryOperationStatus state;
    Impersonation impersonation = null;
    try
    {
        if (RecoveryFolderStructure.PathIsNetworkPath(path))
        {
            impersonation = new Impersonation(username, password);
        }
        state = RecoveryFolderStructure.ValidateFolderStructure(path, RecoveryJobType.DataRestore, false, shardNameList);
    }
    finally
    {
        // BUG FIX: dispose even when ValidateFolderStructure throws.
        if (impersonation != null)
        {
            impersonation.Dispose();
        }
    }
    if (state.Status == RecoveryStatus.Failure)
    {
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
        {
            LoggerManager.Instance.RecoveryLogger.Error("ShardRecoveryManager.Validate()", state.Message);
        }
        return state;
    }
    // The backup file is named after the source database.
    string fileName = dbMap.First().Key;
    // NOTE(review): the original combined the path with an always-empty folder
    // name, so folderPath is effectively 'path' — confirm a subfolder was intended.
    string folderPath = Path.Combine(path, string.Empty);
    string filePath = Path.Combine(folderPath, fileName);
    bool fileExists;
    if (RecoveryFolderStructure.PathIsNetworkPath(folderPath))
    {
        // Cleanup: an unused BackupFile with null credentials was constructed
        // here in the original; removed as dead code.
        Impersonation fileCheckImpersonation = null;
        try
        {
            fileCheckImpersonation = new Impersonation(username, password);
            fileExists = File.Exists(filePath);
        }
        finally
        {
            if (fileCheckImpersonation != null)
            {
                fileCheckImpersonation.Dispose();
            }
        }
    }
    else
    {
        fileExists = File.Exists(filePath);
    }
    if (!fileExists)
    {
        state.Status = RecoveryStatus.Failure;
        state.Message = "No file exists in the given folder";
        return state;
    }
    BackupFile file = new BackupFile(fileName, folderPath, username, password);
    try
    {
        Alachisoft.NosDB.Core.Recovery.Persistence.BackupFile.Header header = file.ReadFileHeader();
        // The header must name the source database being restored.
        if (!header.Database.ToLower().Equals(dbMap.First().Key))
        {
            state.Status = RecoveryStatus.Failure;
            state.Message = "Provided file does not contain the source database " + fileName;
            return state;
        }
    }
    finally
    {
        file.Close();
    }
    return state;
}
/// <summary>
/// Writes the given documents to a CSV export file. The first document supplies
/// the header row (column order via <c>headerPositionMap</c>); subsequent
/// documents are emitted as value rows in that column order.
/// </summary>
/// <param name="dataType">Export format requested (CSV here).</param>
/// <param name="path">Destination folder; checked via base.ValidatePath.</param>
/// <param name="collection">Source collection name (default file naming).</param>
/// <param name="fileName">Optional explicit file name, without extension.</param>
/// <param name="database">Source database name (default file naming).</param>
/// <param name="docList">Documents to write.</param>
/// <returns>Validation status, downgraded to Failure when writing throws.</returns>
public override RecoveryOperationStatus Write(EXIMDataType dataType, string path, string collection, string fileName, string database, List <Server.Engine.IJSONDocument> docList)
{
    RecoveryOperationStatus state = base.ValidatePath(path, RecoveryJobType.Export);
    if (state.Status != RecoveryStatus.Success)
    {
        return state;
    }
    try
    {
        string file;
        if (!string.IsNullOrEmpty(fileName))
        {
            file = Path.Combine(path, fileName + _fileExtension);
        }
        else
        {
            // Default naming convention: <database>_<collection><ext>
            file = Path.Combine(path, database + "_" + collection + _fileExtension);
        }
        using (StreamWriter writer = new StreamWriter(file))
        {
            bool first = true;
            IDictionary <int, string> headerPositionMap = null;
            foreach (JSONDocument document in docList)
            {
                if (first)
                {
                    // NOTE(review): only the header row is derived from the first
                    // document; its values are never emitted as a data row —
                    // confirm WriteHeaderRow covers them, otherwise the first
                    // document's data is silently lost.
                    WriteHeaderRow(document, out headerPositionMap, writer);
                    first = false;
                    continue;
                }
                IDictionary <string, object> valueMap = GetCsvReadyValues(document);
                string _delim = ",";
                string vString = string.Empty;
                int pos = 0;
                foreach (int hPos in headerPositionMap.Keys)
                {
                    if (pos != 0)
                    {
                        vString += _delim;
                    }
                    // BUG FIX: a missing value previously appended an EXTRA
                    // delimiter (shifting every later column right by one), and a
                    // null value threw NullReferenceException on ToString().
                    object cell;
                    if (valueMap.TryGetValue(headerPositionMap[hPos], out cell) && cell != null)
                    {
                        vString += cell.ToString();
                    }
                    pos++;
                }
                writer.WriteLine(vString);
            }
            // Cleanup: removed redundant writer.Close(); the using block disposes.
        }
    }
    catch (Exception exp)
    {
        // BUG FIX: guard previously null-checked EXIMLogger but queried
        // RecoveryLogger.IsErrorEnabled.
        if (LoggerManager.Instance.EXIMLogger != null && LoggerManager.Instance.EXIMLogger.IsErrorEnabled)
        {
            LoggerManager.Instance.EXIMLogger.Error("CSVEXIMUtil.Export()", exp.ToString());
        }
        state.Status = RecoveryStatus.Failure;
        state.Message = exp.ToString();
    }
    return state;
}
/// <summary>
/// Streams documents from a CSV export file in chunks of at most
/// <c>ChunkSize</c>. The first row is the header; each following row must have
/// exactly as many columns as the header.
/// </summary>
/// <param name="dataType">Requested import format (CSV for this utility).</param>
/// <param name="path">Full path of the export file to read.</param>
/// <returns>Lazy sequence of document chunks; a fresh list per chunk. An empty
/// file yields one empty chunk (preserved behavior).</returns>
/// <exception cref="ArgumentException">Invalid path or wrong file extension.
/// NOTE: iterator semantics — thrown on first enumeration, not at the call site.</exception>
/// <exception cref="InvalidDataException">A row's column count differs from the header's.</exception>
public override IEnumerable <List <JSONDocument> > Read(EXIMDataType dataType, string path)
{
    RecoveryOperationStatus state = base.ValidatePath(path, RecoveryJobType.Import);
    if (state.Status != RecoveryStatus.Success)
    {
        throw new ArgumentException("Invalid file path provided");
    }
    if (!ValidateExtension(path))
    {
        throw new ArgumentException("Invalid file extension");
    }
    List <JSONDocument> items = new List <JSONDocument>();
    using (Stream stream = new FileStream(path, FileMode.Open, FileAccess.Read))
    using (StreamReader reader = new StreamReader(stream))
    {
        // NOTE(review): Peek() > 0 treats a file whose first char code is 0 as
        // empty; '>= 0' would be the strict non-EOF check — preserved as-is.
        if (reader.Peek() > 0)
        {
            position = 0;
            IDictionary <int, string> header = ReadHeaderRow(reader);
            while (!reader.EndOfStream)
            {
                IDictionary <int, object> value = ReadValueRow(reader);
                if (header.Count != value.Count)
                {
                    throw new InvalidDataException("Invalid data provided the number of columns is not equal to header row");
                }
                JSONDocument doc = CreateJSONDocument(header, value);
                if (doc != null)
                {
                    items.Add(doc);
                    // BUG FIX: the original grew chunks to ChunkSize + 2 before
                    // yielding and reused/cleared the same list after 'yield
                    // return'. Yield at ChunkSize with a fresh list per chunk.
                    if (items.Count >= base.ChunkSize)
                    {
                        yield return items;
                        items = new List <JSONDocument>();
                    }
                }
            }
            // Trailing partial chunk.
            if (items.Count > 0)
            {
                yield return items;
            }
        }
        else
        {
            // Empty file: emit a single empty chunk, as the original did.
            yield return items;
        }
    }
}
/// <summary>
/// Ends or cancels a recovery job on this shard: closes the backup file for
/// data-backup jobs, stops and disposes every sub-job (best-effort), renames the
/// backup root folder as Cancelled, and removes the job from the map.
/// NOTE(review): opContext.JobIdentifer is dereferenced on the first line BEFORE
/// the null check below — an NRE risk if opContext can ever be null. The empty
/// catch blocks around Stop()/Dispose() are deliberate best-effort teardown.
/// </summary>
/// <param name="opContext">Operation (EndJob or CancelJob) identifying the job.</param>
/// <returns>Success unless teardown throws, in which case Failure with the exception text.</returns>
private RecoveryOperationStatus CancelRecoveryJob(RecoveryOperation opContext)
{
    RecoveryOperationStatus status = new RecoveryOperationStatus(RecoveryStatus.Success);
    status.JobIdentifier = opContext.JobIdentifer;
    if (opContext != null)
    {
        try
        {
            #region End/Cancel job
            if (_databaseJobMap.ContainsKey(opContext.JobIdentifer))
            {
                List <RecoveryJobBase> jobList = _databaseJobMap[opContext.JobIdentifer].JobList;
                foreach (RecoveryJobBase job in jobList)
                {
                    // Explicit cancel: mark the backup file cancelled for data-backup jobs.
                    if (opContext.OpCode == RecoveryOpCodes.CancelJob)
                    {
                        switch (job.JobType)
                        {
                            case RecoveryJobType.DataBackup:
                                _databaseJobMap[opContext.JobIdentifer].RecoveryPersistenceManager.CloseBackupFile(job.Database, RecoveryFileState.Cancelled);
                                break;
                                //NOTE: diffbackup job is ignored under the assumption oplog job will close the file for it
                        }
                    }
                    //if (job.JobType != RecoveryJobType.DifferentialRestore)
                    {
                        // Stop the worker only when it is actually running;
                        // failures here are swallowed on purpose (best-effort teardown).
                        try
                        {
                            if (job.State != ThreadState.Unstarted && job.State != ThreadState.Stopped && job.State != ThreadState.WaitSleepJoin)
                            {
                                job.Stop();
                            }
                        }
                        catch (ThreadAbortException)
                        {
                            Thread.ResetAbort();
                        }
                        catch (Exception exp)
                        {
                        }
                        // Dispose is likewise best-effort.
                        try
                        {
                            job.Dispose();
                        }
                        catch (ThreadAbortException)
                        {
                            Thread.ResetAbort();
                        }
                        catch (Exception exp)
                        {
                        }
                    }
                    //else
                    //{
                    // _databaseJobMap[opContext.JobIdentifer].RecoveryPersistenceManager.IsJobActive = false;
                    // _databaseJobMap[opContext.JobIdentifer].RecoveryPersistenceManager.Dispose();
                    //}
                }
                #region rename folder
                // Cancelled data backups get their root folder renamed to reflect the state.
                JobInfoObject infoObj = _databaseJobMap[opContext.JobIdentifer];
                if (infoObj.JobType == RecoveryJobType.DataBackup)
                {
                    switch (opContext.OpCode)
                    {
                        case RecoveryOpCodes.CancelJob:
                            infoObj.RenameRootFolder(RecoveryFileState.Cancelled);
                            break;
                    }
                }
                #endregion
                // removing job from databaseJobMap
                _databaseJobMap[opContext.JobIdentifer].RecoveryPersistenceManager.IsJobActive = false;
                _databaseJobMap[opContext.JobIdentifer].Dispose();
                _databaseJobMap.Remove(opContext.JobIdentifer);
            }
            #endregion
        }
        catch (Exception exp)
        {
            if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
            {
                // NOTE(review): this lookup is unguarded — if the job was already
                // removed from the map, logging itself can throw.
                LoggerManager.Instance.RecoveryLogger.Error("ShardRecoveryManager.CancelRecoveryJob()", opContext.JobIdentifer + "_" + _databaseJobMap[opContext.JobIdentifer].ExecutionState.JobType + " : " + exp.ToString());
            }
            if (_databaseJobMap.ContainsKey(opContext.JobIdentifer))
            {
                _databaseJobMap[opContext.JobIdentifer].ExecutionState.Status = RecoveryStatus.Failure;
                _databaseJobMap[opContext.JobIdentifer].ExecutionState.Message = "Failure during Cancel phase";
                // commented because the config server will not have any job handler to recieve this message
                //CheckJobState(opContext.JobIdentifer, true);
            }
            status.Status = RecoveryStatus.Failure;
            status.Message = exp.ToString();
        }
    }
    if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled)
    {
        LoggerManager.Instance.RecoveryLogger.Info("ShardRecoveryManager.CancelRecoveryJob()", status.ToString());
    }
    return (status);
}
/// <summary>
/// Routes a configuration change to the clustered restore job working on the
/// affected database. <paramref name="changeConfig"/> is either restored
/// config-server metadata (CsBackupableEntities, success) or a
/// RecoveryOperationStatus carrying a failure notification; failures cancel the
/// matching job.
/// </summary>
/// <param name="changeConfig">Payload distinguishing success from failure; may be null
/// (treated as a failure for a live job).</param>
public void SubmitConfigChanged(object changeConfig)
{
    try
    {
        bool failed = true;
        // Only restore-type clustered jobs consume config changes.
        foreach (ClusterRecoveryJob job in _runningClusteredJobMap.Values)
        {
            if (job.ActiveConfig.JobType != RecoveryJobType.Restore && job.ActiveConfig.JobType != RecoveryJobType.ConfigRestore)
            {
                continue;
            }
            string db = string.Empty;
            // Failure notification: recover the database name from the job identifier.
            if (changeConfig is RecoveryOperationStatus)
            {
                RecoveryOperationStatus status = (RecoveryOperationStatus)changeConfig;
                string[] splitString = status.JobIdentifier.Split('_');
                // BUG FIX: identifiers without an underscore previously threw
                // IndexOutOfRangeException on splitString[1].
                if (splitString.Length > 1 && !string.IsNullOrEmpty(splitString[1]))
                {
                    db = splitString[1];
                }
                else
                {
                    db = splitString[0];
                }
            }
            // Success payload: database name comes from the restored entities.
            if (changeConfig is CsBackupableEntities)
            {
                CsBackupableEntities entity = (CsBackupableEntities)changeConfig;
                db = entity.Database.First().Key.ToLower();
                failed = false;
            }
            // Match against the job's database map: destination name when present,
            // otherwise the source name.
            KeyValuePair <string, string> dbMap = job.ActiveConfig.DatabaseMap.First();
            bool valid = false;
            if (!string.IsNullOrEmpty(dbMap.Value))
            {
                if (dbMap.Value.ToLower().Equals(db))
                {
                    valid = true;
                }
            }
            else
            {
                if (dbMap.Key.ToLower().Equals(db))
                {
                    valid = true;
                }
            }
            if (valid && !failed)
            {
                RecoveryConfiguration config = new RecoveryConfiguration();
                config.Identifier = job.JobIdentifier;
                RecoveryStatus state = (job.CurrentState(config) as ClusteredRecoveryJobState).Status;
                // BUG FIX: the original chained the inequality checks with '||',
                // which is a tautology (always true). The intent — forward changes
                // only while the job is still live — needs '&&'.
                if (state != RecoveryStatus.Failure && state != RecoveryStatus.Cancelled && state != RecoveryStatus.Completed)
                {
                    if (changeConfig != null)
                    {
                        job.SubmitConfigChanged(changeConfig);
                    }
                    else
                    {
                        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
                        {
                            LoggerManager.Instance.RecoveryLogger.Error("RecoveryManager.SubmitConfigChanged()", "failing");
                        }
                        job.Cancel(config); // job has failed
                        // remove job from active config
                        RemoveRunningJob(config.Identifier);
                        break;
                    }
                }
            }
            else if (failed)
            {
                RecoveryConfiguration config = new RecoveryConfiguration();
                config.Identifier = job.JobIdentifier;
                if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
                {
                    LoggerManager.Instance.RecoveryLogger.Error("RecoveryManager.SubmitConfigChanged()", "overall failure");
                }
                job.Cancel(config);
                // remove job from active config
                RemoveRunningJob(config.Identifier);
                break;
            }
        }
    }
    catch (Exception ex)
    {
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
        {
            LoggerManager.Instance.RecoveryLogger.Error("RecoveryManager.SubmitConfigChanged()", ex.ToString());
        }
    }
}
/// <summary>
/// Submits a new clustered recovery job: validates prerequisites, registers the
/// job, creates the recovery folder for backup jobs, then initializes and
/// starts it. Registration is rolled back on any failure.
/// </summary>
/// <param name="config">Recovery configuration (job type, path, credentials, databases).</param>
/// <param name="additionalParams">Extra state forwarded to validation and initialization.</param>
/// <returns>Start/initialization status, or a Failure status describing the problem.</returns>
RecoveryOperationStatus IRecoveryManager.SubmitRecoveryJob(RecoveryConfiguration config, object additionalParams)
{
    RecoveryOperationStatus state = new RecoveryOperationStatus(RecoveryStatus.Failure);
    state.Message = "Failure during submission state";
    try
    {
        // Ensure the prereqs provided are valid.
        RecoveryOperationStatus valid = this.EnsurePreRequisites(config, additionalParams);
        if (valid.Status == RecoveryStatus.Success)
        {
            // 1. Create and register the clustered job.
            ClusterRecoveryJob clusteredJob = new ClusterRecoveryJob(this.AssignJobUID(), config, this, _configurationStore);
            clusteredJob.RegisterRecoveryCommunicationHandler(this);
            _runningClusteredJobMap.Add(clusteredJob.JobIdentifier, clusteredJob);
            state.JobIdentifier = clusteredJob.JobIdentifier;
            // Backup jobs need the recovery root folder created up front.
            switch (config.JobType)
            {
                case RecoveryJobType.ConfigBackup:
                case RecoveryJobType.DataBackup:
                case RecoveryJobType.FullBackup:
                    RecoveryOperationStatus folderStatus = clusteredJob.CreateRecoveryFolder(config.RecoveryPath, config.UserName, config.Password);
                    if (folderStatus.Status == RecoveryStatus.Failure)
                    {
                        RemoveRunningJob(clusteredJob.JobIdentifier);
                        clusteredJob.Dispose();
                        return (folderStatus);
                    }
                    break;
            }
            // 2. Initialize (prepare) the job.
            state = clusteredJob.Initialize(config, additionalParams);
            // 3. Verify status. Cleanup: the original performed this identical
            // failure check twice in a row; collapsed to one. The job is also
            // disposed here for consistency with the folder-failure path above.
            if (state.Status == RecoveryStatus.Failure)
            {
                RemoveRunningJob(clusteredJob.JobIdentifier);
                clusteredJob.Dispose();
                return (state);
            }
            state = clusteredJob.Start(config);
        }
        else
        {
            state = valid;
        }
    }
    catch (Exception ex)
    {
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
        {
            LoggerManager.Instance.RecoveryLogger.Error("RecoveryManager.Submit()", ex.ToString());
        }
        state.Message = ex.Message;
    }
    return (state);
}
/// <summary>
/// Validates database names for a recovery job: for restores, the destination
/// database must NOT already exist; for backups, the source database MUST exist.
/// </summary>
/// <param name="dbMap">Source-to-destination database name map (destination may be empty).</param>
/// <param name="entity">Current config-server entities, used for existence checks.</param>
/// <param name="jobType">Job type selecting the validation direction.</param>
/// <returns>Success, or Failure with a message for the first offending entry.</returns>
private RecoveryOperationStatus DatabaseExists(Dictionary <string, string> dbMap, CsBackupableEntities entity, RecoveryJobType jobType)
{
    RecoveryOperationStatus state = new RecoveryOperationStatus(RecoveryStatus.Success);
    if (dbMap.Count > 0)
    {
        switch (jobType)
        {
            case RecoveryJobType.ConfigRestore:
            case RecoveryJobType.Restore:
                // Restore: the target name (destination, or source when no rename) must be free.
                foreach (string db in dbMap.Keys)
                {
                    string destination = dbMap[db];
                    // Cleanup: IsNullOrWhiteSpace subsumes the original's extra IsNullOrEmpty check.
                    if (!string.IsNullOrWhiteSpace(destination))
                    {
                        if (entity.Database.Keys.Contains(destination))
                        {
                            state.Status = RecoveryStatus.Failure;
                            state.Message = "Destination Database {" + destination + "} already exists";
                            return (state);
                        }
                    }
                    else if (!string.IsNullOrEmpty(db))
                    {
                        if (entity.Database.Keys.Contains(db))
                        {
                            state.Status = RecoveryStatus.Failure;
                            state.Message = "Database {" + db + "} already exists with the Name provided";
                            return (state);
                        }
                    }
                    else
                    {
                        state.Status = RecoveryStatus.Failure;
                        // BUG FIX: this restore branch previously reported
                        // "In-valid name provided for Backup" — messages were swapped.
                        state.Message = "Invalid name provided for Restore";
                        return (state);
                    }
                }
                break;
            case RecoveryJobType.ConfigBackup:
            case RecoveryJobType.DataBackup:
            case RecoveryJobType.FullBackup:
                // Backup: the source database must exist.
                foreach (string db in dbMap.Keys)
                {
                    if (!string.IsNullOrEmpty(db))
                    {
                        if (!entity.Database.Keys.Contains(db))
                        {
                            state.Status = RecoveryStatus.Failure;
                            // BUG FIX: grammar — "does not exists" -> "does not exist".
                            state.Message = "Database {" + db + "} does not exist";
                            return (state);
                        }
                    }
                    else
                    {
                        state.Status = RecoveryStatus.Failure;
                        // BUG FIX: this backup branch previously reported
                        // "In-valid name provided for Restore" — messages were swapped.
                        state.Message = "Invalid name provided for Backup";
                        return (state);
                    }
                }
                break;
        }
    }
    return (state);
}
/// <summary>
/// Validates a clustered recovery job before submission: folder structure for
/// every shard, database-name availability/existence via DatabaseExists, and —
/// for restores — that the config-server backup file exists and names the
/// source database. Network paths are accessed under impersonation.
/// </summary>
/// <param name="config">Recovery configuration (path, job type, credentials, databases).</param>
/// <param name="additionalParams">CsBackupableEntities snapshot of current config state.</param>
/// <returns>Success when all checks pass; Failure (logged) otherwise.</returns>
private RecoveryOperationStatus EnsurePreRequisites(RecoveryConfiguration config, object additionalParams)
{
    RecoveryOperationStatus state = new RecoveryOperationStatus(RecoveryStatus.Failure);
    state.JobIdentifier = config.Identifier;
    Impersonation impersonation = null;
    try
    {
        if (RecoveryFolderStructure.PathIsNetworkPath(config.RecoveryPath))
        {
            impersonation = new Impersonation(config.UserName, config.Password);
        }
        List <string> shardNameList = new List <string>();
        ClusterInfo clusterInfo = GetConfiguredClusters(config.Cluster);
        shardNameList.AddRange(clusterInfo.ShardInfo.Keys);
        state = RecoveryFolderStructure.ValidateFolderStructure(config.RecoveryPath, config.JobType, true, shardNameList);
        if (state.Status == RecoveryStatus.Failure)
        {
            if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
            {
                LoggerManager.Instance.RecoveryLogger.Error("RecoveryManager.EnsurePreRequisites()", config.RecoveryPath + " : " + state.Message);
            }
        }
        if (state.Status == RecoveryStatus.Success)
        {
            CsBackupableEntities entity = (CsBackupableEntities)additionalParams;
            #region validate db Name
            // Cleanup: the original's switch had two branches performing the
            // identical DatabaseExists call; collapsed into one case group.
            switch (config.JobType)
            {
                case RecoveryJobType.ConfigRestore:
                case RecoveryJobType.Restore:
                case RecoveryJobType.ConfigBackup:
                case RecoveryJobType.DataBackup:
                case RecoveryJobType.FullBackup:
                    state = DatabaseExists(config.DatabaseMap, entity, config.JobType);
                    if (state.Status == RecoveryStatus.Failure)
                    {
                        return (state);
                    }
                    break;
            }
            state.Status = RecoveryStatus.Success;
            #endregion
            #region validate files
            // Restore: the config-server backup file must exist and name the source database.
            if (config.JobType == RecoveryJobType.Restore)
            {
                string configPath = Path.Combine(config.RecoveryPath, RecoveryFolderStructure.CONFIG_SERVER);
                string filePath = Path.Combine(configPath, RecoveryFolderStructure.CONFIG_SERVER);
                if (File.Exists(filePath))
                {
                    BackupFile file = new BackupFile(RecoveryFolderStructure.CONFIG_SERVER, configPath, config.UserName, config.Password);
                    Alachisoft.NosDB.Core.Recovery.Persistence.BackupFile.Header header = file.ReadFileHeader();
                    if (!header.Database.ToLower().Equals(config.DatabaseMap.First().Key))
                    {
                        state.Status = RecoveryStatus.Failure;
                        state.Message = "Provided file does not contain the source database " + config.DatabaseMap.First().Key;
                    }
                    file.Close();
                }
                else
                {
                    state.Status = RecoveryStatus.Failure;
                    state.Message = "No file exists in the given folder";
                }
            }
            #endregion
        }
    }
    finally
    {
        // BUG FIX: the original disposed impersonation only on the fully
        // successful path, leaking it on folder-validation failure, on the
        // DatabaseExists early return, and on exceptions.
        if (impersonation != null)
        {
            impersonation.Dispose();
        }
    }
    if (state.Status == RecoveryStatus.Failure)
    {
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
        {
            LoggerManager.Instance.RecoveryLogger.Error("RecoveryManager.EnsurePreRequisites()", state.Message);
        }
    }
    return (state);
}