/// <summary>
/// Creates the shard-level data-backup job for the given recovery operation: one
/// <see cref="DatabaseBackupJob"/> per database in the persistence configuration,
/// all registered on a single <see cref="JobInfoObject"/>.
/// </summary>
/// <param name="opContext">Operation whose <c>Parameter</c> is a <see cref="SubmitBackupOpParams"/>.</param>
/// <returns>The info object tracking all created backup jobs.</returns>
private JobInfoObject CreateBackupJob(RecoveryOperation opContext)
{
    JobInfoObject infoObject = null;
    RecoveryJobBase dataJob = null;

    #region BackupJob
    SubmitBackupOpParams bckpParam = (SubmitBackupOpParams)opContext.Parameter;

    // create handler object (one shared info object for the whole shard-level job)
    infoObject = new JobInfoObject(opContext.JobIdentifer, _context.LocalShardName, _context.LocalAddress.ip, _context.ClusterName, RecoveryJobType.DataBackup, bckpParam.PersistenceConfiguration.FilePath);

    foreach (string db in bckpParam.PersistenceConfiguration.DbCollectionMap.Keys)
    {
        // NOTE(review): inner lookup uses [db][db] while CreateRestoreJob uses Keys.First() —
        // assumes the backup map is keyed db -> db -> collections; confirm against the caller.
        dataJob = new DatabaseBackupJob(opContext.JobIdentifer, _context, db, bckpParam.PersistenceConfiguration.DbCollectionMap[db][db].ToList(), infoObject.RecoveryPersistenceManager, _context.ClusterName);
        dataJob.RegisterProgressHandler(this);
        infoObject.AddJob(dataJob);

        // set persistence configuration
        if (bckpParam.PersistenceConfiguration.FileName == null)
        {
            bckpParam.PersistenceConfiguration.FileName = new List<string>();
        }
        // FIX: previously Add(db) was inside the null check, so only the first database
        // name was ever recorded; the intent ("add name of all databases for shard level
        // job") requires adding every db.
        bckpParam.PersistenceConfiguration.FileName.Add(db);
        infoObject.RecoveryPersistenceManager.SetJobConfiguration(RecoveryJobType.DataBackup, bckpParam.PersistenceConfiguration, db, 1);
    }
    #endregion
    return (infoObject);
}
/// <summary>
/// Adds a recovery job to this info object's job list (ignoring nulls and duplicates)
/// and folds the job's execution status into the aggregate execution state.
/// </summary>
/// <param name="job">Job to register; a null job is silently ignored.</param>
internal void AddJob(RecoveryJobBase job)
{
    if (job == null)
    {
        return;
    }
    lock (_mutex)
    {
        // FIX: the duplicate check must run under the lock; previously Contains() was
        // evaluated outside it, so two concurrent callers could both pass the check
        // and add the same job twice.
        if (!_jobList.Contains(job))
        {
            _jobList.Add(job);
            _executionState.UpdateEntityState(job.ExecutionStatus);
        }
    }
}
/// <summary>
/// Releases this manager's job references: clears the current job and disposes
/// every tracked <see cref="JobInfoObject"/> before dropping the map.
/// </summary>
public void Dispose()
{
    _currentJob = null;

    if (_databaseJobMap == null)
    {
        return;
    }

    foreach (JobInfoObject infoObj in _databaseJobMap.Values)
    {
        infoObj.Dispose();
    }
    _databaseJobMap = null;
}
/// <summary>
/// Starts the data phase of a previously-created recovery job. Only the
/// DataBackup/DataRestore job is started here; the oplog job (if any) is started
/// later, once the data job completes. On failure the job is marked as failed and
/// <see cref="CheckJobState"/> is invoked to tear it down and report status.
/// </summary>
/// <param name="opContext">Operation carrying the job identifier; may be null.</param>
/// <returns>An Executing status echoing the job identifier.</returns>
private RecoveryOperationStatus StartRecoveryJob(RecoveryOperation opContext)
{
    RecoveryOperationStatus status = new RecoveryOperationStatus(RecoveryStatus.Executing);

    // FIX: the original dereferenced opContext.JobIdentifer BEFORE its null check,
    // making the check dead code; guard first.
    if (opContext == null)
    {
        return (status);
    }

    status.JobIdentifier = opContext.JobIdentifer;
    status.Message = "Recovery Job has successfully started";

    try
    {
        JobInfoObject infoObject;
        if (_databaseJobMap.TryGetValue(opContext.JobIdentifer, out infoObject))
        {
            // initialize only data job will be executed,oplog job will start only after datajob has been executed completely
            RecoveryJobBase job = infoObject.JobList.Where(x => (x.JobType == RecoveryJobType.DataBackup) || (x.JobType == RecoveryJobType.DataRestore)).First();
            job.Start();

            if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled)
            {
                LoggerManager.Instance.RecoveryLogger.Info("ShardRecoveryManager.StartRecoveryJob()", opContext.JobIdentifer + "_" + infoObject.JobType + " Started");
            }
            infoObject.RecoveryPersistenceManager.IsJobActive = true;
        }
    }
    catch (Exception exp)
    {
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
        {
            // FIX: the original indexed _databaseJobMap unconditionally here, so the
            // catch block itself could throw KeyNotFoundException; guard the lookup.
            JobInfoObject failedInfo;
            string jobTypeText = _databaseJobMap.TryGetValue(opContext.JobIdentifer, out failedInfo)
                ? failedInfo.ExecutionState.JobType.ToString()
                : "<unknown>";
            LoggerManager.Instance.RecoveryLogger.Error("ShardRecoveryManager.StartRecoveryJob()", opContext.JobIdentifer + "_" + jobTypeText + " : " + exp.ToString());
        }

        if (_databaseJobMap.ContainsKey(opContext.JobIdentifer))
        {
            _databaseJobMap[opContext.JobIdentifer].ExecutionState.Status = RecoveryStatus.Failure;
            _databaseJobMap[opContext.JobIdentifer].ExecutionState.Message = "Failure during Starting phase";
            CheckJobState(opContext.JobIdentifer, true);
        }
    }
    return (status);
}
//M_TODO: must Specify DB name in case of shard job, so a separate job is created, for now this will run multiple times and override job if more
//than one db is specified.
/// <summary>
/// Creates the data-restore job for the given recovery operation: one
/// <see cref="DatabaseRestoreJob"/> per source database in the persistence
/// configuration. NOTE: a fresh <see cref="JobInfoObject"/> is created on every
/// loop iteration (per the M_TODO above), so with more than one database only the
/// LAST info object is returned — earlier ones are overwritten.
/// </summary>
/// <param name="opContext">Operation whose <c>Parameter</c> is a <see cref="SubmitRestoreOpParams"/>.</param>
/// <returns>The info object for the last database processed, or null if the map is empty.</returns>
private JobInfoObject CreateRestoreJob(RecoveryOperation opContext)
{
    JobInfoObject infoObject = null;
    RecoveryJobBase dataJob = null;

    #region RestoreJob
    SubmitRestoreOpParams resParam = (SubmitRestoreOpParams)opContext.Parameter;

    foreach (string db in resParam.PersistenceConfiguration.DbCollectionMap.Keys)
    {
        // create handler object
        // Note: this is kept inside the loop with the assumption that a seperate info object is to be kept for each database in complete cluster job
        infoObject = new JobInfoObject(opContext.JobIdentifer, _context.LocalShardName, _context.LocalAddress.ip, _context.ClusterName, RecoveryJobType.DataRestore, resParam.PersistenceConfiguration.FilePath);

        // destination database name is the first (assumed only) key of the inner map
        string destination = resParam.PersistenceConfiguration.DbCollectionMap[db].Keys.First();

        // create DataJob
        dataJob = new DatabaseRestoreJob(opContext.JobIdentifer, _context, destination, resParam.PersistenceConfiguration.DbCollectionMap[db][destination].ToList<string>(), infoObject.RecoveryPersistenceManager, _context.ClusterName);
        dataJob.RegisterProgressHandler(this);
        infoObject.AddJob(dataJob);

        // set persistence configuration
        if (resParam.PersistenceConfiguration.FileName == null)
        {
            resParam.PersistenceConfiguration.FileName = new List<string>();
        }
        // FIX: previously Add(db) was inside the null check, so only the first database
        // name was ever recorded; the intent ("add name of all databases for shard level
        // job") requires adding every db.
        resParam.PersistenceConfiguration.FileName.Add(db);
        infoObject.RecoveryPersistenceManager.SetJobConfiguration(RecoveryJobType.DataRestore, resParam.PersistenceConfiguration, db, 1);
    }
    #endregion
    return (infoObject);
}
/// <summary>
/// Manages job threads against any submission of status. For terminal states
/// (Failure/Completed/Cancelled) it tears every job down in order — close backup
/// files, stop the thread, dispose, rename the backup root folder — then submits
/// the final state to the config server. For non-terminal states it either
/// force-sends the current state (<paramref name="ensureSend"/>) or, when the data
/// job has completed, closes its file, stops it, re-arms the shared queue and
/// triggers the follow-up status send.
/// </summary>
/// <param name="id">Job identifier; silently logged and ignored if unknown.</param>
/// <param name="ensureSend">When true, always push the current execution state to the config server.</param>
private void CheckJobState(string id, bool ensureSend)
{
    if (_databaseJobMap.ContainsKey(id))
    {
        // send status to config server
        switch (_databaseJobMap[id].ExecutionState.Status)
        {
            // ---- terminal states: full teardown of every job, then final status submit ----
            case RecoveryStatus.Failure:
            case RecoveryStatus.Completed:
            case RecoveryStatus.Cancelled:
                foreach (RecoveryJobBase job in _databaseJobMap[id].JobList)
                {
                    try
                    {
                        // close all opened backup files
                        // NOTE(review): lookups below use job.JobIdentifier while the state check
                        // uses id — presumably these are always equal; confirm.
                        switch (job.JobType)
                        {
                            case RecoveryJobType.DataBackup:
                                if (_databaseJobMap[id].ExecutionState.Status == RecoveryStatus.Cancelled)
                                {
                                    _databaseJobMap[job.JobIdentifier].RecoveryPersistenceManager.CloseBackupFile(job.Database, RecoveryFileState.Cancelled);
                                }
                                else if (_databaseJobMap[id].ExecutionState.Status == RecoveryStatus.Failure)
                                {
                                    _databaseJobMap[job.JobIdentifier].RecoveryPersistenceManager.CloseBackupFile(job.Database, RecoveryFileState.Failed);
                                }
                                else if (_databaseJobMap[id].ExecutionState.Status == RecoveryStatus.Completed)
                                {
                                    _databaseJobMap[job.JobIdentifier].RecoveryPersistenceManager.CloseBackupFile(job.Database, RecoveryFileState.Completed);
                                }
                                break;

                            //ASSUMPTION:commented out under the assumption that during diffbackup oplog and dif job will create same file,
                            //so only oplog job will close the file.
                            //
                            //case RecoveryJobType.DifferentialBackup:
                            //    _databaseJobMap[_job.JobIdentifier].RecoveryPersistenceManager.CloseBackupFile(RecoveryFileNames.Diflog);
                            //    break;
                            //
                        }

                        // stop the job thread if it ever ran and is not already stopped
                        try
                        {
                            if (job.State != ThreadState.Unstarted && job.State != ThreadState.Stopped)
                            {
                                job.Stop();
                            }
                        }
                        catch (ThreadAbortException)
                        {
                            Thread.ResetAbort(); // ignore it
                        }

                        // dispose independently of stop so one failure doesn't skip cleanup
                        try
                        {
                            job.Dispose();
                        }
                        catch (ThreadAbortException)
                        {
                            Thread.ResetAbort(); // ignore it
                        }

                        // tag the backup root folder with the job's final outcome
                        try
                        {
                            #region rename folder
                            JobInfoObject infoObj = _databaseJobMap[job.JobIdentifier];
                            switch (job.JobType)
                            {
                                case RecoveryJobType.DataBackup:
                                    if (infoObj.JobType == RecoveryJobType.DataBackup)
                                    {
                                        if (_databaseJobMap[id].ExecutionState.Status == RecoveryStatus.Failure)
                                        {
                                            infoObj.RenameRootFolder(RecoveryFileState.Failed);
                                        }
                                        else if (_databaseJobMap[id].ExecutionState.Status == RecoveryStatus.Cancelled)
                                        {
                                            infoObj.RenameRootFolder(RecoveryFileState.Cancelled);
                                        }
                                        else
                                        {
                                            infoObj.RenameRootFolder(RecoveryFileState.Completed);
                                        }
                                    }
                                    break;
                            }
                            #endregion
                        }
                        catch (Exception ex)
                        {
                            // rename failure is non-fatal: log and continue tearing down
                            if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
                            {
                                LoggerManager.Instance.RecoveryLogger.Error("ShardRecoveryManager.CheckJobState()", id + " : " + job.JobType + " : Renaming Folder : " + ex.ToString());
                            }
                        }

                        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled)
                        {
                            LoggerManager.Instance.RecoveryLogger.Info("ShardRecoveryManager.CheckJobState()", id + " : " + job.JobType + " : End");
                        }
                    }
                    catch (Exception exp)
                    {
                        // per-job catch: one job's teardown failure must not stop the loop
                        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
                        {
                            LoggerManager.Instance.RecoveryLogger.Error("ShardRecoveryManager.CheckJobState()", id + " : " + job.JobType + " : " + exp.ToString());
                        }
                    }
                }

                // finalize bookkeeping and push the terminal state to the config server
                _databaseJobMap[id].ExecutionState.StopTime = DateTime.Now;
                _databaseJobMap[id].RecoveryPersistenceManager.SharedQueue.CompleteAdding();
                _databaseJobMap[id].RecoveryPersistenceManager.IsJobActive = false;
                _databaseJobMap[id].ExecutionState.MessageTime = DateTime.Now;
                _context.ConfigurationSession.SubmitShardJobStatus(_databaseJobMap[id].ExecutionState);
                break;

            // ---- non-terminal states: either force-send status or advance the pipeline ----
            case RecoveryStatus.Executing:
            case RecoveryStatus.uninitiated:
            case RecoveryStatus.Waiting:
                if (ensureSend)
                {
                    _databaseJobMap[id].ExecutionState.MessageTime = DateTime.Now;
                    if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled)
                    {
                        LoggerManager.Instance.RecoveryLogger.Info("ShardRecoveryManager.SubmitState()", _databaseJobMap[id].ExecutionState.ToString());
                    }
                    _context.ConfigurationSession.SubmitShardJobStatus(_databaseJobMap[id].ExecutionState);
                }
                else
                {
                    try
                    {
                        // check status of datajob and diffjob
                        RecoveryJobBase dataJob = _databaseJobMap[id].JobList.Where(x => (x.JobType == RecoveryJobType.DataBackup) || (x.JobType == RecoveryJobType.DataRestore)).First();
                        RecoveryJobBase oplogJob = null; // NOTE(review): declared but never used in the visible code
                        if (dataJob != null)
                        {
                            if (dataJob.ExecutionStatus.Status == RecoveryStatus.Completed)
                            {
                                if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled)
                                {
                                    LoggerManager.Instance.RecoveryLogger.Info("ShardRecoveryManager.CheckJobState()", id + "WARNING: Else case executed for DIFbackup");
                                }
                                try
                                {
                                    if (dataJob.JobType == RecoveryJobType.DataBackup)
                                    {
                                        //close file
                                        _databaseJobMap[dataJob.JobIdentifier].RecoveryPersistenceManager.CloseBackupFile(dataJob.Database, RecoveryFileState.Completed);
                                    }
                                    // stop data job
                                    dataJob.Stop();
                                }
                                catch (ThreadAbortException)
                                {
                                    Thread.ResetAbort(); // ignore it
                                }
                                // re-arm the shared queue for the next consumer, then report progress
                                _databaseJobMap[id].RecoveryPersistenceManager.SharedQueue.Consumed = false;
                                _databaseJobMap[id].RecoveryPersistenceManager.SharedQueue.PauseProducing = false;
                                SendStatus(id);
                            }
                        }
                    }
                    catch (Exception exp)
                    {
                        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
                        {
                            LoggerManager.Instance.RecoveryLogger.Error("ShardRecoveryManager.SubmitState_StartOplog()", id + " : " + exp.ToString());
                        }
                    }
                }
                break;
        }
    }
    else
    {
        // unknown job id: nothing to manage, just log it
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled)
        {
            LoggerManager.Instance.RecoveryLogger.Info("ShardRecoveryManager.SubmitState()", "Shard does not contain a job against id: " + id);
        }
    }
}