protected override void OnExecute(CommandProcessor theProcessor)
{
    Platform.CheckForNullReference(Context, "Context");

    DetermineTargetLocation();

    EnsureStudyCanBeUpdated(_destinationStudyStorage);

    try
    {
        if (Context.WorkQueueUidList.Count > 0)
            ProcessUidList();
    }
    finally
    {
        UpdateHistory(_destinationStudyStorage);
    }

    if (_complete)
    {
        StudyRulesEngine engine = new StudyRulesEngine(_destinationStudyStorage, Context.Partition);
        engine.Apply(ServerRuleApplyTimeEnum.StudyProcessed, theProcessor);
    }
}
protected override void ProcessItem(Model.WorkQueue item)
{
    Platform.CheckForNullReference(item, "item");
    Platform.CheckForNullReference(item.StudyStorageKey, "item.StudyStorageKey");

    var context = new StudyProcessorContext(StorageLocation, WorkQueueItem);

    // TODO: Should we enforce the patient's name rule?
    // If we do, the Study record will have the new patient's name,
    // but how should we handle the name in the Patient record?
    const bool enforceNameRules = false;
    var processor = new SopInstanceProcessor(context) { EnforceNameRules = enforceNameRules };

    var seriesMap = new Dictionary<string, List<string>>();

    bool successful = true;
    string failureDescription = null;

    // The processor stores its state in the Data column
    ReadQueueData(item);

    if (_queueData.State == null || !_queueData.State.ExecuteAtLeastOnce)
    {
        // Added for ticket #9673:
        // If the study folder does not exist and the study has been archived,
        // trigger a restore and we're done.
        if (!Directory.Exists(StorageLocation.GetStudyPath()))
        {
            if (StorageLocation.ArchiveLocations.Count > 0)
            {
                Platform.Log(LogLevel.Info,
                             "Reprocessing archived study {0} for Patient {1} (PatientId:{2} A#:{3}) on Partition {4} without study data on the filesystem. Inserting Restore Request.",
                             Study.StudyInstanceUid,
                             Study.PatientsName,
                             Study.PatientId,
                             Study.AccessionNumber,
                             ServerPartition.Description);

                PostProcessing(item, WorkQueueProcessorStatus.Complete, WorkQueueProcessorDatabaseUpdate.ResetQueueState);

                // Post processing had to be done first so the study is unlocked
                // before the RestoreRequest can be inserted.
                ServerHelper.InsertRestoreRequest(StorageLocation);

                RaiseAlert(WorkQueueItem, AlertLevel.Warning,
                           string.Format(
                               "Found study {0} for Patient {1} (A#:{2}) on Partition {3} without storage folder, restoring study.",
                               Study.StudyInstanceUid, Study.PatientsName, Study.AccessionNumber,
                               ServerPartition.Description));
                return;
            }
        }

        if (Study == null)
            Platform.Log(LogLevel.Info,
                         "Reprocessing study {0} on Partition {1}",
                         StorageLocation.StudyInstanceUid, ServerPartition.Description);
        else
            Platform.Log(LogLevel.Info,
                         "Reprocessing study {0} for Patient {1} (PatientId:{2} A#:{3}) on Partition {4}",
                         Study.StudyInstanceUid, Study.PatientsName, Study.PatientId,
                         Study.AccessionNumber, ServerPartition.Description);

        CleanupDatabase();
    }
    else
    {
        if (_queueData.State.Completed)
        {
            #region SAFE-GUARD CODE: PREVENT INFINITE LOOP

            // The processor indicated it had completed reprocessing in a previous run.
            // The entry should have been removed and this block of code should never be called.
            // However, we have seen ReprocessStudy entries that mysteriously contain rows in the
            // WorkQueueUid table. The rows prevent the entry from being removed from the database
            // and the ReprocessStudy keeps repeating itself.

            // Update the state first, increment the CompleteAttemptCount.
            _queueData.State.ExecuteAtLeastOnce = true;
            _queueData.State.Completed = true;
            _queueData.State.CompleteAttemptCount++;
            SaveState(item, _queueData);

            if (_queueData.State.CompleteAttemptCount < 10)
            {
                // Maybe there was a db error in the previous attempt to remove the entry. Let's try again.
                Platform.Log(LogLevel.Info, "Resuming Reprocessing study {0} but it was already completed!!!",
                             StorageLocation.StudyInstanceUid);
                PostProcessing(item, WorkQueueProcessorStatus.Complete, WorkQueueProcessorDatabaseUpdate.ResetQueueState);
            }
            else
            {
                // We are definitely stuck.
                Platform.Log(LogLevel.Error, "ReprocessStudy {0} for study {1} appears stuck. Aborting it.",
                             item.Key, StorageLocation.StudyInstanceUid);
                item.FailureDescription = "This entry had completed but could not be removed.";
                PostProcessingFailure(item, WorkQueueProcessorFailureType.Fatal);
            }

            return;

            #endregion
        }

        if (Study == null)
            Platform.Log(LogLevel.Info,
                         "Resuming Reprocessing study {0} on Partition {1}",
                         StorageLocation.StudyInstanceUid, ServerPartition.Description);
        else
            Platform.Log(LogLevel.Info,
                         "Resuming Reprocessing study {0} for Patient {1} (PatientId:{2} A#:{3}) on Partition {4}",
                         Study.StudyInstanceUid, Study.PatientsName, Study.PatientId,
                         Study.AccessionNumber, ServerPartition.Description);
    }

    StudyXml studyXml = LoadStudyXml();

    int reprocessedCounter = 0;
    var removedFiles = new List<FileInfo>();
    try
    {
        // Traverse the directories, processing up to 5000 files per run.
        FileProcessor.Process(StorageLocation.GetStudyPath(), "*.*",
            delegate(string path, out bool cancel)
            {
                #region Reprocess File

                var file = new FileInfo(path);

                // Ignore all files except those ending in ".dcm";
                // ignore "bad(0).dcm" files too.
                if (Regex.IsMatch(file.Name.ToUpper(), "[0-9]+\\.DCM$"))
                {
                    try
                    {
                        var dicomFile = new DicomFile(path);
                        dicomFile.Load(DicomReadOptions.StorePixelDataReferences | DicomReadOptions.Default);

                        string seriesUid = dicomFile.DataSet[DicomTags.SeriesInstanceUid].GetString(0, string.Empty);
                        string instanceUid = dicomFile.DataSet[DicomTags.SopInstanceUid].GetString(0, string.Empty);
                        if (studyXml.Contains(seriesUid, instanceUid))
                        {
                            if (!seriesMap.ContainsKey(seriesUid))
                            {
                                seriesMap.Add(seriesUid, new List<string>());
                            }

                            if (!seriesMap[seriesUid].Contains(instanceUid))
                                seriesMap[seriesUid].Add(instanceUid);
                            else
                            {
                                Platform.Log(LogLevel.Warn, "SOP Instance UID in {0} appears more than once in the study.", path);
                            }
                        }
                        else
                        {
                            Platform.Log(ServerPlatform.InstanceLogLevel, "Reprocessing SOP {0} for study {1}",
                                         instanceUid, StorageLocation.StudyInstanceUid);
                            string groupId = ServerHelper.GetUidGroup(dicomFile, StorageLocation.ServerPartition, WorkQueueItem.InsertTime);
                            ProcessingResult result = processor.ProcessFile(groupId, dicomFile, studyXml, true, false, null, null, SopInstanceProcessorSopType.ReprocessedSop);
                            switch (result.Status)
                            {
                                case ProcessingStatus.Success:
                                    reprocessedCounter++;
                                    if (!seriesMap.ContainsKey(seriesUid))
                                    {
                                        seriesMap.Add(seriesUid, new List<string>());
                                    }

                                    if (!seriesMap[seriesUid].Contains(instanceUid))
                                        seriesMap[seriesUid].Add(instanceUid);
                                    else
                                    {
                                        Platform.Log(LogLevel.Warn, "SOP Instance UID in {0} appears more than once in the study.", path);
                                    }
                                    break;

                                case ProcessingStatus.Reconciled:
                                    Platform.Log(LogLevel.Warn,
                                                 "SOP was unexpectedly reconciled on reprocess SOP {0} for study {1}. It will be removed from the folder.",
                                                 instanceUid, StorageLocation.StudyInstanceUid);
                                    failureDescription = String.Format("SOP Was reconciled: {0}", instanceUid);

                                    // Added for #10620 (previously we didn't do anything here).
                                    // Because we are reprocessing files in the study folder, when a file needs
                                    // to be reconciled it is copied to the reconcile folder. Therefore, we need
                                    // to delete the one in the study folder; otherwise, there will be a problem
                                    // when the SIQ entry is reconciled. InstanceAlreadyExistsException will also
                                    // be thrown by the SopInstanceProcessor if this ReprocessStudy WQI resumes
                                    // and reprocesses the same file again.
                                    // Note: we are sure that the file has been copied to the Reconcile folder
                                    // and there's no way back. We must get rid of this file in the study folder.
                                    FileUtils.Delete(path);

                                    // Special handling: if the file is one which we're supposed to reprocess
                                    // at the end (see ProcessAdditionalFiles), we must remove the file from the list.
                                    if (_additionalFilesToProcess != null && _additionalFilesToProcess.Contains(path))
                                    {
                                        _additionalFilesToProcess.Remove(path);
                                    }

                                    break;
                            }
                        }
                    }
                    catch (DicomException ex)
                    {
                        // TODO: Should we fail the reprocess instead? Deleting a DICOM file can lead to an incomplete study.
                        removedFiles.Add(file);
                        Platform.Log(LogLevel.Warn, "Skip reprocessing and delete {0}: Not readable.", path);
                        FileUtils.Delete(path);
                        failureDescription = ex.Message;
                    }
                }
                else if (!file.Extension.Equals(".xml") && !file.Extension.Equals(".gz"))
                {
                    // Not a ".dcm" or header file, delete it.
                    removedFiles.Add(file);
                    FileUtils.Delete(path);
                }

                #endregion

                if (reprocessedCounter > 0 && reprocessedCounter % 200 == 0)
                {
                    Platform.Log(LogLevel.Info, "Reprocessed {0} files for study {1}",
                                 reprocessedCounter, StorageLocation.StudyInstanceUid);
                }

                cancel = reprocessedCounter >= 5000;
            }, true);

        if (studyXml != null)
        {
            EnsureConsistentObjectCount(studyXml, seriesMap);
            SaveStudyXml(studyXml);
        }

        // Completed if either all files have been reprocessed
        // or no more dicom files are left that can be reprocessed.
        _completed = reprocessedCounter == 0;
    }
    catch (Exception e)
    {
        successful = false;
        failureDescription = e.Message;
        Platform.Log(LogLevel.Error, e, "Unexpected exception when reprocessing study: {0}", StorageLocation.StudyInstanceUid);
        Platform.Log(LogLevel.Error, "Study may be in invalid unprocessed state. Study location: {0}", StorageLocation.GetStudyPath());
        throw;
    }
    finally
    {
        LogRemovedFiles(removedFiles);

        // Update the state
        _queueData.State.ExecuteAtLeastOnce = true;
        _queueData.State.Completed = _completed;
        _queueData.State.CompleteAttemptCount++;
        SaveState(item, _queueData);

        if (!successful)
        {
            FailQueueItem(item, failureDescription);
        }
        else
        {
            if (!_completed)
            {
                // Put it back to Pending
                PostProcessing(item, WorkQueueProcessorStatus.Pending, WorkQueueProcessorDatabaseUpdate.None);
            }
            else
            {
                LogHistory();

                // Run Study / Series Rules Engine.
                var engine = new StudyRulesEngine(StorageLocation, ServerPartition);
                engine.Apply(ServerRuleApplyTimeEnum.StudyProcessed);

                // Log the FilesystemQueue related entries
                StorageLocation.LogFilesystemQueue();

                PostProcessing(item, WorkQueueProcessorStatus.Complete, WorkQueueProcessorDatabaseUpdate.ResetQueueState);

                Platform.Log(LogLevel.Info, "Completed reprocessing of study {0} on partition {1}",
                             StorageLocation.StudyInstanceUid, ServerPartition.Description);
            }
        }
    }
}
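The resume logic above round-trips a small state object through the WorkQueue entry's Data column via ReadQueueData/SaveState. A minimal sketch of its shape, inferred from usage (the class name and serialization mechanism are assumptions; only the three fields are grounded in the code above):

// Hypothetical shape of the state persisted in the Data column (assumption; field
// names and meanings are taken from how _queueData.State is used in ProcessItem).
public class ReprocessStudyState
{
    public bool ExecuteAtLeastOnce { get; set; }  // set once the first pass has started
    public bool Completed { get; set; }           // set when a pass finds no more files to reprocess
    public int CompleteAttemptCount { get; set; } // counts completed runs; the safe-guard block aborts after 10
}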
protected override void OnExecute(CommandProcessor theProcessor)
{
    Platform.CheckForNullReference(Context, "Context");

    _destinationStudyStorage = Context.History.DestStudyStorageKey != null
        ? StudyStorageLocation.FindStorageLocations(StudyStorage.Load(Context.History.DestStudyStorageKey))[0]
        : Context.WorkQueueItemStudyStorage;

    EnsureStudyCanBeUpdated(_destinationStudyStorage);

    if (_updateDestination)
        UpdateExistingStudy();

    LoadMergedStudyEntities();

    try
    {
        LoadUidMappings();

        if (Context.WorkQueueUidList.Count > 0)
        {
            ProcessUidList();
            LogResult();
        }
    }
    finally
    {
        UpdateHistory(_destinationStudyStorage);
    }

    if (_complete)
    {
        StudyRulesEngine engine = new StudyRulesEngine(_destinationStudyStorage, Context.Partition);
        engine.Apply(ServerRuleApplyTimeEnum.StudyProcessed, theProcessor);
    }
}
protected override void ProcessItem(Model.WorkQueue item)
{
    Platform.CheckForNullReference(item, "item");
    Platform.CheckForNullReference(StorageLocation, "StorageLocation");

    // Verify the study is not lossy online and lossless in the archive.
    // This could happen if the images were received WHILE the study was being lossy compressed.
    // The study state would not be set until the compression was completed or partially completed.
    CheckIfStudyIsLossy();

    Statistics.TotalProcessTime.Start();
    bool successful;
    bool idle = false;

    // Load the specific UIDs that need to be processed.
    LoadUids(item);

    int totalUidCount = WorkQueueUidList.Count;

    if (totalUidCount == 0)
    {
        successful = true;
        idle = true;
    }
    else
    {
        try
        {
            Context = new StudyProcessorContext(StorageLocation, WorkQueueItem);

            // Load the rules engine
            _sopProcessedRulesEngine = new ServerRulesEngine(ServerRuleApplyTimeEnum.SopProcessed, item.ServerPartitionKey);
            _sopProcessedRulesEngine.AddOmittedType(ServerRuleTypeEnum.SopCompress);
            _sopProcessedRulesEngine.Load();
            Statistics.SopProcessedEngineLoadTime.Add(_sopProcessedRulesEngine.Statistics.LoadTime);
            Context.SopProcessedRulesEngine = _sopProcessedRulesEngine;

            if (Study != null)
            {
                Platform.Log(LogLevel.Info, "Processing study {0} for Patient {1} (PatientId:{2} A#:{3}), {4} objects",
                             Study.StudyInstanceUid, Study.PatientsName, Study.PatientId,
                             Study.AccessionNumber, WorkQueueUidList.Count);
            }
            else
            {
                Platform.Log(LogLevel.Info, "Processing study {0}, {1} objects",
                             StorageLocation.StudyInstanceUid, WorkQueueUidList.Count);
            }

            // Process the images in the list
            successful = ProcessUidList(item) > 0;
        }
        catch (StudyIsNearlineException ex)
        {
            // Delay until the target is restored.
            // NOTE: If the study could not be restored after a certain period of time, this entry will be failed.
            if (ex.RestoreRequested)
            {
                PostponeItem(string.Format("Unable to auto-reconcile at this time: the target study {0} is not online yet. Restore has been requested.", ex.StudyInstanceUid));
                return;
            }

            // Fail right away
            FailQueueItem(item, string.Format("Unable to auto-reconcile at this time: the target study {0} is not online and could not be restored.", ex.StudyInstanceUid));
            return;
        }
    }

    Statistics.TotalProcessTime.End();

    if (successful)
    {
        if (idle && item.ExpirationTime <= Platform.Time)
        {
            // Run Study / Series Rules Engine.
            var engine = new StudyRulesEngine(StorageLocation, ServerPartition);
            engine.Apply(ServerRuleApplyTimeEnum.StudyProcessed);

            // Log the FilesystemQueue related entries
            StorageLocation.LogFilesystemQueue();

            // Delete the queue entry.
            PostProcessing(item,
                           WorkQueueProcessorStatus.Complete,
                           WorkQueueProcessorDatabaseUpdate.ResetQueueState);
        }
        else if (idle)
            PostProcessing(item,
                           WorkQueueProcessorStatus.IdleNoDelete, // Don't delete, so we ensure the rules engine is run later.
                           WorkQueueProcessorDatabaseUpdate.ResetQueueState);
        else
            PostProcessing(item,
                           WorkQueueProcessorStatus.Pending,
                           WorkQueueProcessorDatabaseUpdate.ResetQueueState);
    }
    else
    {
        bool allFailedDuplicate = CollectionUtils.TrueForAll(WorkQueueUidList, uid => uid.Duplicate && uid.Failed);

        if (allFailedDuplicate)
        {
            Platform.Log(LogLevel.Error, "All entries are duplicates");
            PostProcessingFailure(item, WorkQueueProcessorFailureType.Fatal);
            return;
        }

        PostProcessingFailure(item, WorkQueueProcessorFailureType.NonFatal);
    }
}
/// <summary>
/// Archive the specified <see cref="ArchiveQueue"/> item.
/// </summary>
/// <param name="queueItem">The ArchiveQueue item to archive.</param>
public void Run(ArchiveQueue queueItem)
{
    using (ArchiveProcessorContext executionContext = new ArchiveProcessorContext(queueItem))
    {
        try
        {
            if (!GetStudyStorageLocation(queueItem))
            {
                Platform.Log(LogLevel.Error,
                             "Unable to find readable study storage location for archival queue request {0}. Delaying request.",
                             queueItem.Key);
                queueItem.FailureDescription = "Unable to find readable study storage location for archival queue request.";
                _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2));
                return;
            }

            // First, check to see if we can lock the study; if not, just reschedule the queue entry.
            if (!_storageLocation.QueueStudyStateEnum.Equals(QueueStudyStateEnum.Idle))
            {
                Platform.Log(LogLevel.Info, "Study {0} on partition {1} is currently locked, delaying archival.",
                             _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description);
                queueItem.FailureDescription = "Study is currently locked, delaying archival.";
                _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2));
                return;
            }

            StudyIntegrityValidator validator = new StudyIntegrityValidator();
            validator.ValidateStudyState("Archive", _storageLocation, StudyIntegrityValidationModes.Default);

            using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
            {
                ILockStudy studyLock = update.GetBroker<ILockStudy>();
                LockStudyParameters parms = new LockStudyParameters
                {
                    StudyStorageKey = queueItem.StudyStorageKey,
                    QueueStudyStateEnum = QueueStudyStateEnum.ArchiveScheduled
                };
                bool retVal = studyLock.Execute(parms);
                if (!parms.Successful || !retVal)
                {
                    Platform.Log(LogLevel.Info, "Study {0} on partition {1} failed to lock, delaying archival.",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description);
                    queueItem.FailureDescription = "Study failed to lock, delaying archival.";
                    _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2));
                    return;
                }
                update.Commit();
            }

            string studyFolder = _storageLocation.GetStudyPath();

            string studyXmlFile = _storageLocation.GetStudyXmlPath();

            // Load the study Xml file; this is used to generate the list of dicom files to archive.
            LoadStudyXml(studyXmlFile);

            DicomFile file = LoadFileFromStudyXml();

            string patientsName = file.DataSet[DicomTags.PatientsName].GetString(0, string.Empty);
            string patientId = file.DataSet[DicomTags.PatientId].GetString(0, string.Empty);
            string accessionNumber = file.DataSet[DicomTags.AccessionNumber].GetString(0, string.Empty);

            Platform.Log(LogLevel.Info,
                         "Starting archival of study {0} for Patient {1} (PatientId:{2} A#:{3}) on Partition {4} on archive {5}",
                         _storageLocation.StudyInstanceUid, patientsName, patientId,
                         accessionNumber, _hsmArchive.ServerPartition.Description,
                         _hsmArchive.PartitionArchive.Description);

            // Use the command processor to do the archival.
            using (ServerCommandProcessor commandProcessor = new ServerCommandProcessor("Archive"))
            {
                _archiveXml = new XmlDocument();

                // Create the study date folder
                string zipFilename = Path.Combine(_hsmArchive.HsmPath, _storageLocation.StudyFolder);
                commandProcessor.AddCommand(new CreateDirectoryCommand(zipFilename));

                // Create a folder for the study
                zipFilename = Path.Combine(zipFilename, _storageLocation.StudyInstanceUid);
                commandProcessor.AddCommand(new CreateDirectoryCommand(zipFilename));

                // Save the archive data in the study folder, based on a filename with a date / time stamp
                string filename = String.Format("{0}.zip", Platform.Time.ToString("yyyy-MM-dd-HHmm"));
                zipFilename = Path.Combine(zipFilename, filename);

                // Create the Xml data to store in the ArchiveStudyStorage table telling
                // where the archived study is located.
                XmlElement hsmArchiveElement = _archiveXml.CreateElement("HsmArchive");
                _archiveXml.AppendChild(hsmArchiveElement);

                XmlElement studyFolderElement = _archiveXml.CreateElement("StudyFolder");
                hsmArchiveElement.AppendChild(studyFolderElement);
                studyFolderElement.InnerText = _storageLocation.StudyFolder;

                XmlElement filenameElement = _archiveXml.CreateElement("Filename");
                hsmArchiveElement.AppendChild(filenameElement);
                filenameElement.InnerText = filename;

                XmlElement studyInstanceUidElement = _archiveXml.CreateElement("Uid");
                hsmArchiveElement.AppendChild(studyInstanceUidElement);
                studyInstanceUidElement.InnerText = _storageLocation.StudyInstanceUid;

                // Create the Zip file
                commandProcessor.AddCommand(
                    new CreateStudyZipCommand(zipFilename, _studyXml, studyFolder, executionContext.TempDirectory));

                // Update the database.
                commandProcessor.AddCommand(new InsertArchiveStudyStorageCommand(queueItem.StudyStorageKey,
                                                                                 queueItem.PartitionArchiveKey,
                                                                                 queueItem.GetKey(),
                                                                                 _storageLocation.ServerTransferSyntaxKey,
                                                                                 _archiveXml));

                StudyRulesEngine studyEngine = new StudyRulesEngine(_storageLocation, _hsmArchive.ServerPartition, _studyXml);
                studyEngine.Apply(ServerRuleApplyTimeEnum.StudyArchived, commandProcessor);

                if (!commandProcessor.Execute())
                {
                    Platform.Log(LogLevel.Error,
                                 "Unexpected failure archiving study ({0}) to archive {1}: {2}, zip filename: {3}",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.PartitionArchive.Description,
                                 commandProcessor.FailureReason, zipFilename);

                    queueItem.FailureDescription = commandProcessor.FailureReason;
                    _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time);
                }
                else
                    Platform.Log(LogLevel.Info, "Successfully archived study {0} on {1} to zip {2}",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.PartitionArchive.Description, zipFilename);

                // Log the current FilesystemQueue settings
                _storageLocation.LogFilesystemQueue();
            }
        }
        catch (StudyIntegrityValidationFailure ex)
        {
            StringBuilder error = new StringBuilder();
            error.AppendLine(String.Format("Partition : {0}", ex.ValidationStudyInfo.ServerAE));
            error.AppendLine(String.Format("Patient : {0}", ex.ValidationStudyInfo.PatientsName));
            error.AppendLine(String.Format("Study Uid : {0}", ex.ValidationStudyInfo.StudyInstaneUid));
            error.AppendLine(String.Format("Accession# : {0}", ex.ValidationStudyInfo.AccessionNumber));
            error.AppendLine(String.Format("Study Date : {0}", ex.ValidationStudyInfo.StudyDate));

            queueItem.FailureDescription = error.ToString();
            _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time);
        }
        catch (Exception e)
        {
            String msg = String.Format("Unexpected exception archiving study: {0} on {1}: {2}",
                                       _storageLocation.StudyInstanceUid,
                                       _hsmArchive.PartitionArchive.Description, e.Message);

            Platform.Log(LogLevel.Error, e, msg);
            queueItem.FailureDescription = msg;
            _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time);
        }
        finally
        {
            // Unlock the Queue Entry
            using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
            {
                ILockStudy studyLock = update.GetBroker<ILockStudy>();
                LockStudyParameters parms = new LockStudyParameters
                {
                    StudyStorageKey = queueItem.StudyStorageKey,
                    QueueStudyStateEnum = QueueStudyStateEnum.Idle
                };
                bool retVal = studyLock.Execute(parms);
                if (!parms.Successful || !retVal)
                {
                    Platform.Log(LogLevel.Info, "Study {0} on partition {1} failed to unlock.",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description);
                }
                update.Commit();
            }
        }
    }
}
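For reference, the InsertArchiveStudyStorageCommand above persists the _archiveXml document in the ArchiveStudyStorage table so the zip can later be located under the HSM root. Based on the element names built in the method, the stored blob has this shape (the values below are illustrative, not taken from a real study):

<HsmArchive>
  <StudyFolder>20110203</StudyFolder>
  <Filename>2011-02-03-1425.zip</Filename>
  <Uid>1.2.840.113619.2.55.3.1234567890.123</Uid>
</HsmArchive>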
/// <summary>
/// Called when all WorkQueue UIDs have been processed
/// </summary>
private void OnAllWorkQueueUidsProcessed()
{
    if (Study != null)
    {
        Platform.Log(LogLevel.Info,
                     "StudyProcess Cleanup completed for study {0} for Patient {1} (PatientId:{2} A#:{3}) on Partition {4}",
                     Study.StudyInstanceUid, Study.PatientsName, Study.PatientId,
                     Study.AccessionNumber, ServerPartition.Description);

        Platform.Log(LogLevel.Info, "Applying rules engine to study being cleaned up to ensure disk management is applied.");

        // Run Study / Series Rules Engine.
        var engine = new StudyRulesEngine(StorageLocation, ServerPartition);
        engine.Apply(ServerRuleApplyTimeEnum.StudyProcessed);
        StorageLocation.LogFilesystemQueue();

        PostProcessing(WorkQueueItem,
                       WorkQueueProcessorStatus.Complete,
                       WorkQueueProcessorDatabaseUpdate.ResetQueueState);
    }
    else
    {
        Platform.Log(LogLevel.Info, "StudyProcess Cleanup completed. Performing final checks...");

        // Study was never processed
        if (EnsureNoOrphanFiles())
        {
            DeleteStudyStorage(WorkQueueItem);

            Platform.Log(LogLevel.Info, "StudyProcess Cleanup completed. Study no longer exists in the system");

            PostProcessing(WorkQueueItem,
                           WorkQueueProcessorStatus.Complete,
                           WorkQueueProcessorDatabaseUpdate.ResetQueueState);
        }
    }
}
/// <summary>
/// Archive the specified <see cref="ArchiveQueue"/> item.
/// </summary>
/// <param name="queueItem">The ArchiveQueue item to archive.</param>
public void Run(ArchiveQueue queueItem)
{
    using (ArchiveProcessorContext executionContext = new ArchiveProcessorContext(queueItem))
    {
        try
        {
            if (!GetStudyStorageLocation(queueItem))
            {
                Platform.Log(LogLevel.Error,
                             "Unable to find readable study storage location for archival queue request {0}. Delaying request.",
                             queueItem.Key);
                queueItem.FailureDescription = "Unable to find readable study storage location for archival queue request.";
                _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2));
                return;
            }

            // First, check to see if we can lock the study; if not, just reschedule the queue entry.
            if (!_storageLocation.QueueStudyStateEnum.Equals(QueueStudyStateEnum.Idle))
            {
                Platform.Log(LogLevel.Info, "Study {0} on partition {1} is currently locked, delaying archival.",
                             _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description);
                queueItem.FailureDescription = "Study is currently locked, delaying archival.";
                _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2));
                return;
            }

            StudyIntegrityValidator validator = new StudyIntegrityValidator();
            validator.ValidateStudyState("Archive", _storageLocation, StudyIntegrityValidationModes.Default);

            using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
            {
                ILockStudy studyLock = update.GetBroker<ILockStudy>();
                LockStudyParameters parms = new LockStudyParameters
                {
                    StudyStorageKey = queueItem.StudyStorageKey,
                    QueueStudyStateEnum = QueueStudyStateEnum.ArchiveScheduled
                };
                bool retVal = studyLock.Execute(parms);
                if (!parms.Successful || !retVal)
                {
                    Platform.Log(LogLevel.Info, "Study {0} on partition {1} failed to lock, delaying archival.",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description);
                    queueItem.FailureDescription = "Study failed to lock, delaying archival.";
                    _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2));
                    return;
                }
                update.Commit();
            }

            string studyXmlFile = _storageLocation.GetStudyXmlPath();

            // Load the study Xml file; this is used to generate the list of dicom files to archive.
            LoadStudyXml(studyXmlFile);

            DicomFile file = LoadFileFromStudyXml();

            string patientsName = file.DataSet[DicomTags.PatientsName].GetString(0, string.Empty);
            string patientId = file.DataSet[DicomTags.PatientId].GetString(0, string.Empty);
            string accessionNumber = file.DataSet[DicomTags.AccessionNumber].GetString(0, string.Empty);

            Platform.Log(LogLevel.Info,
                         "Starting archival of study {0} for Patient {1} (PatientId:{2} A#:{3}) on Partition {4} on archive {5}",
                         _storageLocation.StudyInstanceUid, patientsName, patientId,
                         accessionNumber, _hsmArchive.ServerPartition.Description,
                         _hsmArchive.PartitionArchive.Description);

            // Use the command processor to do the archival.
            using (ServerCommandProcessor commandProcessor = new ServerCommandProcessor("Archive"))
            {
                var archiveStudyCmd = new ArchiveStudyCommand(_storageLocation, _hsmArchive.HsmPath,
                                                              executionContext.TempDirectory, _hsmArchive.PartitionArchive)
                {
                    ForceCompress = HsmSettings.Default.CompressZipFiles
                };

                commandProcessor.AddCommand(archiveStudyCmd);
                commandProcessor.AddCommand(new UpdateArchiveQueueItemCommand(queueItem.GetKey(),
                                                                              _storageLocation.GetKey(),
                                                                              ArchiveQueueStatusEnum.Completed));

                StudyRulesEngine studyEngine = new StudyRulesEngine(_storageLocation, _hsmArchive.ServerPartition, _studyXml);
                studyEngine.Apply(ServerRuleApplyTimeEnum.StudyArchived, commandProcessor);

                if (!commandProcessor.Execute())
                {
                    Platform.Log(LogLevel.Error,
                                 "Unexpected failure archiving study ({0}) to archive {1}: {2}, zip filename: {3}",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.PartitionArchive.Description,
                                 commandProcessor.FailureReason, archiveStudyCmd.OutputZipFilePath);

                    queueItem.FailureDescription = commandProcessor.FailureReason;
                    _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time);
                }
                else
                    Platform.Log(LogLevel.Info, "Successfully archived study {0} on {1} to zip {2}",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.PartitionArchive.Description,
                                 archiveStudyCmd.OutputZipFilePath);

                // Log the current FilesystemQueue settings
                _storageLocation.LogFilesystemQueue();
            }
        }
        catch (StudyIntegrityValidationFailure ex)
        {
            StringBuilder error = new StringBuilder();
            error.AppendLine(String.Format("Partition : {0}", ex.ValidationStudyInfo.ServerAE));
            error.AppendLine(String.Format("Patient : {0}", ex.ValidationStudyInfo.PatientsName));
            error.AppendLine(String.Format("Study Uid : {0}", ex.ValidationStudyInfo.StudyInstaneUid));
            error.AppendLine(String.Format("Accession# : {0}", ex.ValidationStudyInfo.AccessionNumber));
            error.AppendLine(String.Format("Study Date : {0}", ex.ValidationStudyInfo.StudyDate));

            queueItem.FailureDescription = error.ToString();
            _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time);
        }
        catch (Exception e)
        {
            String msg = String.Format("Unexpected exception archiving study: {0} on {1}: {2}",
                                       _storageLocation.StudyInstanceUid,
                                       _hsmArchive.PartitionArchive.Description, e.Message);

            Platform.Log(LogLevel.Error, e, msg);
            queueItem.FailureDescription = msg;
            _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time);
        }
        finally
        {
            // Unlock the Queue Entry
            using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
            {
                ILockStudy studyLock = update.GetBroker<ILockStudy>();
                LockStudyParameters parms = new LockStudyParameters
                {
                    StudyStorageKey = queueItem.StudyStorageKey,
                    QueueStudyStateEnum = QueueStudyStateEnum.Idle
                };
                bool retVal = studyLock.Execute(parms);
                if (!parms.Successful || !retVal)
                {
                    Platform.Log(LogLevel.Info, "Study {0} on partition {1} failed to unlock.",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description);
                }
                update.Commit();
            }
        }
    }
}
/// <summary>
/// Reprocess a specific study.
/// </summary>
/// <param name="partition">The ServerPartition the study is on.</param>
/// <param name="location">The storage location of the study to process.</param>
/// <param name="engine">The rules engine to use when processing the study.</param>
/// <param name="postArchivalEngine">The rules engine used for studies that have been archived.</param>
/// <param name="dataAccessEngine">The rules engine strictly used for setting data access.</param>
protected static void ProcessStudy(ServerPartition partition, StudyStorageLocation location,
                                   ServerRulesEngine engine, ServerRulesEngine postArchivalEngine,
                                   ServerRulesEngine dataAccessEngine)
{
    if (!location.QueueStudyStateEnum.Equals(QueueStudyStateEnum.Idle) || !location.AcquireWriteLock())
    {
        Platform.Log(LogLevel.Error, "Unable to lock study {0}. The study is being processed. (Queue State: {1})",
                     location.StudyInstanceUid, location.QueueStudyStateEnum.Description);
    }
    else
    {
        try
        {
            DicomFile msg = LoadInstance(location);
            if (msg == null)
            {
                Platform.Log(LogLevel.Error, "Unable to load file for study {0}", location.StudyInstanceUid);
                return;
            }

            bool archiveQueueExists;
            bool archiveStudyStorageExists;
            bool filesystemDeleteExists;
            using (IReadContext read = PersistentStoreRegistry.GetDefaultStore().OpenReadContext())
            {
                // Check for existing archive queue entries
                var archiveQueueBroker = read.GetBroker<IArchiveQueueEntityBroker>();
                var archiveQueueCriteria = new ArchiveQueueSelectCriteria();
                archiveQueueCriteria.StudyStorageKey.EqualTo(location.Key);
                archiveQueueExists = archiveQueueBroker.Count(archiveQueueCriteria) > 0;

                var archiveStorageBroker = read.GetBroker<IArchiveStudyStorageEntityBroker>();
                var archiveStudyStorageCriteria = new ArchiveStudyStorageSelectCriteria();
                archiveStudyStorageCriteria.StudyStorageKey.EqualTo(location.Key);
                archiveStudyStorageExists = archiveStorageBroker.Count(archiveStudyStorageCriteria) > 0;

                var filesystemQueueBroker = read.GetBroker<IFilesystemQueueEntityBroker>();
                var filesystemQueueCriteria = new FilesystemQueueSelectCriteria();
                filesystemQueueCriteria.StudyStorageKey.EqualTo(location.Key);
                filesystemQueueCriteria.FilesystemQueueTypeEnum.EqualTo(FilesystemQueueTypeEnum.DeleteStudy);
                filesystemDeleteExists = filesystemQueueBroker.Count(filesystemQueueCriteria) > 0;
            }

            using (var commandProcessor = new ServerCommandProcessor("Study Rule Processor"))
            {
                var context = new ServerActionContext(msg, location.FilesystemKey, partition, location.Key, commandProcessor);

                // Check if the Study has been archived
                if (archiveStudyStorageExists && !archiveQueueExists && !filesystemDeleteExists)
                {
                    // Add a command to delete the current filesystemQueue entries, so that they can
                    // be reinserted by the rules engine.
                    context.CommandProcessor.AddCommand(new DeleteFilesystemQueueCommand(location.Key, ServerRuleApplyTimeEnum.StudyArchived));

                    // How to deal with existing FilesystemQueue entries is problematic here. If the study
                    // has been migrated off tier 1, we probably don't want to modify the tier migration
                    // entries. Compression entries may have been entered when the Study was initially
                    // processed; we don't want to delete them, because they might still be valid.
                    // We just re-run the rules engine at this point, and delete only the StudyPurge entries,
                    // since those we know at least would only be applied for archived studies.
                    var studyRulesEngine = new StudyRulesEngine(postArchivalEngine, location, location.ServerPartition, location.LoadStudyXml());
                    studyRulesEngine.Apply(ServerRuleApplyTimeEnum.StudyArchived, commandProcessor);

                    // Post Archive doesn't allow data access rules. Force Data Access rules to be reapplied
                    // to these studies also.
                    dataAccessEngine.Execute(context);
                }
                else
                {
                    // Add a command to delete the current filesystemQueue entries, so that they can
                    // be reinserted by the rules engine.
                    context.CommandProcessor.AddCommand(new DeleteFilesystemQueueCommand(location.Key, ServerRuleApplyTimeEnum.StudyProcessed));

                    // Execute the rules engine, insert commands to update the database into the command processor.
                    // Due to ticket #11673, we create a new rules engine instance for each study, since the Study QC rules
                    // don't work right now with a single rules engine.
                    // TODO CR (Jan 2014) - Check if we can go back to caching the rules engine to reduce database hits on the rules
                    var studyRulesEngine = new StudyRulesEngine(location, location.ServerPartition, location.LoadStudyXml());
                    studyRulesEngine.Apply(ServerRuleApplyTimeEnum.StudyProcessed, commandProcessor);
                }

                // Do the actual database updates.
                if (false == context.CommandProcessor.Execute())
                {
                    Platform.Log(LogLevel.Error, "Unexpected failure processing Study level rules for study {0}",
                                 location.StudyInstanceUid);
                }

                // Log the FilesystemQueue related entries
                location.LogFilesystemQueue();
            }
        }
        finally
        {
            location.ReleaseWriteLock();
        }
    }
}
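A hedged sketch of how a caller might wire up the three rules engines this method expects. The constructor and Load() usage mirror the SopProcessed engine shown earlier in this section; how the post-archival and data-access engines are restricted to their respective rule types is not shown here, so that wiring (and the apply-time value chosen for the data-access engine) is an assumption:

// Hypothetical caller sketch (assumption: engines are pre-loaded once and reused across studies).
var engine = new ServerRulesEngine(ServerRuleApplyTimeEnum.StudyProcessed, partition.GetKey());
engine.Load();

var postArchivalEngine = new ServerRulesEngine(ServerRuleApplyTimeEnum.StudyArchived, partition.GetKey());
postArchivalEngine.Load();

// Assumption: the data-access engine also runs at StudyProcessed time and is filtered
// elsewhere to data-access rules only.
var dataAccessEngine = new ServerRulesEngine(ServerRuleApplyTimeEnum.StudyProcessed, partition.GetKey());
dataAccessEngine.Load();

ProcessStudy(partition, location, engine, postArchivalEngine, dataAccessEngine);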