Example #1
		protected override bool OnExecute(ServerActionContext context)
		{
			DateTime scheduledTime = Platform.Time;

			if (_exprScheduledTime != null)
			{
				scheduledTime = Evaluate(_exprScheduledTime, context, scheduledTime);
			}

			scheduledTime = CalculateOffsetTime(scheduledTime, _offsetTime, _units);
			XmlDocument doc = new XmlDocument();

			XmlElement element = doc.CreateElement("compress");
			doc.AppendChild(element);
			XmlAttribute syntaxAttribute = doc.CreateAttribute("syntax");
			syntaxAttribute.Value = TransferSyntax.Jpeg2000ImageCompressionLosslessOnlyUid;
			element.Attributes.Append(syntaxAttribute);

			Platform.Log(LogLevel.Debug, "Jpeg 2000 Lossless Compression Scheduling: This study will be compressed on {0}", scheduledTime);
			context.CommandProcessor.AddCommand(
				new InsertFilesystemQueueCommand(_queueType, context.FilesystemKey, context.StudyLocationKey,
				                                 scheduledTime, doc));

			return true;
		}
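
For context, CalculateOffsetTime and Evaluate are helpers of the action class and are not shown above. A minimal sketch of the offset step only, assuming _offsetTime is an int and the units type is a simple enum (both names below are assumptions, not the actual ClearCanvas types):

		// Minimal sketch only -- the real helper and its unit type are not shown
		// in this example; the enum and signature below are assumptions.
		private enum OffsetTimeUnits { Minutes, Hours, Days, Weeks }

		private static DateTime CalculateOffsetTime(DateTime start, int offset, OffsetTimeUnits units)
		{
			switch (units)
			{
				case OffsetTimeUnits.Minutes: return start.AddMinutes(offset);
				case OffsetTimeUnits.Hours:   return start.AddHours(offset);
				case OffsetTimeUnits.Days:    return start.AddDays(offset);
				default:                      return start.AddDays(offset * 7); // Weeks
			}
		}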
Example #2
		/// <summary>
		/// Creates an instance of <see cref="UpdateSeriesCommand"/> to update the existing Series record in the database
		/// </summary>
		public UpdateSeriesCommand(StudyStorageLocation storageLocation, ServerActionContext sopContext)
			: base("Update Series Command")
		{
			_storageLocation = storageLocation;
			_data = sopContext.Message.DataSet;
			_context = sopContext;
		}
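
A minimal usage sketch: the command is meant to run inside a ServerCommandProcessor batch, as SaveDuplicateReport does later in this section; the storageLocation and sopContext variables are assumed to already exist:

		// Hypothetical usage -- queuing the command lets it share the processor's
		// commit/rollback semantics with the other commands in the batch.
		using (var processor = new ServerCommandProcessor("Update Series"))
		{
			processor.AddCommand(new UpdateSeriesCommand(storageLocation, sopContext));
			if (!processor.Execute())
				Platform.Log(LogLevel.Error, "Series update failed: {0}", processor.FailureReason);
		}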
Example #3
		public ApplyRulesCommand(string directory, string studyInstanceUid, ServerActionContext context) : base("Apply Server Rules", true)
		{
			_directory = directory;
			_studyInstanceUid = studyInstanceUid;
			_context = context;
			_engine = new ServerRulesEngine(ServerRuleApplyTimeEnum.StudyRestored, _context.ServerPartitionKey);
			_engine.Load();
		}
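
The constructor loads the StudyRestored rules up front; the command's OnExecute (not shown here) then runs the engine over the restored files. A purely illustrative sketch of that execution step; the override signature and the settable Message property are assumptions:

		// Illustrative sketch only -- the actual OnExecute is not shown in this
		// example. Idea: run the loaded StudyRestored rules once per restored file.
		protected override void OnExecute(CommandProcessor theProcessor)
		{
			foreach (string path in Directory.GetFiles(_directory, "*" + ServerPlatform.DicomFileExtension, SearchOption.AllDirectories))
			{
				var file = new DicomFile(path);
				file.Load(DicomReadOptions.Default);

				_context.Message = file; // assumes Message is settable on the context
				_engine.Execute(_context);
			}
		}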
Example #4
        public void Apply(ServerRuleApplyTimeEnum applyTime, CommandProcessor theProcessor)
        {
            try
            {
                if (_studyRulesEngine == null || !_studyRulesEngine.RuleApplyTime.Equals(applyTime))
                {
                    _studyRulesEngine = new ServerRulesEngine(applyTime, _location.ServerPartitionKey);
                    _studyRulesEngine.Load();
                }

                List<string> files = GetFirstInstanceInEachStudySeries();
                if (files.Count == 0)
                {
                    string message =
                        String.Format("Unexpectedly unable to find SOP instances for rules engine in each series in study: {0}",
                                      _location.StudyInstanceUid);
                    Platform.Log(LogLevel.Error, message);
                    throw new ApplicationException(message);
                }

                Platform.Log(LogLevel.Info, "Processing Study Level rules for study {0} on partition {1} at {2} apply time",
                             _location.StudyInstanceUid, _partition.Description, applyTime.Description);

                foreach (string seriesFilePath in files)
                {
                    var theFile = new DicomFile(seriesFilePath);
                    theFile.Load(DicomReadOptions.Default);
                    var context =
                        new ServerActionContext(theFile, _location.FilesystemKey, _partition, _location.Key, theProcessor)
                    {
                        RuleEngine = _studyRulesEngine
                    };
                    _studyRulesEngine.Execute(context);

                    ProcessSeriesRules(theFile, theProcessor);
                }

                if (applyTime.Equals(ServerRuleApplyTimeEnum.StudyProcessed))
                {
                    // This is a bit kludgy, but we had a problem with studies with only 1 image incorrectly
                    // having archive requests inserted when they were scheduled for deletion.  Calling
                    // this command here so that if a delete is inserted at the study level, we will remove
                    // the previously inserted archive request for the study.  Note also this has to be done
                    // after the rules engine is executed.
                    theProcessor.AddCommand(new InsertArchiveQueueCommand(_location.ServerPartitionKey, _location.Key));
                }
            }
            finally
            {
                if (_studyRulesEngine != null)
                {
                    _studyRulesEngine.Complete(_studyRulesEngine.RulesApplied);
                }
            }
        }
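
GetFirstInstanceInEachStudySeries is referenced above but not shown. A rough sketch of the idea, assuming one folder per series under the study folder and a GetStudyPath helper on the storage location (the real implementation likely walks the study XML instead):

        // Rough sketch only -- pick one representative SOP instance per series so
        // the study- and series-level rules each run once per series.
        private List<string> GetFirstInstanceInEachStudySeries()
        {
            var files = new List<string>();
            foreach (string seriesDir in Directory.GetDirectories(_location.GetStudyPath()))
            {
                string[] instances = Directory.GetFiles(seriesDir, "*" + ServerPlatform.DicomFileExtension);
                if (instances.Length > 0)
                    files.Add(instances[0]);
            }
            return files;
        }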
Example #5
        /// <summary>
        /// Method for applying rules when a new series has been inserted.
        /// </summary>
        /// <param name="file">The DICOM file being processed.</param>
        /// <param name="processor">The command processor</param>
        private void ProcessSeriesRules(DicomFile file, CommandProcessor processor)
        {
            if (_seriesRulesEngine == null)
            {
                _seriesRulesEngine = new ServerRulesEngine(ServerRuleApplyTimeEnum.SeriesProcessed, _location.ServerPartitionKey);
                _seriesRulesEngine.Load();
            }
            else
            {
                _seriesRulesEngine.Statistics.LoadTime.Reset();
                _seriesRulesEngine.Statistics.ExecutionTime.Reset();
            }

            var context = new ServerActionContext(file, _location.FilesystemKey, _partition, _location.Key, processor);

            _seriesRulesEngine.Execute(context);
        }
Example #6

		private void InsertInstance(DicomFile file, StudyXml stream, WorkQueueUid uid, string deleteFile, SopInstanceProcessorSopType sopType)
		{
			using (var processor = new ServerCommandProcessor("Processing WorkQueue DICOM file"))
			{
			    EventsHelper.Fire(OnInsertingSop, this, new SopInsertingEventArgs {Processor = processor });

				InsertInstanceCommand insertInstanceCommand = null;
				InsertStudyXmlCommand insertStudyXmlCommand = null;

				String patientsName = file.DataSet[DicomTags.PatientsName].GetString(0, String.Empty);
				_modality = file.DataSet[DicomTags.Modality].GetString(0, String.Empty);

				if (_context.UpdateCommands.Count > 0)
				{
					foreach (BaseImageLevelUpdateCommand command in _context.UpdateCommands)
					{
						command.File = file;
						processor.AddCommand(command);
					}
				}
				try
				{
					// Create a context for applying actions from the rules engine
					ServerActionContext context =
						new ServerActionContext(file, _context.StorageLocation.FilesystemKey, _context.Partition, _context.StorageLocation.Key);
					context.CommandProcessor = processor;

					_context.SopCompressionRulesEngine.Execute(context);
					String seriesUid = file.DataSet[DicomTags.SeriesInstanceUid].GetString(0, String.Empty);
					String sopUid = file.DataSet[DicomTags.SopInstanceUid].GetString(0, String.Empty);
					String finalDest = _context.StorageLocation.GetSopInstancePath(seriesUid, sopUid);

					if (_context.UpdateCommands.Count > 0)
					{
						processor.AddCommand(new SaveDicomFileCommand(_context.StorageLocation, file, file.Filename != finalDest));
					}
					else if (file.Filename != finalDest || processor.CommandCount > 0)
					{
						// Have to be careful here about failing vs. not failing when the file
						// already exists, because of the different use cases of the importer.
						// Save the file to the study folder, or to its new location if it has
						// been compressed.
						processor.AddCommand(new SaveDicomFileCommand(finalDest, file, file.Filename != finalDest));
					}

					// Update the StudyStream object
					insertStudyXmlCommand = new InsertStudyXmlCommand(file, stream, _context.StorageLocation);
					processor.AddCommand(insertStudyXmlCommand);

					// Have the rules applied during the command processor, and add the objects.
					processor.AddCommand(new ApplySopRulesCommand(context, _context.SopProcessedRulesEngine));

					// If specified, delete the file
					if (deleteFile != null)
						processor.AddCommand(new FileDeleteCommand(deleteFile, true));

					// Insert into the database, but only if it's not a duplicate, so the counts don't get thrown off
					insertInstanceCommand = new InsertInstanceCommand(file, _context.StorageLocation);
					processor.AddCommand(insertInstanceCommand);
					
					// Do a check if the StudyStatus value should be changed in the StorageLocation.  This
					// should only occur if the object has been compressed in the previous steps.
					processor.AddCommand(new UpdateStudyStatusCommand(_context.StorageLocation, file));

					if (uid != null)
						processor.AddCommand(new DeleteWorkQueueUidCommand(uid));

					// Do the actual processing
					if (!processor.Execute())
					{
						Platform.Log(LogLevel.Error, "Failure processing command {0} for SOP: {1}", processor.Description, file.MediaStorageSopInstanceUid);
						Platform.Log(LogLevel.Error, "File that failed processing: {0}", file.Filename);
						throw new ApplicationException("Unexpected failure (" + processor.FailureReason + ") executing command for SOP: " + file.MediaStorageSopInstanceUid, processor.FailureException);
					}
					Platform.Log(ServerPlatform.InstanceLogLevel, "Processed SOP: {0} for Patient {1}", file.MediaStorageSopInstanceUid, patientsName);

					// Fire the NewSopEventArgs or UpdateSopEventArgs event.
					// We know it's a duplicate if we have to delete the duplicate object.
					if (sopType == SopInstanceProcessorSopType.NewSop)
						EventManager.FireEvent(this, new NewSopEventArgs { File = file, ServerPartitionEntry = _context.Partition, WorkQueueUidEntry = uid, WorkQueueEntry = _context.WorkQueueEntry, FileLength = InstanceStats.FileSize });
					else if (sopType == SopInstanceProcessorSopType.UpdatedSop)
						EventManager.FireEvent(this, new UpdateSopEventArgs {File = file,ServerPartitionEntry = _context.Partition,WorkQueueUidEntry = uid, WorkQueueEntry = _context.WorkQueueEntry, FileLength = InstanceStats.FileSize});
				}
				catch (Exception e)
				{
					Platform.Log(LogLevel.Error, e, "Unexpected exception when {0}.  Rolling back operation.",
					             processor.Description);
					processor.Rollback();
					if (sopType == SopInstanceProcessorSopType.NewSop)
						EventManager.FireEvent(this, new FailedNewSopEventArgs { File = file, ServerPartitionEntry = _context.Partition, WorkQueueUidEntry = uid, WorkQueueEntry = _context.WorkQueueEntry, FileLength = InstanceStats.FileSize, FailureMessage = e.Message });
					else
						EventManager.FireEvent(this, new FailedUpdateSopEventArgs { File = file, ServerPartitionEntry = _context.Partition, WorkQueueUidEntry = uid, WorkQueueEntry = _context.WorkQueueEntry, FileLength = InstanceStats.FileSize, FailureMessage = e.Message });
					throw new ApplicationException("Unexpected exception when processing file.", e);
				}
				finally
				{
					if (insertInstanceCommand != null && insertInstanceCommand.Statistics.IsSet)
						_instanceStats.InsertDBTime.Add(insertInstanceCommand.Statistics);
					if (insertStudyXmlCommand != null && insertStudyXmlCommand.Statistics.IsSet)
						_instanceStats.InsertStreamTime.Add(insertStudyXmlCommand.Statistics);
				}
			}
		}
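
Every command queued above follows the processor's execute/rollback contract. A minimal custom-command sketch of that contract; the base-class name, constructor, and override signatures are assumptions inferred from the commands used in these examples:

		// Minimal sketch of a rollback-aware command; signatures are assumed, see
		// the concrete commands above (SaveDicomFileCommand, etc.) for real uses.
		public class TouchFileCommand : ServerCommand
		{
			private readonly string _path;
			private DateTime _originalTime;

			public TouchFileCommand(string path)
				: base("Touch file", true) // true: participates in rollback
			{
				_path = path;
			}

			protected override void OnExecute(CommandProcessor theProcessor)
			{
				_originalTime = File.GetLastWriteTime(_path);
				File.SetLastWriteTime(_path, Platform.Time);
			}

			protected override void OnUndo()
			{
				// Invoked when processor.Rollback() runs after a later command fails.
				File.SetLastWriteTime(_path, _originalTime);
			}
		}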
Example #7
		/// <summary>
		/// Method for applying rules when a new series has been inserted.
		/// </summary>
		/// <param name="file">The DICOM file being processed.</param>
		/// <param name="processor">The command processor</param>
		private void ProcessSeriesRules(DicomFile file, CommandProcessor processor)
		{
			if (_seriesRulesEngine == null)
			{
				_seriesRulesEngine = new ServerRulesEngine(ServerRuleApplyTimeEnum.SeriesProcessed, _location.ServerPartitionKey);
				_seriesRulesEngine.Load();
			}
			else
			{
				_seriesRulesEngine.Statistics.LoadTime.Reset();
				_seriesRulesEngine.Statistics.ExecutionTime.Reset();
			}

			var context = new ServerActionContext(file, _location.FilesystemKey, _partition, _location.Key, processor);

			_seriesRulesEngine.Execute(context);
		}
Example #8
		public void Apply(ServerRuleApplyTimeEnum applyTime, CommandProcessor theProcessor)
		{
			try
			{
				if (_studyRulesEngine == null || !_studyRulesEngine.RuleApplyTime.Equals(applyTime))
				{
					_studyRulesEngine = new ServerRulesEngine(applyTime, _location.ServerPartitionKey);
					_studyRulesEngine.Load();
				}

				List<string> files = GetFirstInstanceInEachStudySeries();
				if (files.Count == 0)
				{
					string message =
						String.Format("Unexpectedly unable to find SOP instances for rules engine in each series in study: {0}",
						              _location.StudyInstanceUid);
					Platform.Log(LogLevel.Error, message);
					throw new ApplicationException(message);
				}

				Platform.Log(LogLevel.Info, "Processing Study Level rules for study {0} on partition {1} at {2} apply time",
				             _location.StudyInstanceUid, _partition.Description, applyTime.Description);

				foreach (string seriesFilePath in files)
				{
					var theFile = new DicomFile(seriesFilePath);
					theFile.Load(DicomReadOptions.Default);
					var context =
						new ServerActionContext(theFile, _location.FilesystemKey, _partition, _location.Key, theProcessor){ RuleEngine = _studyRulesEngine};
					_studyRulesEngine.Execute(context);

					ProcessSeriesRules(theFile, theProcessor);
				}

				if (applyTime.Equals(ServerRuleApplyTimeEnum.StudyProcessed))
				{
					// This is a bit kludgy, but we had a problem with studies with only 1 image incorrectly
					// having archive requests inserted when they were scheduled for deletion.  Calling
					// this command here so that if a delete is inserted at the study level, we will remove
					// the previously inserted archive request for the study.  Note also this has to be done
					// after the rules engine is executed.
					theProcessor.AddCommand(new InsertArchiveQueueCommand(_location.ServerPartitionKey, _location.Key));
				}
			}
			finally
			{
				if (_studyRulesEngine != null)
					_studyRulesEngine.Complete(_studyRulesEngine.RulesApplied);
			}
		}
Example #9

		private void RestoreOnlineStudy(RestoreQueue queueItem, string zipFile, string destinationFolder)
		{
			try
			{
				using (ServerCommandProcessor processor = new ServerCommandProcessor("NAS Restore Online Study"))
				{
					using (ZipFile zip = new ZipFile(zipFile))
					{
						foreach (string file in zip.EntryFileNames)
						{
							processor.AddCommand(new ExtractZipFileAndReplaceCommand(zipFile, file, destinationFolder));
						}
					}

					// We rebuild the StudyXml in case any settings have changed or issues have arisen since archival
					processor.AddCommand(new RebuildStudyXmlCommand(_location.StudyInstanceUid, destinationFolder));

					StudyStatusEnum status;

					if (_syntax.Encapsulated && _syntax.LosslessCompressed)
						status = StudyStatusEnum.OnlineLossless;
					else if (_syntax.Encapsulated && _syntax.LossyCompressed)
						status = StudyStatusEnum.OnlineLossy;
					else
						status = StudyStatusEnum.Online;

					processor.AddCommand(new UpdateStudyStateCommand(_location, status, _serverSyntax));

					// Apply the rules engine.
					ServerActionContext context =
						new ServerActionContext(null, _location.FilesystemKey, _nasArchive.ServerPartition,
												queueItem.StudyStorageKey) {CommandProcessor = processor};
					processor.AddCommand(
						new ApplyRulesCommand(destinationFolder, _location.StudyInstanceUid, context));

					if (!processor.Execute())
					{
						Platform.Log(LogLevel.Error, "Unexpected error processing restore request for {0} on archive {1}",
									 _location.StudyInstanceUid, _nasArchive.PartitionArchive.Description);
						queueItem.FailureDescription = processor.FailureReason;
						_nasArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Failed, Platform.Time);
					}
					else
					{
						// Unlock the Queue Entry and set to complete
						using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
						{
							_nasArchive.UpdateRestoreQueue(update, queueItem, RestoreQueueStatusEnum.Completed, Platform.Time.AddSeconds(60));
							ILockStudy studyLock = update.GetBroker<ILockStudy>();
							LockStudyParameters parms = new LockStudyParameters
							                            	{
							                            		StudyStorageKey = queueItem.StudyStorageKey,
							                            		QueueStudyStateEnum = QueueStudyStateEnum.Idle
							                            	};
							bool retVal = studyLock.Execute(parms);
							if (!parms.Successful || !retVal)
							{
								Platform.Log(LogLevel.Info, "Study {0} on partition {1} failed to unlock.", _location.StudyInstanceUid,
											 _nasArchive.ServerPartition.Description);
							}

							update.Commit();

							Platform.Log(LogLevel.Info, "Successfully restored study: {0} on archive {1}", _location.StudyInstanceUid,
										 _nasArchive.PartitionArchive.Description);

                            OnStudyRestored(_location);
						}
					}
				}
			}
			catch (Exception e)
			{
				Platform.Log(LogLevel.Error, e, "Unexpected exception processing restore request for {0} on archive {1}",
							 _location.StudyInstanceUid, _nasArchive.PartitionArchive.Description);
				queueItem.FailureDescription = e.Message;
				_nasArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Failed, Platform.Time);
			}
		}
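
ExtractZipFileAndReplaceCommand is not shown; its core is a single-entry extract using the same DotNetZip ZipFile API seen above. An illustrative sketch of just that step (the real command presumably also backs up any existing file so the extract can be rolled back):

		// Illustrative core only -- extract one named entry, overwriting any
		// existing copy; backup/rollback handling is omitted.
		using (ZipFile zip = ZipFile.Read(zipFile))
		{
			zip[file].Extract(destinationFolder, ExtractExistingFileAction.OverwriteSilently);
		}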
Example #10

		public void RestoreNearlineStudy(RestoreQueue queueItem, string zipFile, string destinationFolder, string studyFolder)
		{
            ServerFilesystemInfo fs = _nasArchive.Selector.SelectFilesystem();
			if (fs == null)
			{
				DateTime scheduleTime = Platform.Time.AddMinutes(5);
				Platform.Log(LogLevel.Error, "No writeable filesystem for restore, rescheduling restore request to {0}", scheduleTime);
				queueItem.FailureDescription = "No writeable filesystem for restore, rescheduling restore request";
				_nasArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Pending, scheduleTime);
				return;
			}

		    StudyStorageLocation restoredLocation = null;
			try
			{
				using (ServerCommandProcessor processor = new ServerCommandProcessor("NAS Restore Offline Study"))
				{
					processor.AddCommand(new CreateDirectoryCommand(destinationFolder));
					destinationFolder = Path.Combine(destinationFolder, studyFolder);
					processor.AddCommand(new CreateDirectoryCommand(destinationFolder));
					destinationFolder = Path.Combine(destinationFolder, _studyStorage.StudyInstanceUid);
					processor.AddCommand(new CreateDirectoryCommand(destinationFolder));
					processor.AddCommand(new ExtractZipCommand(zipFile, destinationFolder));

					// We rebuild the StudyXml in case any settings have changed or issues have arisen since archival
					processor.AddCommand(new RebuildStudyXmlCommand(_studyStorage.StudyInstanceUid, destinationFolder));

                    // Apply the rules engine.
					ServerActionContext context =
						new ServerActionContext(null, fs.Filesystem.GetKey(), _nasArchive.ServerPartition,
						                        queueItem.StudyStorageKey) {CommandProcessor = processor};
					processor.AddCommand(
						new ApplyRulesCommand(destinationFolder, _studyStorage.StudyInstanceUid, context));

					// Do the actual insert into the DB
					InsertFilesystemStudyStorageCommand insertStorageCommand = new InsertFilesystemStudyStorageCommand(
													_nasArchive.PartitionArchive.ServerPartitionKey,
						                            _studyStorage.StudyInstanceUid,
						                            studyFolder,
						                            fs.Filesystem.GetKey(), _syntax);
					processor.AddCommand(insertStorageCommand);

					if (!processor.Execute())
					{
						Platform.Log(LogLevel.Error, "Unexpected error processing restore request for {0} on archive {1}",
						             _studyStorage.StudyInstanceUid, _nasArchive.PartitionArchive.Description);
						queueItem.FailureDescription = processor.FailureReason;
						_nasArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Failed, Platform.Time);
					}
					else
					{
					    restoredLocation = insertStorageCommand.Location;

						// Unlock the Queue Entry
						using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
						{
							bool retVal = _nasArchive.UpdateRestoreQueue(update, queueItem, RestoreQueueStatusEnum.Completed, Platform.Time.AddSeconds(60));
							ILockStudy studyLock = update.GetBroker<ILockStudy>();
							LockStudyParameters parms = new LockStudyParameters
							                            	{
							                            		StudyStorageKey = queueItem.StudyStorageKey,
							                            		QueueStudyStateEnum = QueueStudyStateEnum.Idle
							                            	};
							retVal = retVal && studyLock.Execute(parms);
							if (!parms.Successful || !retVal)
							{
								string message =
									String.Format("Study {0} on partition {1} failed to unlock.", _studyStorage.StudyInstanceUid,
									              _nasArchive.ServerPartition.Description);
								Platform.Log(LogLevel.Info, message);
								throw new ApplicationException(message);
							}
							update.Commit();

							Platform.Log(LogLevel.Info, "Successfully restored study: {0} on archive {1}", _studyStorage.StudyInstanceUid,
										 _nasArchive.PartitionArchive.Description);

                            OnStudyRestored(restoredLocation);
						}
					}
				}
			}
			catch (StudyIntegrityValidationFailure ex)
			{
				Debug.Assert(restoredLocation != null);
				// The study has been restored but appears to be corrupted; reprocess it.
				ReprocessStudy(restoredLocation, ex.Message);
			}
			catch (Exception e)
			{
				Platform.Log(LogLevel.Error, e, "Unexpected exception processing restore request for {0} on archive {1}",
							 _studyStorage.StudyInstanceUid, _nasArchive.PartitionArchive.Description);
				_nasArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Failed, Platform.Time);
			}
		}
Example #11
		private void SaveFile(DicomFile file)
		{
			String seriesInstanceUid = file.DataSet[DicomTags.SeriesInstanceUid].GetString(0, String.Empty);
			String sopInstanceUid = file.DataSet[DicomTags.SopInstanceUid].GetString(0, String.Empty);

			String destPath = _oldStudyLocation.FilesystemPath;
			
			using (ServerCommandProcessor filesystemUpdateProcessor = new ServerCommandProcessor("Update Study"))
			{
				filesystemUpdateProcessor.AddCommand(new CreateDirectoryCommand(destPath));

				destPath = Path.Combine(destPath, _partition.PartitionFolder);
				filesystemUpdateProcessor.AddCommand(new CreateDirectoryCommand(destPath));

				destPath = Path.Combine(destPath, _oldStudyFolder);
				filesystemUpdateProcessor.AddCommand(new CreateDirectoryCommand(destPath));

				destPath = Path.Combine(destPath, _newStudyInstanceUid);
				filesystemUpdateProcessor.AddCommand(new CreateDirectoryCommand(destPath));

				destPath = Path.Combine(destPath, seriesInstanceUid);
				filesystemUpdateProcessor.AddCommand(new CreateDirectoryCommand(destPath));

				destPath = Path.Combine(destPath, sopInstanceUid);
				destPath += ServerPlatform.DicomFileExtension;

				// Overwrite the prior file
				SaveDicomFileCommand saveCommand = new SaveDicomFileCommand(destPath, file, false);
				filesystemUpdateProcessor.AddCommand(saveCommand);

				if (_rulesEngine != null)
				{
					ServerActionContext context = new ServerActionContext(file, _oldStudyLocation.FilesystemKey, _partition, _oldStudyLocation.Key, filesystemUpdateProcessor);
					_rulesEngine.Execute(context); 
				}

				if (!filesystemUpdateProcessor.Execute())
				{
					throw new ApplicationException(String.Format("Unable to update image {0} : {1}", file.Filename, filesystemUpdateProcessor.FailureReason));
				}
			}            
		}
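
The chain of CreateDirectoryCommand calls builds the destination path one level at a time so that each newly created directory can be removed individually on rollback. The resulting layout, with illustrative placeholder values:

			// <FilesystemPath>/<PartitionFolder>/<OldStudyFolder>/<NewStudyInstanceUid>/
			//     <SeriesInstanceUid>/<SopInstanceUid><DicomFileExtension>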
Example #12

		/// <summary>
		/// Reprocess a specific study.
		/// </summary>
		/// <param name="partition">The ServerPartition the study is on.</param>
		/// <param name="location">The storage location of the study to process.</param>
		/// <param name="engine">The rules engine to use when processing the study.</param>
		/// <param name="postArchivalEngine">The rules engine used for studies that have been archived.</param>
		/// <param name="dataAccessEngine">The rules engine strictly used for setting data acess.</param>
		protected static void ProcessStudy(ServerPartition partition, StudyStorageLocation location, ServerRulesEngine engine, ServerRulesEngine postArchivalEngine, ServerRulesEngine dataAccessEngine)
		{
			if (!location.QueueStudyStateEnum.Equals(QueueStudyStateEnum.Idle) || !location.AcquireWriteLock())
			{
				Platform.Log(LogLevel.Error, "Unable to lock study {0}. The study is being processed. (Queue State: {1})", location.StudyInstanceUid,location.QueueStudyStateEnum.Description); 
			}
			else
			{
				try
				{
					DicomFile msg = LoadInstance(location);
					if (msg == null)
					{
						Platform.Log(LogLevel.Error, "Unable to load file for study {0}", location.StudyInstanceUid);
						return;
					}

					bool archiveQueueExists;
					bool archiveStudyStorageExists;
					bool filesystemDeleteExists;
					using (IReadContext read = PersistentStoreRegistry.GetDefaultStore().OpenReadContext())
					{
						// Check for existing archive queue entries
						var archiveQueueBroker = read.GetBroker<IArchiveQueueEntityBroker>();
						var archiveQueueCriteria = new ArchiveQueueSelectCriteria();
						archiveQueueCriteria.StudyStorageKey.EqualTo(location.Key);
						archiveQueueExists = archiveQueueBroker.Count(archiveQueueCriteria) > 0;

						var archiveStorageBroker = read.GetBroker<IArchiveStudyStorageEntityBroker>();
						var archiveStudyStorageCriteria = new ArchiveStudyStorageSelectCriteria();
						archiveStudyStorageCriteria.StudyStorageKey.EqualTo(location.Key);
						archiveStudyStorageExists = archiveStorageBroker.Count(archiveStudyStorageCriteria) > 0;

						var filesystemQueueBroker = read.GetBroker<IFilesystemQueueEntityBroker>();
						var filesystemQueueCriteria = new FilesystemQueueSelectCriteria();
						filesystemQueueCriteria.StudyStorageKey.EqualTo(location.Key);
						filesystemQueueCriteria.FilesystemQueueTypeEnum.EqualTo(FilesystemQueueTypeEnum.DeleteStudy);
						filesystemDeleteExists = filesystemQueueBroker.Count(filesystemQueueCriteria) > 0;
					}

					using (var commandProcessor = new ServerCommandProcessor("Study Rule Processor"))
					{
						var context = new ServerActionContext(msg, location.FilesystemKey, partition, location.Key, commandProcessor);
					
						// Check if the Study has been archived 
						if (archiveStudyStorageExists && !archiveQueueExists && !filesystemDeleteExists)
						{
							// Add a command to delete the current filesystemQueue entries, so that they can 
							// be reinserted by the rules engine.
							context.CommandProcessor.AddCommand(new DeleteFilesystemQueueCommand(location.Key, ServerRuleApplyTimeEnum.StudyArchived));

							// How to deal with existing FilesystemQueue entries is problematic here.  If the study
							// has been migrated off tier 1, we probably don't want to modify the tier migration 
							// entries.  Compression entries may have been entered when the Study was initially 
							// processed, we don't want to delete them, because they might still be valid.  
							// We just re-run the rules engine at this point, and delete only the StudyPurge entries,
							// since those we know at least would only be applied for archived studies.
							var studyRulesEngine = new StudyRulesEngine(postArchivalEngine, location, location.ServerPartition, location.LoadStudyXml());
							studyRulesEngine.Apply(ServerRuleApplyTimeEnum.StudyArchived, commandProcessor);

							// Post Archive doesn't allow data access rules.  Force Data Access rules to be reapplied
							// to these studies also.
							dataAccessEngine.Execute(context);
						}
						else
						{
							// Add a command to delete the current filesystemQueue entries, so that they can 
							// be reinserted by the rules engine.
							context.CommandProcessor.AddCommand(new DeleteFilesystemQueueCommand(location.Key, ServerRuleApplyTimeEnum.StudyProcessed));

							// Execute the rules engine, insert commands to update the database into the command processor.
							// Due to ticket #11673, we create a new rules engine instance for each study, since the Study QC rules
							// don't work right now with a single rules engine.
							//TODO CR (Jan 2014) - Check if we can go back to caching the rules engine to reduce database hits on the rules
							var studyRulesEngine = new StudyRulesEngine(location, location.ServerPartition, location.LoadStudyXml());
							studyRulesEngine.Apply(ServerRuleApplyTimeEnum.StudyProcessed, commandProcessor);
						}

						// Do the actual database updates.
						if (!context.CommandProcessor.Execute())
						{
							Platform.Log(LogLevel.Error, "Unexpected failure processing Study level rules for study {0}", location.StudyInstanceUid);
						}

						// Log the FilesystemQueue related entries
						location.LogFilesystemQueue();
					}
				}
				finally
				{
					location.ReleaseWriteLock();
				}
			}
		}
Example #13
		public ApplySopRulesCommand(ServerActionContext context, ServerRulesEngine engine)
			: base("Apply SOP Rules Engine and insert Archival Request", false)
		{
			_context = context;
			_engine = engine;
		}
Example #14

		protected void ProcessFile(Model.WorkQueue item, WorkQueueUid sop, string path, StudyXml studyXml, IDicomCodecFactory theCodecFactory)
		{
			DicomFile file;

			_instanceStats = new CompressInstanceStatistics();

			_instanceStats.ProcessTime.Start();

			// Use the command processor for rollback capabilities.
            using (ServerCommandProcessor processor = new ServerCommandProcessor("Processing WorkQueue Compress DICOM File"))
            {
                string modality = String.Empty;

                try
                {
                    file = new DicomFile(path);

                    _instanceStats.FileLoadTime.Start();
                    file.Load(DicomReadOptions.StorePixelDataReferences | DicomReadOptions.Default);
                    _instanceStats.FileLoadTime.End();

                	modality = file.DataSet[DicomTags.Modality].GetString(0, String.Empty);

                    FileInfo fileInfo = new FileInfo(path);
                    _instanceStats.FileSize = (ulong)fileInfo.Length;

                    // Get the Patients Name for processing purposes.
                    String patientsName = file.DataSet[DicomTags.PatientsName].GetString(0, "");

                    if (file.TransferSyntax.Equals(theCodecFactory.CodecTransferSyntax))
                    {
						// Delete the WorkQueueUid item
						processor.AddCommand(new DeleteWorkQueueUidCommand(sop));

						// Do the actual processing
						if (!processor.Execute())
						{
							Platform.Log(LogLevel.Warn, "Failure deleteing WorkQueueUid: {0} for SOP: {1}", processor.Description, file.MediaStorageSopInstanceUid);
							Platform.Log(LogLevel.Warn, "Compression file that failed: {0}", file.Filename);
						}
						else
						{
							Platform.Log(LogLevel.Warn, "Skip compressing SOP {0}. Its current transfer syntax is {1}",
							             file.MediaStorageSopInstanceUid, file.TransferSyntax.Name);
						}
                    }
                    else
                    {
                        IDicomCodec codec = theCodecFactory.GetDicomCodec();

                        // Create a context for applying actions from the rules engine
                        var context = new ServerActionContext(file, StorageLocation.FilesystemKey, ServerPartition, item.StudyStorageKey);
                        context.CommandProcessor = processor;
                        
                        var parms = theCodecFactory.GetCodecParameters(item.Data);
                        var compressCommand =
                            new DicomCompressCommand(context.Message, theCodecFactory.CodecTransferSyntax, codec, parms);
                        processor.AddCommand(compressCommand);

                        var save = new SaveDicomFileCommand(file.Filename, file, false);
                        processor.AddCommand(save);

                        // Update the StudyStream object, must be done after compression
                        // and after the compressed image has been successfully saved
                        var insertStudyXmlCommand = new UpdateStudyXmlCommand(file, studyXml, StorageLocation);
                        processor.AddCommand(insertStudyXmlCommand);

						// Delete the WorkQueueUid item
                    	processor.AddCommand(new DeleteWorkQueueUidCommand(sop));

                        // Do the actual processing
                        if (!processor.Execute())
                        {
                            _instanceStats.CompressTime.Add(compressCommand.CompressTime);
                            Platform.Log(LogLevel.Error, "Failure compressing command {0} for SOP: {1}", processor.Description, file.MediaStorageSopInstanceUid);
                            Platform.Log(LogLevel.Error, "Compression file that failed: {0}", file.Filename);
                            throw new ApplicationException("Unexpected failure (" + processor.FailureReason + ") executing command for SOP: " + file.MediaStorageSopInstanceUid,processor.FailureException);
                        }
                    	_instanceStats.CompressTime.Add(compressCommand.CompressTime);
                    	Platform.Log(ServerPlatform.InstanceLogLevel, "Compress SOP: {0} for Patient {1}", file.MediaStorageSopInstanceUid,
                    	             patientsName);
                    }
                }
                catch (Exception e)
                {
                    Platform.Log(LogLevel.Error, e, "Unexpected exception when {0}.  Rolling back operation.",
                                 processor.Description);
                    processor.Rollback();

                	throw;
                }
                finally
                {
                    _instanceStats.ProcessTime.End();
                    _studyStats.AddSubStats(_instanceStats);

                    _studyStats.StudyInstanceUid = StorageLocation.StudyInstanceUid;
                    if (!String.IsNullOrEmpty(modality))
                        _studyStats.Modality = modality;

                    // Update the statistics
                    _studyStats.NumInstances++;
                }
            }
		}
Example #15

		/// <summary>
		/// Overwrites the existing copy with the received duplicate, updates the database
		/// and study XML, and applies any SOP rules.
		/// </summary>
		/// <param name="dupFile">The duplicate DICOM file.</param>
		/// <param name="uid">The WorkQueueUid entry for the duplicate.</param>
		/// <param name="studyXml">The study XML to update.</param>
		/// <returns>The result of processing the duplicate.</returns>
		private ProcessDuplicateResult OverwriteAndUpdateDuplicate(DicomFile dupFile, WorkQueueUid uid, StudyXml studyXml)
		{
			Platform.Log(LogLevel.Info, "Overwriting duplicate SOP {0}", uid.SopInstanceUid);

			var result = new ProcessDuplicateResult { ActionTaken = DuplicateProcessResultAction.Accept };

			using (var processor = new ServerCommandProcessor("Overwrite duplicate instance"))
			{
				var sopContext = new ServerActionContext(dupFile, Context.StorageLocation.FilesystemKey, Context.StorageLocation.ServerPartition, Context.StorageLocation.Key, processor);

				var destination = Context.StorageLocation.GetSopInstancePath(uid.SeriesInstanceUid, uid.SopInstanceUid);
				processor.AddCommand(new RenameFileCommand(dupFile.Filename, destination, false));

				// Set this so that the FileSize calculation in InsertStudyXmlCommand works
				dupFile.Filename = destination;

				// Update the StudyStream object
				var insertStudyXmlCommand = new InsertStudyXmlCommand(dupFile, studyXml, Context.StorageLocation);
				processor.AddCommand(insertStudyXmlCommand);

				// Ideally we don't need to insert the instance into the database since it's a duplicate.
				// However, we need to do so to ensure the Study record is recreated if we are dealing with an orphan study.
				// For other cases, this will cause the instance count in the DB to be out of sync with the filesystem.
				// But it will be corrected at the end of the processing when the study verification is executed.
				processor.AddCommand(new UpdateInstanceCommand(Context.StorageLocation.ServerPartition, Context.StorageLocation, dupFile));

				processor.AddCommand(new DeleteWorkQueueUidCommand(uid));

				processor.AddCommand(new ApplySopRulesCommand(sopContext, Context.SopProcessedRulesEngine));

				if (!processor.Execute())
				{
					EventManager.FireEvent(this, new FailedUpdateSopEventArgs { File = dupFile, ServerPartitionEntry = Context.StorageLocation.ServerPartition, WorkQueueUidEntry = uid, WorkQueueEntry = WorkQueueItem, FileLength = (ulong)insertStudyXmlCommand.FileSize, FailureMessage = processor.FailureReason });

					// cause the item to fail
					throw new Exception("Error occurred when trying to overwrite duplicate in the filesystem.", processor.FailureException);
				}

				EventManager.FireEvent(this, new UpdateSopEventArgs { File = dupFile, ServerPartitionEntry = Context.StorageLocation.ServerPartition, WorkQueueUidEntry = uid, WorkQueueEntry = WorkQueueItem, FileLength = (ulong)insertStudyXmlCommand.FileSize });
			}

			return result;
		}
Example #16

		void SaveDuplicateReport(WorkQueueUid uid, string sourceFile, string destinationFile, DicomFile dupFile, StudyXml studyXml)
        {
            using (var processor = new ServerCommandProcessor("Save duplicate report"))
            {
				var sopContext = new ServerActionContext(dupFile, Context.StorageLocation.FilesystemKey, Context.StorageLocation.ServerPartition, Context.StorageLocation.Key, processor);

				processor.AddCommand(new RenameFileCommand(sourceFile, destinationFile, false));

                // Update the StudyStream object
                processor.AddCommand(new InsertStudyXmlCommand(dupFile, studyXml, Context.StorageLocation));

				// Update the series
				processor.AddCommand(new UpdateSeriesCommand(Context.StorageLocation, sopContext));

                processor.AddCommand(new DeleteWorkQueueUidCommand(uid));

				processor.AddCommand(new ApplySopRulesCommand(sopContext, Context.SopProcessedRulesEngine));

                processor.Execute();
            }
        }