Example #1
        protected override void OnExecute(CommandProcessor theProcessor)
        {
            // Run the rules engine against the object.
            _engine.Execute(_context);

            // Insert into the archive queue.  Note that we re-run this with each object processed
            // so that the scheduled time is pushed back each time.  Note, however, that if the study
            // only has one image, we could incorrectly insert an ArchiveQueue request, since the
            // study-level rules haven't been run yet.  We re-run the command when the study-processed
            // rules are run to remove the ArchiveQueue request again if it isn't needed.
            _context.CommandProcessor.AddCommand(
                new InsertArchiveQueueCommand(_context.ServerPartitionKey, _context.StudyLocationKey));
        }
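Everything this override does is queue work against a command processor: commands are added with AddCommand and executed later as a batch that can be rolled back if any step fails. Below is a minimal, self-contained sketch of that pattern; the ICommand interface and MiniCommandProcessor class are illustrative stand-ins, not the actual ClearCanvas types.

        using System;
        using System.Collections.Generic;

        // Hypothetical stand-ins for the ClearCanvas command types; illustrative only.
        public interface ICommand
        {
            void Execute();
            void Undo();
        }

        public class MiniCommandProcessor
        {
            private readonly List<ICommand> _queued = new List<ICommand>();
            private readonly Stack<ICommand> _executed = new Stack<ICommand>();

            public string FailureReason { get; private set; }

            public void AddCommand(ICommand command)
            {
                _queued.Add(command);
            }

            // Run the queued commands in order.  On failure, undo the commands
            // that already ran so the whole batch behaves atomically.
            public bool Execute()
            {
                foreach (ICommand command in _queued)
                {
                    try
                    {
                        command.Execute();
                        _executed.Push(command);
                    }
                    catch (Exception e)
                    {
                        FailureReason = e.Message;
                        while (_executed.Count > 0)
                            _executed.Pop().Undo();
                        return false;
                    }
                }
                return true;
            }
        }

The real ServerCommandProcessor layers database transaction handling on top of this, as the later examples suggest, but the queue-then-execute-with-rollback shape is the same.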
Example #2
        private void SaveFile(DicomFile file)
        {
            String seriesInstanceUid = file.DataSet[DicomTags.SeriesInstanceUid].GetString(0, String.Empty);
            String sopInstanceUid    = file.DataSet[DicomTags.SopInstanceUid].GetString(0, String.Empty);

            String destPath = _oldStudyLocation.FilesystemPath;

            using (ServerCommandProcessor filesystemUpdateProcessor = new ServerCommandProcessor("Update Study"))
            {
                filesystemUpdateProcessor.AddCommand(new CreateDirectoryCommand(destPath));

                destPath = Path.Combine(destPath, _partition.PartitionFolder);
                filesystemUpdateProcessor.AddCommand(new CreateDirectoryCommand(destPath));

                destPath = Path.Combine(destPath, _oldStudyFolder);
                filesystemUpdateProcessor.AddCommand(new CreateDirectoryCommand(destPath));

                destPath = Path.Combine(destPath, _newStudyInstanceUid);
                filesystemUpdateProcessor.AddCommand(new CreateDirectoryCommand(destPath));

                destPath = Path.Combine(destPath, seriesInstanceUid);
                filesystemUpdateProcessor.AddCommand(new CreateDirectoryCommand(destPath));

                destPath  = Path.Combine(destPath, sopInstanceUid);
                destPath += ServerPlatform.DicomFileExtension;

                // Overwrite the prior file
                SaveDicomFileCommand saveCommand = new SaveDicomFileCommand(destPath, file, false);
                filesystemUpdateProcessor.AddCommand(saveCommand);

                if (_rulesEngine != null)
                {
                    ServerActionContext context = new ServerActionContext(file, _oldStudyLocation.FilesystemKey, _partition, _oldStudyLocation.Key)
                    {
                        CommandProcessor = filesystemUpdateProcessor
                    };
                    _rulesEngine.Execute(context);
                }

                if (!filesystemUpdateProcessor.Execute())
                {
                    throw new ApplicationException(String.Format("Unable to update image {0} : {1}", file.Filename, filesystemUpdateProcessor.FailureReason));
                }
            }
        }
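Note how SaveFile builds the destination path one level at a time, queuing a CreateDirectoryCommand for each level so the whole tree creation participates in the processor's rollback. A hypothetical helper could express the repeated Path.Combine/AddCommand pairs more compactly; QueueDirectoryChain is not part of the codebase and assumes only the ServerCommandProcessor and CreateDirectoryCommand types used above.

        // Hypothetical helper (not part of the codebase): queue one
        // CreateDirectoryCommand per path segment, mirroring the repeated
        // Path.Combine/AddCommand pairs in SaveFile above.
        private static string QueueDirectoryChain(ServerCommandProcessor processor,
                                                  string root, params string[] segments)
        {
            string path = root;
            processor.AddCommand(new CreateDirectoryCommand(path));
            foreach (string segment in segments)
            {
                path = Path.Combine(path, segment);
                processor.AddCommand(new CreateDirectoryCommand(path));
            }
            return path;
        }

With such a helper, the directory portion of SaveFile reduces to one call passing _partition.PartitionFolder, _oldStudyFolder, _newStudyInstanceUid, and seriesInstanceUid as the segments, followed by appending the file name and extension.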
Example #3
        /// <summary>
        /// Apply the rules.
        /// </summary>
        /// <remarks>
        /// When rules are applied, we are simply adding new <see cref="ServerDatabaseCommand"/> instances
        /// for the rules to the currently executing <see cref="ServerCommandProcessor"/>.  They will be
        /// executed after all other rules have been executed.
        /// </remarks>
        protected override void OnExecute(CommandProcessor theProcessor)
        {
            string   studyXmlFile = Path.Combine(_directory, String.Format("{0}.xml", _studyInstanceUid));
            StudyXml theXml       = new StudyXml(_studyInstanceUid);

            if (File.Exists(studyXmlFile))
            {
                using (Stream fileStream = FileStreamOpener.OpenForRead(studyXmlFile, FileMode.Open))
                {
                    XmlDocument theDoc = new XmlDocument();

                    StudyXmlIo.Read(theDoc, fileStream);

                    theXml.SetMemento(theDoc);

                    fileStream.Close();
                }
            }
            else
            {
                string errorMsg = String.Format("Unable to load study XML file of restored study: {0}", studyXmlFile);

                Platform.Log(LogLevel.Error, errorMsg);
                throw new ApplicationException(errorMsg);
            }

            DicomFile defaultFile   = null;
            bool      rulesExecuted = false;

            foreach (SeriesXml seriesXml in theXml)
            {
                foreach (InstanceXml instanceXml in seriesXml)
                {
                    // Skip non-image objects
                    if (instanceXml.SopClass.Equals(SopClass.KeyObjectSelectionDocumentStorage) ||
                        instanceXml.SopClass.Equals(SopClass.GrayscaleSoftcopyPresentationStateStorageSopClass) ||
                        instanceXml.SopClass.Equals(SopClass.BlendingSoftcopyPresentationStateStorageSopClass) ||
                        instanceXml.SopClass.Equals(SopClass.ColorSoftcopyPresentationStateStorageSopClass))
                    {
                        // Save the first one encountered, just in case the whole study consists of non-image objects.
                        if (defaultFile == null)
                        {
                            defaultFile = new DicomFile("test", new DicomAttributeCollection(), instanceXml.Collection);
                        }
                        continue;
                    }

                    DicomFile file = new DicomFile("test", new DicomAttributeCollection(), instanceXml.Collection);
                    _context.Message = file;
                    _engine.Execute(_context);
                    rulesExecuted = true;
                    break;
                }
                if (rulesExecuted)
                {
                    break;
                }
            }

            if (!rulesExecuted && defaultFile != null)
            {
                _context.Message = defaultFile;
                _engine.Execute(_context);
            }
        }
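The nested loops and the rulesExecuted flag implement a simple selection policy: run the rules against the first image instance found, falling back to the first non-image instance only when the study contains no images at all. A generic sketch of that policy, using plain collections in place of StudyXml/SeriesXml (the helper is hypothetical, not part of the codebase):

        using System;
        using System.Collections.Generic;

        static class RuleCandidateSelector
        {
            // Return the first instance for which isImage is true; otherwise the
            // first instance of any kind; null when the study is empty.
            public static T PickRuleCandidate<T>(IEnumerable<IEnumerable<T>> series,
                                                 Func<T, bool> isImage) where T : class
            {
                T fallback = null;
                foreach (IEnumerable<T> oneSeries in series)
                {
                    foreach (T instance in oneSeries)
                    {
                        if (isImage(instance))
                            return instance;      // rules run against the first image found
                        if (fallback == null)
                            fallback = instance;  // remember the first non-image as a default
                    }
                }
                return fallback;
            }
        }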
Example #4
		public void Apply(ServerRuleApplyTimeEnum applyTime, CommandProcessor theProcessor)
		{
			_studyRulesEngine = new ServerRulesEngine(applyTime, _location.ServerPartitionKey);
			_studyRulesEngine.Load();

			List<string> files = GetFirstInstanceInEachStudySeries();
			if (files.Count == 0)
			{
				string message =
					String.Format("Unexectedly unable to find SOP instances for rules engine in each series in study: {0}",
					              _location.StudyInstanceUid);
				Platform.Log(LogLevel.Error, message);
				throw new ApplicationException(message);
			}

			Platform.Log(LogLevel.Info, "Processing Study Level rules for study {0} on partition {1} at {2} apply time",
			             _location.StudyInstanceUid, _partition.Description, applyTime.Description);

			foreach (string seriesFilePath in files)
			{
				var theFile = new DicomFile(seriesFilePath);
				theFile.Load(DicomReadOptions.Default);
				var context =
					new ServerActionContext(theFile, _location.FilesystemKey, _partition, _location.Key)
						{CommandProcessor = theProcessor};
				_studyRulesEngine.Execute(context);

				ProcessSeriesRules(theFile, theProcessor);
			}

			if (applyTime.Equals(ServerRuleApplyTimeEnum.StudyProcessed))
			{
				// This is a bit kludgy, but we had a problem with studies with only 1 image incorrectly
				// having archive requests inserted when they were scheduled for deletion.  Calling
				// this command here so that if a delete is inserted at the study level, we will remove
				// the previously inserted archive request for the study.  Note also this has to be done
				// after the rules engine is executed.
				theProcessor.AddCommand(new InsertArchiveQueueCommand(_location.ServerPartitionKey, _location.Key));
			}
		}
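Tying Example #4 together: a caller would construct the wrapper, apply the rules inside a single processor, and then execute the batch so all resulting database updates commit or roll back together. A hedged usage sketch, assuming the StudyRulesEngine constructor and Apply signature shown in these examples (location here is a StudyStorageLocation, as in Example #5 below):

		using (var processor = new ServerCommandProcessor("Apply Study Rules"))
		{
			var engine = new StudyRulesEngine(location, location.ServerPartition, location.LoadStudyXml());
			engine.Apply(ServerRuleApplyTimeEnum.StudyProcessed, processor);

			// Execute() returns false on failure; FailureReason carries the cause.
			if (!processor.Execute())
				Platform.Log(LogLevel.Error, "Rule application failed: {0}", processor.FailureReason);
		}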
Example #5
        /// <summary>
        /// Reprocess a specific study.
        /// </summary>
        /// <param name="partition">The ServerPartition the study is on.</param>
        /// <param name="location">The storage location of the study to process.</param>
        /// <param name="engine">The rules engine to use when processing the study.</param>
        /// <param name="postArchivalEngine">The rules engine used for studies that have been archived.</param>
        /// <param name="dataAccessEngine">The rules engine strictly used for setting data acess.</param>
        protected static void ProcessStudy(ServerPartition partition, StudyStorageLocation location, ServerRulesEngine engine, ServerRulesEngine postArchivalEngine, ServerRulesEngine dataAccessEngine)
        {
            if (!location.QueueStudyStateEnum.Equals(QueueStudyStateEnum.Idle) || !location.AcquireWriteLock())
            {
                Platform.Log(LogLevel.Error, "Unable to lock study {0}. The study is being processed. (Queue State: {1})", location.StudyInstanceUid, location.QueueStudyStateEnum.Description);
            }
            else
            {
                try
                {
                    DicomFile msg = LoadInstance(location);
                    if (msg == null)
                    {
                        Platform.Log(LogLevel.Error, "Unable to load file for study {0}", location.StudyInstanceUid);
                        return;
                    }

                    bool archiveQueueExists;
                    bool archiveStudyStorageExists;
                    bool filesystemDeleteExists;
                    using (IReadContext read = PersistentStoreRegistry.GetDefaultStore().OpenReadContext())
                    {
                        // Check for existing archive queue entries
                        var archiveQueueBroker   = read.GetBroker<IArchiveQueueEntityBroker>();
                        var archiveQueueCriteria = new ArchiveQueueSelectCriteria();
                        archiveQueueCriteria.StudyStorageKey.EqualTo(location.Key);
                        archiveQueueExists = archiveQueueBroker.Count(archiveQueueCriteria) > 0;


                        var archiveStorageBroker        = read.GetBroker<IArchiveStudyStorageEntityBroker>();
                        var archiveStudyStorageCriteria = new ArchiveStudyStorageSelectCriteria();
                        archiveStudyStorageCriteria.StudyStorageKey.EqualTo(location.Key);
                        archiveStudyStorageExists = archiveStorageBroker.Count(archiveStudyStorageCriteria) > 0;

                        var filesystemQueueBroker   = read.GetBroker<IFilesystemQueueEntityBroker>();
                        var filesystemQueueCriteria = new FilesystemQueueSelectCriteria();
                        filesystemQueueCriteria.StudyStorageKey.EqualTo(location.Key);
                        filesystemQueueCriteria.FilesystemQueueTypeEnum.EqualTo(FilesystemQueueTypeEnum.DeleteStudy);
                        filesystemDeleteExists = filesystemQueueBroker.Count(filesystemQueueCriteria) > 0;
                    }

                    using (var commandProcessor = new ServerCommandProcessor("Study Rule Processor")
                    {
                        PrimaryServerPartitionKey = partition.GetKey(),
                        PrimaryStudyKey = location.Study.GetKey()
                    })
                    {
                        var context = new ServerActionContext(msg, location.FilesystemKey, partition, location.Key, commandProcessor);

                        // Check if the Study has been archived
                        if (archiveStudyStorageExists && !archiveQueueExists && !filesystemDeleteExists)
                        {
                            // Add a command to delete the current filesystemQueue entries, so that they can
                            // be reinserted by the rules engine.
                            context.CommandProcessor.AddCommand(new DeleteFilesystemQueueCommand(location.Key, ServerRuleApplyTimeEnum.StudyArchived));

                            // How to deal with existing FilesystemQueue entries is problematic here.  If the study
                            // has been migrated off tier 1, we probably don't want to modify the tier migration
                            // entries.  Compression entries may have been entered when the Study was initially
                            // processed; we don't want to delete them, because they might still be valid.
                            // We just re-run the rules engine at this point, and delete only the StudyPurge entries,
                            // since we know those would only be applied to archived studies.
                            var studyRulesEngine = new StudyRulesEngine(postArchivalEngine, location, location.ServerPartition, location.LoadStudyXml());
                            studyRulesEngine.Apply(ServerRuleApplyTimeEnum.StudyArchived, commandProcessor);

                            // Post Archive doesn't allow data access rules.  Force Data Access rules to be reapplied
                            // to these studies also.
                            dataAccessEngine.Execute(context);
                        }
                        else
                        {
                            // Add a command to delete the current filesystemQueue entries, so that they can
                            // be reinserted by the rules engine.
                            context.CommandProcessor.AddCommand(new DeleteFilesystemQueueCommand(location.Key, ServerRuleApplyTimeEnum.StudyProcessed));

                            // Execute the rules engine, insert commands to update the database into the command processor.
                            // Due to ticket #11673, we create a new rules engine instance for each study, since the Study QC rules
                            // don't work right now with a single rules engine.
                            //TODO CR (Jan 2014) - Check if we can go back to caching the rules engine to reduce database hits on the rules
                            var studyRulesEngine = new StudyRulesEngine(location, location.ServerPartition, location.LoadStudyXml());
                            studyRulesEngine.Apply(ServerRuleApplyTimeEnum.StudyProcessed, commandProcessor);
                        }

                        // Do the actual database updates.
                        if (!context.CommandProcessor.Execute())
                        {
                            Platform.Log(LogLevel.Error, "Unexpected failure processing Study level rules for study {0}", location.StudyInstanceUid);
                        }

                        // Log the FilesystemQueue related entries
                        location.LogFilesystemQueue();
                    }
                }
                finally
                {
                    location.ReleaseWriteLock();
                }
            }
        }
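The three broker counts collapse into a single branch decision: the study is treated as fully archived only when an ArchiveStudyStorage record exists and neither a pending archive entry nor a pending delete could invalidate it. A hypothetical helper making that decision explicit (the name and shape are illustrative, not part of the codebase):

        // Illustrative only: the branch condition from ProcessStudy, made explicit.
        static ServerRuleApplyTimeEnum ChooseApplyTime(bool archiveStudyStorageExists,
                                                       bool archiveQueueExists,
                                                       bool filesystemDeleteExists)
        {
            return (archiveStudyStorageExists && !archiveQueueExists && !filesystemDeleteExists)
                ? ServerRuleApplyTimeEnum.StudyArchived    // post-archival rules path
                : ServerRuleApplyTimeEnum.StudyProcessed;  // normal reprocessing path
        }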
Example #6
		/// <summary>
		/// Reprocess a specific study.
		/// </summary>
		/// <param name="partition">The ServerPartition the study is on.</param>
		/// <param name="location">The storage location of the study to process.</param>
		/// <param name="engine">The rules engine to use when processing the study.</param>
		/// <param name="postArchivalEngine">The rules engine used for studies that have been archived.</param>
		/// <param name="dataAccessEngine">The rules engine strictly used for setting data acess.</param>
		protected static void ProcessStudy(ServerPartition partition, StudyStorageLocation location, ServerRulesEngine engine, ServerRulesEngine postArchivalEngine, ServerRulesEngine dataAccessEngine)
		{
			if (!location.QueueStudyStateEnum.Equals(QueueStudyStateEnum.Idle) || !location.AcquireWriteLock())
			{
				Platform.Log(LogLevel.Error, "Unable to lock study {0}. The study is being processed. (Queue State: {1})", location.StudyInstanceUid, location.QueueStudyStateEnum.Description);
			}
			else
			{
				try
				{
					DicomFile msg = LoadInstance(location);
					if (msg == null)
					{
						Platform.Log(LogLevel.Error, "Unable to load file for study {0}", location.StudyInstanceUid);
						return;
					}

					bool archiveQueueExists;
					bool archiveStudyStorageExists;
					bool filesystemDeleteExists;
					using (IReadContext read = PersistentStoreRegistry.GetDefaultStore().OpenReadContext())
					{
						// Check for existing archive queue entries
						var archiveQueueBroker = read.GetBroker<IArchiveQueueEntityBroker>();
						var archiveQueueCriteria = new ArchiveQueueSelectCriteria();
						archiveQueueCriteria.StudyStorageKey.EqualTo(location.Key);
						archiveQueueExists = archiveQueueBroker.Count(archiveQueueCriteria) > 0;


						var archiveStorageBroker = read.GetBroker<IArchiveStudyStorageEntityBroker>();
						var archiveStudyStorageCriteria = new ArchiveStudyStorageSelectCriteria();
						archiveStudyStorageCriteria.StudyStorageKey.EqualTo(location.Key);
						archiveStudyStorageExists = archiveStorageBroker.Count(archiveStudyStorageCriteria) > 0;

						var filesystemQueueBroker = read.GetBroker<IFilesystemQueueEntityBroker>();
						var filesystemQueueCriteria = new FilesystemQueueSelectCriteria();
						filesystemQueueCriteria.StudyStorageKey.EqualTo(location.Key);
						filesystemQueueCriteria.FilesystemQueueTypeEnum.EqualTo(FilesystemQueueTypeEnum.DeleteStudy);
						filesystemDeleteExists = filesystemQueueBroker.Count(filesystemQueueCriteria) > 0;
					}

					using (var commandProcessor = new ServerCommandProcessor("Study Rule Processor"))
					{
						var context = new ServerActionContext(msg, location.FilesystemKey, partition, location.Key, commandProcessor);
					
						// Check if the Study has been archived 
						if (archiveStudyStorageExists && !archiveQueueExists && !filesystemDeleteExists)
						{
							// Add a command to delete the current filesystemQueue entries, so that they can 
							// be reinserted by the rules engine.
							context.CommandProcessor.AddCommand(new DeleteFilesystemQueueCommand(location.Key, ServerRuleApplyTimeEnum.StudyArchived));

							// How to deal with existing FilesystemQueue entries is problematic here.  If the study
							// has been migrated off tier 1, we probably don't want to modify the tier migration
							// entries.  Compression entries may have been entered when the Study was initially
							// processed; we don't want to delete them, because they might still be valid.
							// We just re-run the rules engine at this point, and delete only the StudyPurge entries,
							// since we know those would only be applied to archived studies.
							var studyRulesEngine = new StudyRulesEngine(postArchivalEngine, location, location.ServerPartition, location.LoadStudyXml());
							studyRulesEngine.Apply(ServerRuleApplyTimeEnum.StudyArchived, commandProcessor);

							// Post Archive doesn't allow data access rules.  Force Data Access rules to be reapplied
							// to these studies also.
							dataAccessEngine.Execute(context);
						}
						else
						{
							// Add a command to delete the current filesystemQueue entries, so that they can 
							// be reinserted by the rules engine.
							context.CommandProcessor.AddCommand(new DeleteFilesystemQueueCommand(location.Key, ServerRuleApplyTimeEnum.StudyProcessed));

							// Execute the rules engine, insert commands to update the database into the command processor.
							// Due to ticket #11673, we create a new rules engine instance for each study, since the Study QC rules
							// don't work right now with a single rules engine.
							//TODO CR (Jan 2014) - Check if we can go back to caching the rules engine to reduce database hits on the rules
							var studyRulesEngine = new StudyRulesEngine(location, location.ServerPartition, location.LoadStudyXml());
							studyRulesEngine.Apply(ServerRuleApplyTimeEnum.StudyProcessed, commandProcessor);
						}

						// Do the actual database updates.
						if (!context.CommandProcessor.Execute())
						{
							Platform.Log(LogLevel.Error, "Unexpected failure processing Study level rules for study {0}", location.StudyInstanceUid);
						}

						// Log the FilesystemQueue related entries
						location.LogFilesystemQueue();
					}
				}
				finally
				{
					location.ReleaseWriteLock();
				}
			}
		}