/// <summary>
/// Removes queue entries that reference the study identified by <c>_storageLocationKey</c>.
/// Filesystem queue entries are always examined: at <c>StudyArchived</c> apply time only
/// <c>PurgeStudy</c> entries are deleted, at any other apply time every entry is deleted.
/// Work queue entries (PurgeStudy/DeleteStudy/CompressStudy/MigrateStudy) are deleted only
/// when the apply time is not <c>StudyArchived</c>.
/// </summary>
/// <param name="theProcessor">The executing command processor (unused here).</param>
/// <param name="updateContext">Update context used to obtain entity brokers.</param>
/// <exception cref="ApplicationException">Thrown when a queue entry cannot be deleted.</exception>
protected override void OnExecute(CommandProcessor theProcessor, IUpdateContext updateContext)
{
    var filesystemQueueBroker = updateContext.GetBroker<IFilesystemQueueEntityBroker>();
    var criteria = new FilesystemQueueSelectCriteria();
    criteria.StudyStorageKey.EqualTo(_storageLocationKey);
    IList<FilesystemQueue> filesystemQueueItems = filesystemQueueBroker.Find(criteria);

    bool studyArchivedApplyTime = _applyTime.Equals(ServerRuleApplyTimeEnum.StudyArchived);

    foreach (FilesystemQueue queue in filesystemQueueItems)
    {
        // At StudyArchived apply time only purge entries are removed; otherwise everything goes.
        bool delete = !studyArchivedApplyTime
                      || queue.FilesystemQueueTypeEnum.Equals(FilesystemQueueTypeEnum.PurgeStudy);

        if (delete)
        {
            if (!filesystemQueueBroker.Delete(queue.GetKey()))
            {
                throw new ApplicationException("Unable to delete items in the filesystem queue");
            }
        }
    }

    if (!studyArchivedApplyTime)
    {
        // Query the work queue only on the path that actually deletes its entries;
        // the original implementation performed this database query unconditionally.
        var workQueueBroker = updateContext.GetBroker<IWorkQueueEntityBroker>();
        var workQueueCriteria = new WorkQueueSelectCriteria();
        workQueueCriteria.StudyStorageKey.EqualTo(_storageLocationKey);
        workQueueCriteria.WorkQueueTypeEnum.In(new[] { WorkQueueTypeEnum.PurgeStudy, WorkQueueTypeEnum.DeleteStudy, WorkQueueTypeEnum.CompressStudy, WorkQueueTypeEnum.MigrateStudy });
        IList<WorkQueue> workQueueItems = workQueueBroker.Find(workQueueCriteria);

        // delete work queue
        foreach (Model.WorkQueue item in workQueueItems)
        {
            if (!item.Delete(updateContext))
            {
                throw new ApplicationException("Unable to delete items in the work queue");
            }
        }
    }
}
/// <summary>
/// Runs the server rules engine for the given apply time against one representative
/// SOP instance from each series of the study, then runs series-level rules.
/// </summary>
/// <param name="applyTime">Rule apply time the engine should be loaded for.</param>
/// <param name="theProcessor">Command processor that receives any generated commands.</param>
/// <exception cref="ApplicationException">
/// Thrown when no SOP instances can be located for the study's series.
/// </exception>
public void Apply(ServerRuleApplyTimeEnum applyTime, CommandProcessor theProcessor)
{
    try
    {
        // (Re)build the cached rules engine if absent or built for a different apply time.
        if (_studyRulesEngine == null || !_studyRulesEngine.RuleApplyTime.Equals(applyTime))
        {
            _studyRulesEngine = new ServerRulesEngine(applyTime, _location.ServerPartitionKey);
            _studyRulesEngine.Load();
        }

        List<string> representativeFiles = GetFirstInstanceInEachStudySeries();
        if (representativeFiles.Count == 0)
        {
            string message = String.Format("Unexpectedly unable to find SOP instances for rules engine in each series in study: {0}", _location.StudyInstanceUid);
            Platform.Log(LogLevel.Error, message);
            throw new ApplicationException(message);
        }

        Platform.Log(LogLevel.Info, "Processing Study Level rules for study {0} on partition {1} at {2} apply time", _location.StudyInstanceUid, _partition.Description, applyTime.Description);

        foreach (string filePath in representativeFiles)
        {
            var dicomFile = new DicomFile(filePath);
            dicomFile.Load(DicomReadOptions.Default);

            var actionContext = new ServerActionContext(dicomFile, _location.FilesystemKey, _partition, _location.Key, theProcessor)
            {
                RuleEngine = _studyRulesEngine
            };
            _studyRulesEngine.Execute(actionContext);

            ProcessSeriesRules(dicomFile, theProcessor);
        }

        if (applyTime.Equals(ServerRuleApplyTimeEnum.StudyProcessed))
        {
            // A bit kludgy: studies with only one image were incorrectly getting archive
            // requests inserted when they were scheduled for deletion.  Adding this command
            // here — after the rules engine has executed — removes a previously inserted
            // archive request for the study when a delete was inserted at the study level.
            theProcessor.AddCommand(new InsertArchiveQueueCommand(_location.ServerPartitionKey, _location.Key));
        }
    }
    finally
    {
        // Always tell the engine how many rules ran, even when an exception escapes.
        if (_studyRulesEngine != null)
        {
            _studyRulesEngine.Complete(_studyRulesEngine.RulesApplied);
        }
    }
}
/// <summary>
/// Creates a command that applies a set of image-level edits to an existing study.
/// </summary>
/// <param name="partition">Partition the study belongs to.</param>
/// <param name="studyLocation">Current storage location of the study being updated.</param>
/// <param name="imageLevelCommands">Image-level update commands to apply.</param>
/// <param name="applyTime">Rule apply time used to load the editing rules engine.</param>
public UpdateStudyCommand(ServerPartition partition, StudyStorageLocation studyLocation, IList<BaseImageLevelUpdateCommand> imageLevelCommands, ServerRuleApplyTimeEnum applyTime)
    : base("Update existing study")
{
    _partition = partition;
    _oldStudyLocation = studyLocation;
    _commands = imageLevelCommands;
    _statistics = new UpdateStudyStatistics(_oldStudyLocation.StudyInstanceUid);

    // Load the engine for editing rules; at SopProcessed apply time restrict it
    // to auto-route rules only.
    _rulesEngine = new ServerRulesEngine(applyTime, _partition.Key);
    if (applyTime.Equals(ServerRuleApplyTimeEnum.SopProcessed))
    {
        _rulesEngine.AddIncludeType(ServerRuleTypeEnum.AutoRoute);
    }
    _rulesEngine.Load();
}