/// <summary>
/// Moves pending/failed ArchiveQueue entries off enabled, read-only PartitionArchives
/// onto the single enabled, writable PartitionArchive, rescheduling them as Pending
/// to run immediately. Commits only when at least one queue entry was updated.
/// </summary>
/// <param name="args">Command-line arguments (unused).</param>
public void RunApplication(string[] args)
{
    using (IUpdateContext updateContext = PersistentStore.OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        IPartitionArchiveEntityBroker partitionBroker = updateContext.GetBroker<IPartitionArchiveEntityBroker>();

        // Locate the partition new work should be assigned to: enabled and writable.
        PartitionArchiveSelectCriteria partitionCriteria = new PartitionArchiveSelectCriteria();
        partitionCriteria.Enabled.EqualTo(true);
        partitionCriteria.ReadOnly.EqualTo(false);
        PartitionArchive activePartition = partitionBroker.FindOne(partitionCriteria);
        if (activePartition == null)
        {
            // Fixed: original message read "No active ArchivePartition were found."
            // (wrong entity name and subject/verb disagreement).
            Platform.Log(LogLevel.Error, "No active PartitionArchive was found.");
            return;
        }

        // Deliberately reuse the same criteria object: Enabled is still constrained
        // to true, and flipping ReadOnly selects the enabled-but-read-only partitions
        // whose queue entries must be migrated.
        partitionCriteria.ReadOnly.EqualTo(true);
        IList<ServerEntityKey> partitionKeys = new List<ServerEntityKey>();
        foreach (PartitionArchive partition in partitionBroker.Find(partitionCriteria))
        {
            partitionKeys.Add(partition.Key);
        }

        // Reassign Failed/Pending entries from the read-only partitions to the active
        // partition, clear the processor claim, and schedule them to run now.
        IArchiveQueueEntityBroker queueBroker = updateContext.GetBroker<IArchiveQueueEntityBroker>();
        ArchiveQueueSelectCriteria queueCriteria = new ArchiveQueueSelectCriteria();
        queueCriteria.ArchiveQueueStatusEnum.In(new ArchiveQueueStatusEnum[] { ArchiveQueueStatusEnum.Failed, ArchiveQueueStatusEnum.Pending });
        queueCriteria.PartitionArchiveKey.In(partitionKeys);

        ArchiveQueueUpdateColumns queueColumns = new ArchiveQueueUpdateColumns
        {
            PartitionArchiveKey = activePartition.Key,
            ArchiveQueueStatusEnum = ArchiveQueueStatusEnum.Pending,
            ProcessorId = "",
            ScheduledTime = Platform.Time
        };

        if (queueBroker.Update(queueCriteria, queueColumns))
        {
            updateContext.Commit();
        }
    }
}
/// <summary>
/// Reschedules the supplied ArchiveQueue entries for retry: each entry that is currently
/// in the Failed state is flipped back to Pending at the given time, with its processor
/// claim cleared. All updates run in one transaction, committed only if every update succeeds.
/// </summary>
/// <param name="items">Queue entries to reset; may be null or empty (returns false).</param>
/// <param name="time">New scheduled time for the reset entries.</param>
/// <returns>True when every entry was updated and the transaction committed; false otherwise.</returns>
public bool ResetArchiveQueueItem(IList<ArchiveQueue> items, DateTime time)
{
    if (items == null || items.Count == 0)
        return false;

    var resetColumns = new ArchiveQueueUpdateColumns
    {
        ArchiveQueueStatusEnum = ArchiveQueueStatusEnum.Pending,
        ProcessorId = "",
        ScheduledTime = time
    };

    bool allUpdated = true;
    IPersistentStore store = PersistentStoreRegistry.GetDefaultStore();
    using (IUpdateContext ctx = store.OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        var broker = ctx.GetBroker<IArchiveQueueEntityBroker>();
        foreach (ArchiveQueue entry in items)
        {
            // Only entries still in the Failed state are eligible for a reset.
            var failedCriteria = new ArchiveQueueSelectCriteria();
            failedCriteria.ArchiveQueueStatusEnum.EqualTo(ArchiveQueueStatusEnum.Failed);
            failedCriteria.StudyStorageKey.EqualTo(entry.StudyStorageKey);

            if (!broker.Update(failedCriteria, resetColumns))
            {
                allUpdated = false;
                break;
            }
        }

        if (allUpdated)
            ctx.Commit();
    }

    return allUpdated;
}
/// <summary>
/// Returns the number of ArchiveQueue entries associated with the given study's
/// StudyStorage record.
/// </summary>
/// <param name="study">The study to count queue entries for; must not be null.</param>
/// <returns>Count of matching ArchiveQueue entries.</returns>
public int GetArchiveQueueCount(Study study)
{
    Platform.CheckForNullReference(study, "Study");

    var countCriteria = new ArchiveQueueSelectCriteria();
    countCriteria.StudyStorageKey.EqualTo(study.StudyStorageKey);
    // NOTE(review): a sort order should have no effect on a count query;
    // kept for exact parity with the original behavior — confirm and remove.
    countCriteria.ScheduledTime.SortDesc(0);

    var adaptor = new ArchiveQueueAdaptor();
    return adaptor.GetCount(countCriteria);
}
/// <summary>
/// Determines whether the specified <see cref="PartitionArchive"/> can be deleted.
/// A partition may not be deleted while studies are scheduled to be archived on it
/// (ArchiveQueue entries exist) or have already been archived to it
/// (ArchiveStudyStorage entries exist).
/// </summary>
/// <param name="partition">The partition archive to check.</param>
/// <returns>True when the partition has no queued and no archived studies.</returns>
public bool CanDelete(PartitionArchive partition)
{
    var queueCriteria = new ArchiveQueueSelectCriteria();
    queueCriteria.PartitionArchiveKey.EqualTo(partition.GetKey());

    var queueAdaptor = new ArchiveQueueAdaptor();
    int queuedCount = queueAdaptor.GetCount(queueCriteria);

    // Short-circuit: the storage query is only issued when nothing is queued,
    // matching the original's "only check if we need to" behavior.
    if (queuedCount > 0)
        return false;

    var storageCriteria = new ArchiveStudyStorageSelectCriteria();
    storageCriteria.PartitionArchiveKey.EqualTo(partition.GetKey());

    var storageAdaptor = new ArchiveStudyStorageAdaptor();
    return storageAdaptor.GetCount(storageCriteria) == 0;
}
/// <summary>
/// Reprocess a specific study: acquires a write lock on the study, determines from the
/// database whether the study has been archived (and is not pending archive or deletion),
/// then re-runs the appropriate rules engine (post-archival or study-processed) inside a
/// command processor that applies the resulting database updates.
/// </summary>
/// <param name="partition">The ServerPartition the study is on.</param>
/// <param name="location">The storage location of the study to process.</param>
/// <param name="engine">The rules engine to use when processing the study.
/// NOTE(review): this parameter is not referenced in the body — a new
/// <see cref="StudyRulesEngine"/> is created per study instead (see ticket #11673 comment
/// below); confirm whether it can be removed from the signature.</param>
/// <param name="postArchivalEngine">The rules engine used for studies that have been archived.</param>
/// <param name="dataAccessEngine">The rules engine strictly used for setting data access.</param>
protected static void ProcessStudy(ServerPartition partition, StudyStorageLocation location, ServerRulesEngine engine, ServerRulesEngine postArchivalEngine, ServerRulesEngine dataAccessEngine)
{
    // Refuse to run unless the study is idle AND the write lock is acquired; both are
    // required so rule output does not race concurrent study processing.
    if (!location.QueueStudyStateEnum.Equals(QueueStudyStateEnum.Idle) || !location.AcquireWriteLock())
    {
        Platform.Log(LogLevel.Error, "Unable to lock study {0}. The study is being processed. (Queue State: {1})", location.StudyInstanceUid, location.QueueStudyStateEnum.Description);
    }
    else
    {
        try
        {
            DicomFile msg = LoadInstance(location);
            if (msg == null)
            {
                Platform.Log(LogLevel.Error, "Unable to load file for study {0}", location.StudyInstanceUid);
                return;
            }

            bool archiveQueueExists;
            bool archiveStudyStorageExists;
            bool filesystemDeleteExists;
            using (IReadContext read = PersistentStoreRegistry.GetDefaultStore().OpenReadContext())
            {
                // Check for existing archive queue entries (study is pending archive).
                var archiveQueueBroker = read.GetBroker<IArchiveQueueEntityBroker>();
                var archiveQueueCriteria = new ArchiveQueueSelectCriteria();
                archiveQueueCriteria.StudyStorageKey.EqualTo(location.Key);
                archiveQueueExists = archiveQueueBroker.Count(archiveQueueCriteria) > 0;

                // Check whether the study has already been archived.
                var archiveStorageBroker = read.GetBroker<IArchiveStudyStorageEntityBroker>();
                var archiveStudyStorageCriteria = new ArchiveStudyStorageSelectCriteria();
                archiveStudyStorageCriteria.StudyStorageKey.EqualTo(location.Key);
                archiveStudyStorageExists = archiveStorageBroker.Count(archiveStudyStorageCriteria) > 0;

                // Check for a pending DeleteStudy entry in the filesystem queue.
                var filesystemQueueBroker = read.GetBroker<IFilesystemQueueEntityBroker>();
                var filesystemQueueCriteria = new FilesystemQueueSelectCriteria();
                filesystemQueueCriteria.StudyStorageKey.EqualTo(location.Key);
                filesystemQueueCriteria.FilesystemQueueTypeEnum.EqualTo(FilesystemQueueTypeEnum.DeleteStudy);
                filesystemDeleteExists = filesystemQueueBroker.Count(filesystemQueueCriteria) > 0;
            }

            using (var commandProcessor = new ServerCommandProcessor("Study Rule Processor"))
            {
                var context = new ServerActionContext(msg, location.FilesystemKey, partition, location.Key, commandProcessor);

                // Check if the Study has been archived and is not pending archive or deletion.
                if (archiveStudyStorageExists && !archiveQueueExists && !filesystemDeleteExists)
                {
                    // Add a command to delete the current filesystemQueue entries, so that they can
                    // be reinserted by the rules engine.
                    context.CommandProcessor.AddCommand(new DeleteFilesystemQueueCommand(location.Key, ServerRuleApplyTimeEnum.StudyArchived));

                    // How to deal with existing FilesystemQueue entries is problematic here. If the study
                    // has been migrated off tier 1, we probably don't want to modify the tier migration
                    // entries. Compression entries may have been entered when the Study was initially
                    // processed, we don't want to delete them, because they might still be valid.
                    // We just re-run the rules engine at this point, and delete only the StudyPurge entries,
                    // since those we know at least would only be applied for archived studies.
                    var studyRulesEngine = new StudyRulesEngine(postArchivalEngine, location, location.ServerPartition, location.LoadStudyXml());
                    studyRulesEngine.Apply(ServerRuleApplyTimeEnum.StudyArchived, commandProcessor);

                    // Post Archive doesn't allow data access rules. Force Data Access rules to be reapplied
                    // to these studies also.
                    dataAccessEngine.Execute(context);
                }
                else
                {
                    // Add a command to delete the current filesystemQueue entries, so that they can
                    // be reinserted by the rules engine.
                    context.CommandProcessor.AddCommand(new DeleteFilesystemQueueCommand(location.Key, ServerRuleApplyTimeEnum.StudyProcessed));

                    // Execute the rules engine, insert commands to update the database into the command processor.
                    // Due to ticket #11673, we create a new rules engine instance for each study, since the Study QC rules
                    // don't work right now with a single rules engine.
                    //TODO CR (Jan 2014) - Check if we can go back to caching the rules engine to reduce database hits on the rules
                    var studyRulesEngine = new StudyRulesEngine(location, location.ServerPartition, location.LoadStudyXml());
                    studyRulesEngine.Apply(ServerRuleApplyTimeEnum.StudyProcessed, commandProcessor);
                }

                // Do the actual database updates.
                if (false == context.CommandProcessor.Execute())
                {
                    Platform.Log(LogLevel.Error, "Unexpected failure processing Study level rules for study {0}", location.StudyInstanceUid);
                }

                // Log the FilesystemQueue related entries
                location.LogFilesystemQueue();
            }
        }
        finally
        {
            // Always release the lock acquired above, even on failure.
            location.ReleaseWriteLock();
        }
    }
}
/// <summary>
/// Returns true when at least one ArchiveQueue entry exists for this study's
/// StudyStorage record, i.e. archiving is currently scheduled.
/// </summary>
public bool IsArchivingScheduled()
{
    using (var readContext = PersistentStoreRegistry.GetDefaultStore().OpenReadContext())
    {
        var scheduleCriteria = new ArchiveQueueSelectCriteria();
        scheduleCriteria.StudyStorageKey.EqualTo(StudyStorage.GetKey());

        var queueBroker = readContext.GetBroker<IArchiveQueueEntityBroker>();
        return queueBroker.Find(scheduleCriteria).Any();
    }
}
/// <summary>
/// Deletes all ArchiveQueue entries for the study on the target PartitionArchive
/// as this command's contribution to the enclosing database update.
/// </summary>
/// <param name="theProcessor">The command processor executing this command.</param>
/// <param name="updateContext">The update context for the database operation.</param>
protected override void OnExecute(CommandProcessor theProcessor, IUpdateContext updateContext)
{
    var deleteCriteria = new ArchiveQueueSelectCriteria();
    deleteCriteria.PartitionArchiveKey.EqualTo(_archive.GetKey());
    deleteCriteria.StudyStorageKey.EqualTo(_studyStorage.GetKey());

    updateContext.GetBroker<IArchiveQueueEntityBroker>().Delete(deleteCriteria);
}
/// <summary>
/// Reset any archival request that may have been left In Progress when the service last
/// shut down: entries claimed by this processor that are still In Progress are requeued
/// as Pending, scheduled two minutes out. Commits only when something was reset.
/// </summary>
public void ResetFailedArchiveQueueItems()
{
    using (IUpdateContext updateContext = PersistentStore.OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        // Find entries this processor left In Progress (e.g. after an unclean shutdown).
        IArchiveQueueEntityBroker broker = updateContext.GetBroker<IArchiveQueueEntityBroker>();
        ArchiveQueueSelectCriteria criteria = new ArchiveQueueSelectCriteria();
        criteria.ProcessorId.EqualTo(ServerPlatform.ProcessorId);
        criteria.ArchiveQueueStatusEnum.EqualTo(ArchiveQueueStatusEnum.InProgress);

        IList<ArchiveQueue> failedList = broker.Find(criteria);
        foreach (ArchiveQueue failedItem in failedList)
        {
            // Requeue as Pending with a small delay so startup work settles first.
            UpdateArchiveQueue(updateContext, failedItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2));

            // Fixed: log message originally misspelled "Reseting".
            Platform.Log(LogLevel.Warn, "Resetting ArchiveQueue entry {0} to Pending that was In Progress at startup for PartitionArchive {1}",
                         failedItem.Key, _partitionArchive.Description);
        }

        if (failedList.Count > 0)
            updateContext.Commit();
        else
            Platform.Log(LogLevel.Info, "No ArchiveQueue entries to reset on startup for archive {0}", _partitionArchive.Description);
    }
}
/// <summary>
/// Copy constructor: initializes a new criteria instance from the state of
/// <paramref name="other"/> via the base-class copy constructor.
/// </summary>
/// <param name="other">The criteria instance to copy.</param>
public ArchiveQueueSelectCriteria(ArchiveQueueSelectCriteria other) : base(other) {}
/// <summary>
/// Copy constructor: initializes a new criteria instance from the state of
/// <paramref name="other"/> via the base-class copy constructor.
/// </summary>
/// <param name="other">The criteria instance to copy.</param>
public ArchiveQueueSelectCriteria(ArchiveQueueSelectCriteria other) : base(other) { }