/// <summary>
/// Validates the state of the study.
/// </summary>
/// <param name="context">Name of the application</param>
/// <param name="studyStorage">The study to validate</param>
/// <param name="modes">Specifies which validations to execute</param>
public void ValidateStudyState(String context, StudyStorageLocation studyStorage, StudyIntegrityValidationModes modes)
{
    Platform.CheckForNullReference(studyStorage, "studyStorage");
    if (modes == StudyIntegrityValidationModes.None)
        return;

    using (ServerExecutionContext scope = new ServerExecutionContext())
    {
        Study study = studyStorage.LoadStudy(scope.PersistenceContext);
        if (study != null)
        {
            StudyXml studyXml = studyStorage.LoadStudyXml();

            if (modes == StudyIntegrityValidationModes.Default ||
                (modes & StudyIntegrityValidationModes.InstanceCount) == StudyIntegrityValidationModes.InstanceCount)
            {
                if (studyXml != null && studyXml.NumberOfStudyRelatedInstances != study.NumberOfStudyRelatedInstances)
                {
                    ValidationStudyInfo validationStudyInfo = new ValidationStudyInfo(study, studyStorage.ServerPartition);
                    throw new StudyIntegrityValidationFailure(
                        ValidationErrors.InconsistentObjectCount,
                        validationStudyInfo,
                        String.Format("Number of instances in database and xml do not match: {0} vs {1}.",
                                      study.NumberOfStudyRelatedInstances,
                                      studyXml.NumberOfStudyRelatedInstances));
                }
            }
        }
    }
}
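// Usage sketch (hypothetical call site): the declaring class isn't shown in
// this excerpt, so "validator" stands in for an instance of it, and the
// context string and studyStorage variable are illustrative. The point is
// that callers treat StudyIntegrityValidationFailure as the signal that the
// study is inconsistent, rather than checking a return value.
try
{
    validator.ValidateStudyState("ArchiveService", studyStorage,
                                 StudyIntegrityValidationModes.InstanceCount);
}
catch (StudyIntegrityValidationFailure e)
{
    Platform.Log(LogLevel.Error, e, "Study failed integrity validation; postponing processing.");
}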
public void Dispose()
{
    try
    {
        if (!DirectoryUtility.DeleteIfEmpty(_backupDirectory))
        {
            Platform.Log(LogLevel.Warn, "Some backup files were left behind in {0}", BackupDirectory);
        }

        if (Platform.IsLogLevelEnabled(LogLevel.Debug) && Directory.Exists(_tempDirectory))
        {
            Platform.Log(LogLevel.Debug, "Deleting temp folder: {0}", _tempDirectory);
        }

        DirectoryUtility.DeleteIfEmpty(_tempDirectory);
    }
    finally
    {
        if (_updateContext != null)
        {
            Rollback();
        }

        if (_readContext != null)
        {
            _readContext.Dispose();
            _readContext = null;
        }

        // Reset the current context for the thread
        _current = _inheritFrom;
    }
}
public ServerExecutionContext(String contextId, ServerExecutionContext inheritFrom)
{
    Platform.CheckForNullReference(contextId, "contextId");

    _contextId = contextId;
    _inheritFrom = inheritFrom;
    _current = this;
}
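// Usage sketch (hypothetical call site): contexts form a per-thread stack.
// The two-argument constructor makes the new instance the thread's current
// context, and Dispose() restores the one it inherited from. Only the
// constructor and Dispose() shown in this excerpt are assumed here.
using (ServerExecutionContext outer = new ServerExecutionContext("outer-task", null))
{
    using (ServerExecutionContext inner = new ServerExecutionContext("inner-task", outer))
    {
        // "inner" is the thread's current context within this block.
    }
    // Disposing "inner" restored "outer" as the current context.
}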
private void LoadAdditionalEntities()
{
    Debug.Assert(ServerPartition != null);
    Debug.Assert(StorageLocation != null);

    using (ServerExecutionContext context = new ServerExecutionContext())
    {
        // Lazily load the filesystem info only if it hasn't been loaded yet.
        // (The original checked "!= null", which made the assignment a no-op.)
        if (_filesystem == null)
            _filesystem = FilesystemMonitor.Instance.GetFilesystemInfo(StorageLocation.FilesystemKey);

        _study = StorageLocation.LoadStudy(context.ReadContext);
        _patient = Patient.Load(context.ReadContext, _study.PatientKey);
    }
}
/// <summary>
/// Get a list of candidates from the <see cref="FilesystemQueue"/>.
/// </summary>
/// <param name="item">The ServiceLock item.</param>
/// <param name="scheduledTime">The scheduled time to query against.</param>
/// <param name="type">The type of FilesystemQueue entry.</param>
/// <param name="statusCheck">If true, check for WorkQueue entries that already exist with a specific status; otherwise check for any WorkQueue entry.</param>
/// <returns>The list of queue entries.</returns>
protected IList<FilesystemQueue> GetFilesystemQueueCandidates(Model.ServiceLock item, DateTime scheduledTime, FilesystemQueueTypeEnum type, bool statusCheck)
{
    using (ServerExecutionContext context = new ServerExecutionContext())
    {
        IFilesystemQueueEntityBroker broker = context.ReadContext.GetBroker<IFilesystemQueueEntityBroker>();
        FilesystemQueueSelectCriteria fsQueueCriteria = new FilesystemQueueSelectCriteria();

        fsQueueCriteria.FilesystemKey.EqualTo(item.FilesystemKey);
        fsQueueCriteria.ScheduledTime.LessThanOrEqualTo(scheduledTime);
        fsQueueCriteria.FilesystemQueueTypeEnum.EqualTo(type);

        // Do the select based on the QueueStudyState (used to be based on a link to the WorkQueue table)
        StudyStorageSelectCriteria studyStorageSearchCriteria = new StudyStorageSelectCriteria();
        studyStorageSearchCriteria.QueueStudyStateEnum.EqualTo(QueueStudyStateEnum.Idle);
        fsQueueCriteria.StudyStorage.Exists(studyStorageSearchCriteria);

        fsQueueCriteria.ScheduledTime.SortAsc(0);

        IList<FilesystemQueue> list = broker.Find(fsQueueCriteria, 0, ServiceLockSettings.Default.FilesystemQueueResultCount);
        return list;
    }
}
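// Usage sketch (hypothetical call site in a processor derived from this
// class, assuming a Model.ServiceLock variable named "item"): pull the
// lossless-compress candidates that are due now. Only names defined in this
// excerpt are used.
IList<FilesystemQueue> candidates = GetFilesystemQueueCandidates(
    item, Platform.Time, FilesystemQueueTypeEnum.LosslessCompress, false);
foreach (FilesystemQueue candidate in candidates)
{
    // Schedule work for each candidate study here.
}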
/// <summary>
/// Process a <see cref="WorkQueue"/> item of type AutoRoute.
/// </summary>
protected override void ProcessItem(Model.WorkQueue item)
{
    if (WorkQueueItem.ScheduledTime >= WorkQueueItem.ExpirationTime && !HasPendingItems)
    {
        Platform.Log(LogLevel.Debug, "Removing Idle {0} entry : {1}", item.WorkQueueTypeEnum, item.GetKey().Key);
        base.PostProcessing(item, WorkQueueProcessorStatus.Complete, WorkQueueProcessorDatabaseUpdate.None);
        return;
    }

    if (!HasPendingItems)
    {
        // Nothing to process, change to idle state
        PostProcessing(item, WorkQueueProcessorStatus.Idle, WorkQueueProcessorDatabaseUpdate.None);
        return;
    }

    Platform.Log(LogLevel.Info,
                 "Moving study {0} for Patient {1} (PatientId:{2} A#:{3}) on Partition {4} to {5}...",
                 Study.StudyInstanceUid, Study.PatientsName, Study.PatientId,
                 Study.AccessionNumber, ServerPartition.Description,
                 DestinationDevice.AeTitle);

    // Load remote device information from the database.
    Device device = DestinationDevice;
    if (device == null)
    {
        item.FailureDescription = String.Format("Unknown auto-route destination \"{0}\"", item.DeviceKey);
        Platform.Log(LogLevel.Error, item.FailureDescription);
        PostProcessingFailure(item, WorkQueueProcessorFailureType.Fatal); // Fatal error
        return;
    }

    if (device.Dhcp && device.IpAddress.Length == 0)
    {
        item.FailureDescription = String.Format("Auto-route destination is a DHCP device with no known IP address: \"{0}\"", device.AeTitle);
        Platform.Log(LogLevel.Error, item.FailureDescription);
        PostProcessingFailure(item, WorkQueueProcessorFailureType.Fatal); // Fatal error
        return;
    }

    // Now set up the StorageSCU component
    int sendCounter = 0;
    using (ImageServerStorageScu scu = new ImageServerStorageScu(ServerPartition, device))
    {
        using (ServerExecutionContext context = new ServerExecutionContext())
        {
            // Set the preferred syntax lists
            scu.LoadPreferredSyntaxes(context.ReadContext);
        }

        // Load the instances to send into the SCU component
        scu.AddStorageInstanceList(InstanceList);

        // Set an event to be called when each image is transferred
        scu.ImageStoreCompleted += delegate(Object sender, StorageInstance instance)
        {
            if (instance.SendStatus.Status == DicomState.Success ||
                instance.SendStatus.Status == DicomState.Warning ||
                instance.SendStatus.Equals(DicomStatuses.SOPClassNotSupported))
            {
                sendCounter++;
                OnInstanceSent(instance);
            }

            if (instance.SendStatus.Status == DicomState.Failure)
            {
                scu.FailureDescription = instance.SendStatus.Description;
                if (false == String.IsNullOrEmpty(instance.ExtendedFailureDescription))
                {
                    scu.FailureDescription = String.Format("{0} [{1}]", scu.FailureDescription, instance.ExtendedFailureDescription);
                }
            }

            if (CancelPending && !(this is WebMoveStudyItemProcessor) && !scu.Canceled)
            {
                Platform.Log(LogLevel.Info, "Auto-route canceled due to shutdown for study: {0}", StorageLocation.StudyInstanceUid);
                item.FailureDescription = "Operation was canceled due to server shutdown request.";
                scu.Cancel();
            }
        };

        try
        {
            // Block until the send is complete
            scu.Send();

            // Join for the thread to exit
            scu.Join();
        }
        catch (Exception ex)
        {
            Platform.Log(LogLevel.Error, ex, "Error occurred while sending images to {0} : {1}", device.AeTitle, ex.Message);
        }
        finally
        {
            if (scu.FailureDescription.Length > 0)
            {
                item.FailureDescription = scu.FailureDescription;
                scu.Status = ScuOperationStatus.Failed;
            }

            // Reset the WorkQueue entry status
            if ((InstanceList.Count > 0 && sendCounter != InstanceList.Count) // not all SOPs were sent
                || scu.Status == ScuOperationStatus.Failed
                || scu.Status == ScuOperationStatus.ConnectFailed)
            {
                PostProcessingFailure(item, WorkQueueProcessorFailureType.NonFatal); // failures occurred
            }
            else
            {
                OnComplete();
            }
        }
    }
}
/// <summary>
/// Finds a list of <see cref="StudyIntegrityQueue"/> entries related to the specified <see cref="StudyStorage"/>.
/// </summary>
/// <param name="studyStorageKey"></param>
/// <param name="filter">A delegate used to filter the returned list; entries matching the predicate are removed. Pass in null to get the entire list.</param>
/// <returns>A list of <see cref="StudyIntegrityQueue"/> entries.</returns>
static public IList<StudyIntegrityQueue> FindSIQEntries(ServerEntityKey studyStorageKey, Predicate<StudyIntegrityQueue> filter)
{
    using (ServerExecutionContext scope = new ServerExecutionContext())
    {
        IStudyIntegrityQueueEntityBroker broker = scope.PersistenceContext.GetBroker<IStudyIntegrityQueueEntityBroker>();
        StudyIntegrityQueueSelectCriteria criteria = new StudyIntegrityQueueSelectCriteria();
        criteria.StudyStorageKey.EqualTo(studyStorageKey);
        criteria.InsertTime.SortDesc(0);

        IList<StudyIntegrityQueue> list = broker.Find(criteria);
        if (filter != null)
        {
            CollectionUtils.Remove(list, filter);
        }
        return list;
    }
}
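// Usage sketch (hypothetical call site): fetch SIQ entries for a study but
// drop any older than a retention cutoff. The cutoff is illustrative, and
// an InsertTime property on the entity is assumed, as the select criteria
// above suggest; the predicate marks entries to remove.
DateTime cutoff = Platform.Time.AddDays(-7); // hypothetical retention window
IList<StudyIntegrityQueue> recent = FindSIQEntries(
    studyStorageKey,
    delegate(StudyIntegrityQueue entry) { return entry.InsertTime < cutoff; });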
/// <summary>
/// Finds a list of <see cref="StudyHistory"/> records of the specified <see cref="StudyHistoryTypeEnum"/>
/// for the specified <see cref="StudyStorage"/>.
/// </summary>
/// <param name="studyStorage"></param>
/// <param name="types">The history types to match; pass in null to match StudyReconciled records only.</param>
/// <returns></returns>
static public IList<StudyHistory> FindStudyHistories(StudyStorage studyStorage, IEnumerable<StudyHistoryTypeEnum> types)
{
    // Use the ExecutionContext to re-use the db connection if possible
    using (ServerExecutionContext scope = new ServerExecutionContext())
    {
        IStudyHistoryEntityBroker broker = scope.PersistenceContext.GetBroker<IStudyHistoryEntityBroker>();
        StudyHistorySelectCriteria criteria = new StudyHistorySelectCriteria();
        criteria.StudyStorageKey.EqualTo(studyStorage.Key);

        // Default to StudyReconciled; override with the caller's list when one is supplied.
        criteria.StudyHistoryTypeEnum.EqualTo(StudyHistoryTypeEnum.StudyReconciled);
        if (types != null)
        {
            criteria.StudyHistoryTypeEnum.In(types);
        }

        criteria.InsertTime.SortAsc(0);
        IList<StudyHistory> historyList = broker.Find(criteria);
        return historyList;
    }
}
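// Usage sketch (hypothetical call site): query histories with an explicit
// type list. Passing null instead would fall back to the StudyReconciled
// default criterion above; only enum values already shown in this excerpt
// are used.
IList<StudyHistory> histories = FindStudyHistories(
    studyStorage,
    new[] { StudyHistoryTypeEnum.StudyReconciled });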
/// <summary>
/// Checks for the existence of a SOP for a given study in the <see cref="WorkQueue"/> for a <see cref="WorkQueueTypeEnum.ReconcileStudy"/> entry.
/// </summary>
/// <param name="studyStorageKey">The StudyStorage primary key</param>
/// <param name="seriesInstanceUid">The Series Instance Uid of the SOP</param>
/// <param name="sopInstanceUid">The SOP Instance to look for</param>
/// <returns>true if an entry exists, false if it doesn't</returns>
static public bool WorkQueueUidExists(ServerEntityKey studyStorageKey, string seriesInstanceUid, string sopInstanceUid)
{
    Platform.CheckForNullReference(studyStorageKey, "studyStorageKey");

    using (ServerExecutionContext scope = new ServerExecutionContext())
    {
        IWorkQueueEntityBroker broker = scope.PersistenceContext.GetBroker<IWorkQueueEntityBroker>();

        WorkQueueUidSelectCriteria uidSelectCriteria = new WorkQueueUidSelectCriteria();
        uidSelectCriteria.SeriesInstanceUid.EqualTo(seriesInstanceUid);
        uidSelectCriteria.SopInstanceUid.EqualTo(sopInstanceUid);

        WorkQueueSelectCriteria selectCriteria = new WorkQueueSelectCriteria();
        selectCriteria.StudyStorageKey.EqualTo(studyStorageKey);
        selectCriteria.WorkQueueTypeEnum.EqualTo(WorkQueueTypeEnum.ReconcileStudy);
        selectCriteria.WorkQueueUidRelatedEntityCondition.Exists(uidSelectCriteria);

        return broker.Count(selectCriteria) > 0;
    }
}
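// Usage sketch (hypothetical call site; the key and UID variables are
// illustrative): guard against queuing the same SOP for reconciliation
// twice.
if (!WorkQueueUidExists(studyStorageKey, seriesInstanceUid, sopInstanceUid))
{
    // Safe to queue this SOP for reconciliation.
}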
/// <summary>
/// Finds a list of <see cref="WorkQueue"/> entries related to the specified <paramref name="studyStorageKey"/>.
/// </summary>
/// <param name="studyStorageKey"></param>
/// <param name="filter">A delegate used to filter the returned list; entries matching the predicate are removed. Pass in null to get the entire list.</param>
/// <returns>A list of <see cref="WorkQueue"/> entries.</returns>
static public IList<WorkQueue> FindWorkQueueEntries(ServerEntityKey studyStorageKey, Predicate<WorkQueue> filter)
{
    Platform.CheckForNullReference(studyStorageKey, "studyStorageKey");

    using (ServerExecutionContext scope = new ServerExecutionContext())
    {
        IWorkQueueEntityBroker broker = scope.PersistenceContext.GetBroker<IWorkQueueEntityBroker>();
        WorkQueueSelectCriteria criteria = new WorkQueueSelectCriteria();
        criteria.StudyStorageKey.EqualTo(studyStorageKey);
        criteria.InsertTime.SortDesc(0);

        IList<WorkQueue> list = broker.Find(criteria);
        if (filter != null)
        {
            CollectionUtils.Remove(list, filter);
        }
        return list;
    }
}
private static bool GetStudyStorage(ServerPartition partition, string studyInstanceUid, out StudyStorage storage)
{
    using (ServerExecutionContext context = new ServerExecutionContext())
    {
        storage = StudyStorage.Load(context.ReadContext, partition.Key, studyInstanceUid);
        return storage != null;
    }
}
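// Usage sketch (hypothetical call site; partition and studyInstanceUid are
// illustrative): the out-parameter pattern lets the caller branch on
// existence and use the record in one step.
StudyStorage storage;
if (GetStudyStorage(partition, studyInstanceUid, out storage))
{
    // "storage" is non-null here; the study is known to the partition.
}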
private static bool ArchiveTimeRange(ImageServerLogWriter<ApplicationLog> writer, DateTime cutOffTime)
{
    ApplicationLogSelectCriteria criteria = new ApplicationLogSelectCriteria();
    criteria.Timestamp.LessThan(cutOffTime);
    criteria.Timestamp.SortAsc(0);

    using (ServerExecutionContext context = new ServerExecutionContext())
    {
        IApplicationLogEntityBroker broker = context.ReadContext.GetBroker<IApplicationLogEntityBroker>();
        List<ServerEntityKey> keyList = new List<ServerEntityKey>(1000);
        try
        {
            broker.Find(criteria, delegate(ApplicationLog result)
            {
                keyList.Add(result.Key);

                if (writer.WriteLog(result, result.Timestamp))
                {
                    // The log has been flushed; delete the cached log entries.
                    // Purposely use a read context here, even though we're doing
                    // an update, so we don't use transaction wrappers; optimization
                    // is more important at this point.
                    using (IReadContext update = PersistentStoreRegistry.GetDefaultStore().OpenReadContext())
                    {
                        IApplicationLogEntityBroker updateBroker = update.GetBroker<IApplicationLogEntityBroker>();
                        foreach (ServerEntityKey key in keyList)
                            updateBroker.Delete(key);
                    }

                    keyList = new List<ServerEntityKey>(1000);
                }
            });

            if (keyList.Count > 0)
            {
                // Purposely use a read context here, even though we're doing an update, so we
                // don't have to do an explicit commit and don't use transaction wrappers.
                using (IReadContext update = PersistentStoreRegistry.GetDefaultStore().OpenReadContext())
                {
                    IApplicationLogEntityBroker updateBroker = update.GetBroker<IApplicationLogEntityBroker>();
                    foreach (ServerEntityKey key in keyList)
                        updateBroker.Delete(key);
                }
            }
        }
        catch (Exception e)
        {
            Platform.Log(LogLevel.Error, e, "Unexpected exception when purging log files.");
            return false;
        }

        return true;
    }
}
private bool ArchiveLogs(ServerFilesystemInfo archiveFs)
{
    string archivePath = Path.Combine(archiveFs.Filesystem.FilesystemPath, "AlertLog");

    DateTime cutOffTime = Platform.Time.Date.AddDays(ServiceLockSettings.Default.AlertCachedDays * -1);
    AlertSelectCriteria criteria = new AlertSelectCriteria();
    criteria.InsertTime.LessThan(cutOffTime);
    criteria.InsertTime.SortAsc(0);

    using (ServerExecutionContext context = new ServerExecutionContext())
    {
        IAlertEntityBroker broker = context.ReadContext.GetBroker<IAlertEntityBroker>();

        ImageServerLogWriter<Alert> writer = new ImageServerLogWriter<Alert>(archivePath, "Alert");

        List<ServerEntityKey> keyList = new List<ServerEntityKey>(500);
        try
        {
            broker.Find(criteria, delegate(Alert result)
            {
                keyList.Add(result.Key);

                // If configured, don't flush to disk. We just delete the contents of keyList below.
                if (!ServiceLockSettings.Default.AlertDelete)
                {
                    if (writer.WriteLog(result, result.InsertTime))
                    {
                        // The log has been flushed; delete the cached log entries.
                        // Note: the original code resolved an IApplicationLogEntityBroker
                        // here; an IAlertEntityBroker is required to delete Alert rows,
                        // matching the cleanup block below.
                        using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
                        {
                            IAlertEntityBroker updateBroker = update.GetBroker<IAlertEntityBroker>();
                            foreach (ServerEntityKey key in keyList)
                                updateBroker.Delete(key);
                            update.Commit();
                        }

                        keyList = new List<ServerEntityKey>();
                    }
                }
            });

            writer.FlushLog();

            if (keyList.Count > 0)
            {
                using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
                {
                    IAlertEntityBroker updateBroker = update.GetBroker<IAlertEntityBroker>();
                    foreach (ServerEntityKey key in keyList)
                        updateBroker.Delete(key);
                    update.Commit();
                }
            }
        }
        catch (Exception e)
        {
            Platform.Log(LogLevel.Error, e, "Unexpected exception when purging Alert log files.");
            writer.Dispose();
            return false;
        }

        writer.Dispose();
        return true;
    }
}
/// <summary>
/// Process StudyCompress candidates retrieved from the <see cref="Model.FilesystemQueue"/> table.
/// </summary>
/// <param name="candidateList">The list of candidate studies for compression.</param>
/// <param name="type">The type of compress candidate (lossy or lossless)</param>
private void ProcessCompressCandidates(IEnumerable<FilesystemQueue> candidateList, FilesystemQueueTypeEnum type)
{
    using (ServerExecutionContext context = new ServerExecutionContext())
    {
        DateTime scheduledTime = Platform.Time.AddSeconds(10);

        foreach (FilesystemQueue queueItem in candidateList)
        {
            // Check for Shutdown/Cancel
            if (CancelPending)
                break;

            // First, get the StudyStorage locations for the study, and calculate the disk usage.
            StudyStorageLocation location;
            if (!FilesystemMonitor.Instance.GetWritableStudyStorageLocation(queueItem.StudyStorageKey, out location))
                continue;

            StudyXml studyXml;
            try
            {
                studyXml = LoadStudyXml(location);
            }
            catch (Exception e)
            {
                Platform.Log(LogLevel.Error, e, "Skipping compress candidate, unexpected exception loading StudyXml file for {0}", location.GetStudyPath());
                continue;
            }

            using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
            {
                ILockStudy lockstudy = update.GetBroker<ILockStudy>();
                LockStudyParameters lockParms = new LockStudyParameters();
                lockParms.StudyStorageKey = location.Key;
                lockParms.QueueStudyStateEnum = QueueStudyStateEnum.CompressScheduled;
                if (!lockstudy.Execute(lockParms) || !lockParms.Successful)
                {
                    Platform.Log(LogLevel.Warn,
                                 "Unable to lock study for inserting compress request. Reason:{0}. Skipping study ({1})",
                                 lockParms.FailureReason, location.StudyInstanceUid);
                    continue;
                }

                // Stagger the scheduled times so the studies don't all start at once.
                scheduledTime = scheduledTime.AddSeconds(3);

                IInsertWorkQueueFromFilesystemQueue workQueueInsert = update.GetBroker<IInsertWorkQueueFromFilesystemQueue>();
                InsertWorkQueueFromFilesystemQueueParameters insertParms = new InsertWorkQueueFromFilesystemQueueParameters();
                insertParms.WorkQueueTypeEnum = WorkQueueTypeEnum.CompressStudy;
                insertParms.FilesystemQueueTypeEnum = type;
                insertParms.StudyStorageKey = location.GetKey();
                insertParms.ServerPartitionKey = location.ServerPartitionKey;
                insertParms.ScheduledTime = scheduledTime;
                insertParms.DeleteFilesystemQueue = true;
                insertParms.Data = queueItem.QueueXml;

                try
                {
                    WorkQueue entry = workQueueInsert.FindOne(insertParms);
                    InsertWorkQueueUidFromStudyXml(studyXml, update, entry.GetKey());

                    update.Commit();
                    _studiesInserted++;
                }
                catch (Exception e)
                {
                    Platform.Log(LogLevel.Error, e, "Skipping compress record, unexpected problem inserting 'CompressStudy' record into WorkQueue for Study {0}", location.StudyInstanceUid);
                    // throw; -- would cause abort of inserts, go ahead and try everything
                }
            }
        }
    }
}
/// <summary>
/// Load the specific SOP Instance Uids in the database for the WorkQueue item.
/// </summary>
/// <param name="item">The WorkQueue item.</param>
protected void LoadUids(Model.WorkQueue item)
{
    if (_uidList == null)
    {
        UidsLoadTime.Add(delegate
        {
            using (ServerExecutionContext context = new ServerExecutionContext())
            {
                IWorkQueueUidEntityBroker select = context.ReadContext.GetBroker<IWorkQueueUidEntityBroker>();

                WorkQueueUidSelectCriteria parms = new WorkQueueUidSelectCriteria();
                parms.WorkQueueKey.EqualTo(item.GetKey());

                _uidList = select.Find(parms);
                _uidList = TruncateList(item, _uidList);
            }
        });
    }
}
private ReconcileStudyQueueDescription CreateQueueEntryDescription(DicomFile file)
{
    using (var context = new ServerExecutionContext())
    {
        Study study = _studyLocation.LoadStudy(context.PersistenceContext);
        if (study != null)
        {
            var desc = new ReconcileStudyQueueDescription
            {
                ExistingPatientId = study.PatientId,
                ExistingPatientName = study.PatientsName,
                ExistingAccessionNumber = study.AccessionNumber,
                ConflictingPatientName = file.DataSet[DicomTags.PatientsName].ToString(),
                ConflictingPatientId = file.DataSet[DicomTags.PatientId].ToString(),
                ConflictingAccessionNumber = file.DataSet[DicomTags.AccessionNumber].ToString()
            };
            return desc;
        }

        return null;
    }
}
private void ReinventoryFilesystem(Filesystem filesystem)
{
    ServerPartition partition;

    DirectoryInfo filesystemDir = new DirectoryInfo(filesystem.FilesystemPath);

    foreach (DirectoryInfo partitionDir in filesystemDir.GetDirectories())
    {
        if (GetServerPartition(partitionDir.Name, out partition) == false)
            continue;

        foreach (DirectoryInfo dateDir in partitionDir.GetDirectories())
        {
            if (dateDir.FullName.EndsWith("Deleted", StringComparison.InvariantCultureIgnoreCase) ||
                dateDir.FullName.EndsWith(ServerPlatform.ReconcileStorageFolder, StringComparison.InvariantCultureIgnoreCase))
                continue;

            List<FileInfo> fileList;

            foreach (DirectoryInfo studyDir in dateDir.GetDirectories())
            {
                if (studyDir.FullName.EndsWith("Deleted", StringComparison.InvariantCultureIgnoreCase))
                    continue;

                // Check for Cancel message
                if (CancelPending)
                    return;

                String studyInstanceUid = studyDir.Name;

                StudyStorageLocation location;
                if (GetStudyStorageLocation(partition.Key, studyInstanceUid, out location))
                {
                    #region Study record exists in db

                    int integrityQueueCount;
                    int workQueueCount;
                    Study theStudy = GetStudyAndQueues(location, out integrityQueueCount, out workQueueCount);
                    if (theStudy != null)
                        continue;

                    if (integrityQueueCount != 0 && workQueueCount != 0)
                        continue;

                    fileList = LoadSopFiles(studyDir, false);

                    if (fileList.Count == 0)
                    {
                        Platform.Log(LogLevel.Warn, "Found empty study folder with StorageLocation, deleting StorageLocation: {0}\\{1}", dateDir.Name, studyDir.Name);
                        studyDir.Delete(true);

                        RemoveStudyStorage(location);
                        continue;
                    }

                    // WriteLock the new study storage for study processing
                    if (!location.QueueStudyStateEnum.Equals(QueueStudyStateEnum.ProcessingScheduled))
                    {
                        string failureReason;
                        if (!ServerHelper.LockStudy(location.Key, QueueStudyStateEnum.ProcessingScheduled, out failureReason))
                            Platform.Log(LogLevel.Error, "Unable to lock study {0} for Study Processing", location.StudyInstanceUid);
                    }

                    #endregion
                }
                else
                {
                    #region Directory not in DB

                    fileList = LoadSopFiles(studyDir, true);

                    if (fileList.Count == 0)
                    {
                        Platform.Log(LogLevel.Warn, "Found empty study folder: {0}\\{1}", dateDir.Name, studyDir.Name);
                        continue;
                    }

                    DicomFile file = LoadFileFromList(fileList);
                    if (file == null)
                    {
                        Platform.Log(LogLevel.Warn, "Found directory with no readable files: {0}\\{1}", dateDir.Name, studyDir.Name);
                        continue;
                    }

                    // Do a second check, using the study instance uid from a file in the directory.
                    // Had an issue with trailing periods on uids causing us to not find the
                    // study storage, and insert a new record into the database.
                    studyInstanceUid = file.DataSet[DicomTags.StudyInstanceUid].ToString();
                    if (GetStudyStorageLocation(partition.Key, studyInstanceUid, out location))
                    {
                        continue;
                    }

                    StudyStorage storage;
                    if (GetStudyStorage(partition, studyInstanceUid, out storage))
                    {
                        Platform.Log(LogLevel.Warn, "Study {0} on filesystem partition {1} is offline {2}",
                                     studyInstanceUid, partition.Description, studyDir.ToString());
                        continue;
                    }

                    Platform.Log(LogLevel.Info, "Reinventory inserting study storage location for {0} on partition {1}",
                                 studyInstanceUid, partition.Description);

                    // Insert StudyStorage
                    using (IUpdateContext update = _store.OpenUpdateContext(UpdateContextSyncMode.Flush))
                    {
                        IInsertStudyStorage studyInsert = update.GetBroker<IInsertStudyStorage>();
                        InsertStudyStorageParameters insertParms = new InsertStudyStorageParameters
                        {
                            ServerPartitionKey = partition.GetKey(),
                            StudyInstanceUid = studyInstanceUid,
                            Folder = dateDir.Name,
                            FilesystemKey = filesystem.GetKey(),
                            QueueStudyStateEnum = QueueStudyStateEnum.Idle
                        };

                        if (file.TransferSyntax.LosslessCompressed)
                        {
                            insertParms.TransferSyntaxUid = file.TransferSyntax.UidString;
                            insertParms.StudyStatusEnum = StudyStatusEnum.OnlineLossless;
                        }
                        else if (file.TransferSyntax.LossyCompressed)
                        {
                            insertParms.TransferSyntaxUid = file.TransferSyntax.UidString;
                            insertParms.StudyStatusEnum = StudyStatusEnum.OnlineLossy;
                        }
                        else
                        {
                            insertParms.TransferSyntaxUid = TransferSyntax.ExplicitVrLittleEndianUid;
                            insertParms.StudyStatusEnum = StudyStatusEnum.Online;
                        }

                        location = studyInsert.FindOne(insertParms);

                        // WriteLock the new study storage for study processing
                        ILockStudy lockStudy = update.GetBroker<ILockStudy>();
                        LockStudyParameters lockParms = new LockStudyParameters
                        {
                            StudyStorageKey = location.Key,
                            QueueStudyStateEnum = QueueStudyStateEnum.ProcessingScheduled
                        };
                        if (!lockStudy.Execute(lockParms) || !lockParms.Successful)
                            Platform.Log(LogLevel.Error, "Unable to lock study {0} for Study Processing", location.StudyInstanceUid);

                        update.Commit();
                    }

                    #endregion
                }

                string studyXml = location.GetStudyXmlPath();
                if (File.Exists(studyXml))
                    FileUtils.Delete(studyXml);

                string studyGZipXml = location.GetCompressedStudyXmlPath();
                if (File.Exists(studyGZipXml))
                    FileUtils.Delete(studyGZipXml);

                foreach (FileInfo sopFile in fileList)
                {
                    String sopInstanceUid = sopFile.Name.Replace(sopFile.Extension, string.Empty);

                    using (ServerExecutionContext context = new ServerExecutionContext())
                    {
                        // Just use a read context here, in hopes of improving
                        // performance. Every other place in the code should use
                        // update contexts when doing transactions.
                        IInsertWorkQueue workQueueInsert = context.ReadContext.GetBroker<IInsertWorkQueue>();

                        InsertWorkQueueParameters queueInsertParms = new InsertWorkQueueParameters
                        {
                            WorkQueueTypeEnum = WorkQueueTypeEnum.StudyProcess,
                            StudyStorageKey = location.GetKey(),
                            ServerPartitionKey = partition.GetKey(),
                            SeriesInstanceUid = sopFile.Directory.Name,
                            SopInstanceUid = sopInstanceUid,
                            ScheduledTime = Platform.Time
                        };

                        if (workQueueInsert.FindOne(queueInsertParms) == null)
                            Platform.Log(LogLevel.Error, "Failure attempting to insert SOP Instance into WorkQueue during Reinventory.");
                    }
                }
            }

            // Clean up the date directory, if it's empty.
            DirectoryUtility.DeleteIfEmpty(dateDir.FullName);
        }
    }
}
/// <summary>
/// Load the <see cref="ServerRulesEngine"/> for each partition.
/// </summary>
private void LoadRulesEngine()
{
    using (ServerExecutionContext context = new ServerExecutionContext())
    {
        var broker = context.ReadContext.GetBroker<IServerPartitionEntityBroker>();
        var criteria = new ServerPartitionSelectCriteria();
        IList<ServerPartition> partitions = broker.Find(criteria);

        foreach (ServerPartition partition in partitions)
        {
            var engine = new ServerRulesEngine(ServerRuleApplyTimeEnum.StudyProcessed, partition.Key);
            engine.Load();
            _engines.Add(partition, engine);

            engine = new ServerRulesEngine(ServerRuleApplyTimeEnum.StudyArchived, partition.Key);
            engine.Load();
            _postArchivalEngines.Add(partition, engine);

            engine = new ServerRulesEngine(ServerRuleApplyTimeEnum.StudyProcessed, partition.Key);
            engine.AddIncludeType(ServerRuleTypeEnum.DataAccess);
            engine.Load();
            _dataAccessEngine.Add(partition, engine);
        }
    }
}
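// Usage sketch (hypothetical call site, assuming the fields populated above
// are IDictionary<ServerPartition, ServerRulesEngine>, as the Add calls
// suggest): look up the pre-loaded engine for a study's partition instead of
// constructing and loading one per study.
ServerRulesEngine engine;
if (_engines.TryGetValue(partition, out engine))
{
    // Apply the StudyProcessed rules for this partition.
}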
/// <summary>
/// Returns the number of Delete Study, Tier Migrate, and Study Purge work queue items
/// that are still Pending or In Progress for the filesystem associated with the
/// specified <see cref="ServiceLock"/>.
/// </summary>
/// <param name="item">The ServiceLock item.</param>
/// <returns>The number of WorkQueue entries pending.</returns>
private int CheckWorkQueueCount(Model.ServiceLock item)
{
    using (ServerExecutionContext context = new ServerExecutionContext())
    {
        IWorkQueueEntityBroker select = context.ReadContext.GetBroker<IWorkQueueEntityBroker>();

        WorkQueueSelectCriteria criteria = new WorkQueueSelectCriteria();
        criteria.WorkQueueTypeEnum.In(new[] { WorkQueueTypeEnum.DeleteStudy, WorkQueueTypeEnum.MigrateStudy, WorkQueueTypeEnum.PurgeStudy });

        // Count only Pending and In Progress entries; if an entry has a Failure
        // status, we don't want to block on it.
        criteria.WorkQueueStatusEnum.In(new[] { WorkQueueStatusEnum.Pending, WorkQueueStatusEnum.InProgress });

        FilesystemStudyStorageSelectCriteria filesystemCriteria = new FilesystemStudyStorageSelectCriteria();
        filesystemCriteria.FilesystemKey.EqualTo(item.FilesystemKey);
        criteria.FilesystemStudyStorageRelatedEntityCondition.Exists(filesystemCriteria);

        int count = select.Count(criteria);
        return count;
    }
}
/// <summary>
/// Process the <see cref="ServiceLock"/> item.
/// </summary>
/// <param name="item"></param>
protected override void OnProcess(Model.ServiceLock item)
{
    // Ensure the persistent store singleton is initialized before traversal;
    // the returned store isn't needed here.
    PersistentStoreRegistry.GetDefaultStore();

    using (ServerExecutionContext context = new ServerExecutionContext())
    {
        IServerPartitionEntityBroker broker = context.ReadContext.GetBroker<IServerPartitionEntityBroker>();
        ServerPartitionSelectCriteria criteria = new ServerPartitionSelectCriteria();
        criteria.AeTitle.SortAsc(0);

        _partitions = broker.Find(criteria);
    }

    ServerFilesystemInfo info = FilesystemMonitor.Instance.GetFilesystemInfo(item.FilesystemKey);

    Platform.Log(LogLevel.Info, "Starting rebuild of Study XML files for filesystem: {0}", info.Filesystem.Description);

    TraverseFilesystemStudies(info.Filesystem);

    item.ScheduledTime = item.ScheduledTime.AddDays(1);

    if (CancelPending)
    {
        Platform.Log(LogLevel.Info,
                     "FilesystemRebuildXml of {0} has been canceled, rescheduling. Note that the entire filesystem will be rebuilt again.",
                     info.Filesystem.Description);
        UnlockServiceLock(item, true, Platform.Time.AddMinutes(1));
    }
    else
        UnlockServiceLock(item, false, Platform.Time.AddDays(1));

    Platform.Log(LogLevel.Info, "Completed rebuild of the Study XML files for filesystem: {0}", info.Filesystem.Description);
}
private bool ArchiveLogs(ServerFilesystemInfo archiveFs)
{
    try
    {
        using (ServerExecutionContext context = new ServerExecutionContext())
        {
            string archivePath = Path.Combine(archiveFs.Filesystem.FilesystemPath, "ApplicationLog");

            ApplicationLogSelectCriteria criteria = new ApplicationLogSelectCriteria();
            criteria.Timestamp.SortAsc(0);
            IApplicationLogEntityBroker broker = context.ReadContext.GetBroker<IApplicationLogEntityBroker>();
            ApplicationLog firstLog = broker.FindOne(criteria);
            if (firstLog == null)
                return true;

            DateTime currentCutOffTime = firstLog.Timestamp.AddMinutes(5);

            int cachedDays = ServiceLockSettings.Default.ApplicationLogCachedDays;
            if (cachedDays < 0)
                cachedDays = 0;

            DateTime cutOffTime = Platform.Time.Date.AddDays(cachedDays * -1);

            if (currentCutOffTime > cutOffTime)
                return true;

            using (ImageServerLogWriter<ApplicationLog> writer = new ImageServerLogWriter<ApplicationLog>(archivePath, "ApplicationLog"))
            {
                // Archive in 5-minute windows, starting from the oldest log entry.
                while (currentCutOffTime < cutOffTime)
                {
                    if (!ArchiveTimeRange(writer, currentCutOffTime))
                    {
                        writer.FlushLog();
                        return false;
                    }
                    currentCutOffTime = currentCutOffTime.AddMinutes(5);
                }

                // Now flush the final partial window up to the cutoff.
                if (!ArchiveTimeRange(writer, cutOffTime))
                {
                    writer.FlushLog();
                    return false;
                }

                writer.FlushLog();
            }

            return true;
        }
    }
    catch (Exception e)
    {
        Platform.Log(LogLevel.Error, e, "Unexpected exception when writing log file.");
        return false;
    }
}