/// <summary>
/// Reschedule a list of <see cref="WorkQueue"/> items.
/// </summary>
/// <param name="items">List of <see cref="WorkQueue"/> items to be rescheduled.</param>
/// <param name="newScheduledTime">New schedule start date/time.</param>
/// <param name="expirationTime">New expiration date/time.</param>
/// <param name="priority">New priority.</param>
/// <returns>A value indicating whether all <see cref="WorkQueue"/> items in <paramref name="items"/> are updated successfully.</returns>
/// <remarks>
/// All updates are applied in a single update context. If one or more <see cref="WorkQueue"/>
/// in <paramref name="items"/> cannot be rescheduled, the context is disposed without
/// committing (reverting all changes) and <b>false</b> is returned.
/// </remarks>
public bool RescheduleWorkQueueItems(IList<WorkQueue> items, DateTime newScheduledTime, DateTime expirationTime, WorkQueuePriorityEnum priority)
{
    if (items == null || items.Count == 0)
        return false;

    // Every item receives the same column values; failure tracking is reset.
    WorkQueueUpdateColumns updatedColumns = new WorkQueueUpdateColumns
    {
        WorkQueuePriorityEnum = priority,
        ScheduledTime = newScheduledTime,
        ExpirationTime = expirationTime,
        FailureCount = 0,
        FailureDescription = String.Empty,
        LastUpdatedTime = Platform.Time
    };

    bool allUpdated = false;
    IPersistentStore store = PersistentStoreRegistry.GetDefaultStore();
    using (IUpdateContext ctx = store.OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        IWorkQueueEntityBroker workQueueBroker = ctx.GetBroker<IWorkQueueEntityBroker>();
        foreach (WorkQueue item in items)
        {
            allUpdated = workQueueBroker.Update(item.Key, updatedColumns);
            if (!allUpdated)
                break; // abort; disposing the uncommitted context rolls back prior updates
        }

        if (allUpdated)
            ctx.Commit();
    }

    return allUpdated;
}
/// <summary>
/// Deletes a list of <see cref="WorkQueue"/> items from the system.
/// </summary>
/// <param name="items">The list of <see cref="WorkQueue"/> items to be deleted.</param>
/// <returns>A value indicating whether all items have been successfully deleted.</returns>
/// <remarks>
/// If one or more <see cref="WorkQueue"/> in <paramref name="items"/> cannot be deleted,
/// the method will return <b>false</b> and the deletion will be undone (i.e., all of the
/// <see cref="WorkQueue"/> items will remain in the database).
/// </remarks>
public bool DeleteWorkQueueItems(IList<WorkQueue> items)
{
    if (items == null || items.Count == 0)
        return false;

    bool allDeleted = true;

    IPersistentStore store = PersistentStoreRegistry.GetDefaultStore();
    using (IUpdateContext uctx = store.OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        IDeleteWorkQueue delete = uctx.GetBroker<IDeleteWorkQueue>();

        // Attempt every deletion (even after a failure) so all errors get logged,
        // but only commit when the whole batch succeeded.
        foreach (WorkQueue item in items)
        {
            WorkQueueDeleteParameters parms = new WorkQueueDeleteParameters
            {
                ServerPartitionKey = item.ServerPartitionKey,
                StudyStorageKey = item.StudyStorageKey,
                WorkQueueKey = item.Key,
                WorkQueueTypeEnum = item.WorkQueueTypeEnum
            };

            // NOTE: QueueStudyState is reset by the stored procedure
            if (!delete.Execute(parms))
            {
                Platform.Log(LogLevel.Error, "Unexpected error trying to delete WorkQueue entry");
                allDeleted = false;
            }
        }

        if (allDeleted)
            uctx.Commit();
    }

    return allDeleted;
}
/// <summary>
/// Persists an alert to the Alert table, serializing its content as XML.
/// </summary>
/// <param name="alert">The alert to store.</param>
private static void WriteToDatabase(ImageServer.Common.Alert alert)
{
    XmlDocument doc = CreateXmlContent(alert);

    // Map the in-memory alert onto the table's columns.
    AlertUpdateColumns columns = new AlertUpdateColumns
    {
        AlertCategoryEnum = AlertCategoryEnum.GetEnum(alert.Category.ToString()),
        AlertLevelEnum = AlertLevelEnum.GetEnum(alert.Level.ToString()),
        Component = alert.Source.Name,
        Content = doc,
        InsertTime = Platform.Time,
        Source = alert.Source.Host,
        TypeCode = alert.Code
    };

    IPersistentStore store = PersistentStoreRegistry.GetDefaultStore();
    using (IUpdateContext ctx = store.OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        IAlertEntityBroker alertBroker = ctx.GetBroker<IAlertEntityBroker>();
        alertBroker.Insert(columns);
        ctx.Commit();
    }
}
/// <summary>
/// Runs the database upgrade script inside a single transaction, then stamps the
/// destination version into the DatabaseVersion table. Optionally re-runs the
/// stored-procedure scripts afterwards.
/// </summary>
public void Execute()
{
    // Wrap the upgrade in a single commit.
    using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        var context = updateContext as UpdateContext;
        if (context == null)
        {
            Console.WriteLine("Unexpected error opening connection to the database.");
            throw new ApplicationException("Error opening connection to the database.");
        }

        // Execute the upgrade script itself.
        ExecuteSql(context, GetScript());

        // Record the new schema version. The criteria carries no restrictions,
        // so the update applies to every row in the DatabaseVersion table.
        var criteria = new DatabaseVersionSelectCriteria();
        var columns = new DatabaseVersionUpdateColumns
        {
            Revision = DestinationVersion.Revision.ToString(),
            Build = DestinationVersion.Build.ToString(),
            Minor = DestinationVersion.Minor.ToString(),
            Major = DestinationVersion.Major.ToString()
        };
        context.GetBroker<IDatabaseVersionEntityBroker>().Update(criteria, columns);

        updateContext.Commit();
    }

    // Refresh stored procedures after the schema change when requested.
    if (_upgradeStoredProcs)
    {
        var app = new RunSqlScriptApplication();
        app.RunApplication(new[] { "-storedprocedures" });
    }
}
/// <summary>
/// Method called when stopping the DICOM SCP.
/// </summary>
/// <remarks>
/// Flags the persistent store as shutting down, detaches the partition-changed
/// handler, then stops and audits every listener (primary and alternate-AE).
/// </remarks>
protected override void Stop()
{
    //TODO CR (Jan 2014): Move this into the base if it applies to all subclasses?
    PersistentStoreRegistry.GetDefaultStore().ShutdownRequested = true;

    lock (_syncLock)
    {
        // Never started (or already stopped) -- nothing to tear down.
        if (_changedEvent == null)
            return;

        ServerPartitionMonitor.Instance.Changed -= _changedEvent;

        // Both listener lists are stopped and audited identically; the duplicated
        // loop bodies from the original are factored into a single helper.
        StopAndAuditListeners(_listenerList);
        StopAndAuditListeners(_alternateAeListenerList);
    }
}

/// <summary>
/// Stops each SCP listener in <paramref name="listeners"/> and logs an
/// ApplicationStopped audit message for it.
/// </summary>
/// <param name="listeners">The listeners to stop.</param>
private static void StopAndAuditListeners(IEnumerable<DicomScp<DicomScpContext>> listeners)
{
    foreach (DicomScp<DicomScpContext> scp in listeners)
    {
        scp.Stop();

        var helper = new ApplicationActivityAuditHelper(
            ServerPlatform.AuditSource,
            EventIdentificationContentsEventOutcomeIndicator.Success,
            ApplicationActivityType.ApplicationStopped,
            new AuditProcessActiveParticipant(scp.AeTitle));
        ServerAuditHelper.LogAuditMessage(helper);
    }
}
/// <summary>
/// Deletes a failed <see cref="WorkQueue"/> entry (and its WorkQueueUid rows) and
/// inserts a replacement reprocess entry for the same study, all in one transaction.
/// </summary>
/// <param name="item">The <see cref="WorkQueue"/> item to reprocess.</param>
/// <returns><b>true</b> if the reprocess entry was created and committed; otherwise <b>false</b>.</returns>
/// <remarks>
/// If any step fails, the update context is disposed without committing, so the
/// original entry remains untouched.
/// </remarks>
public bool ReprocessWorkQueueItem(WorkQueue item)
{
    // #10620: Get a list of remaining WorkQueueUids which need to be reprocess
    // Note: currently only WorkQueueUIDs in failed StudyProcess will be reprocessed
    var remainingWorkQueueUidPaths = item.GetAllWorkQueueUidPaths();

    IPersistentStore store = PersistentStoreRegistry.GetDefaultStore();
    using (IUpdateContext ctx = store.OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        // delete current workqueue
        IWorkQueueUidEntityBroker uidBroker = ctx.GetBroker<IWorkQueueUidEntityBroker>();
        WorkQueueUidSelectCriteria criteria = new WorkQueueUidSelectCriteria();
        criteria.WorkQueueKey.EqualTo(item.GetKey());

        // NOTE(review): Delete returns a row count; ">= 0" also accepts zero rows
        // deleted -- presumably negative signals an error. Confirm broker contract.
        if (uidBroker.Delete(criteria) >= 0)
        {
            IWorkQueueEntityBroker workQueueBroker = ctx.GetBroker<IWorkQueueEntityBroker>();
            if (workQueueBroker.Delete(item.GetKey()))
            {
                IList<StudyStorageLocation> locations = item.LoadStudyLocations(ctx);
                if (locations != null && locations.Count > 0)
                {
                    // Schedule the reprocess against the first returned location.
                    StudyReprocessor reprocessor = new StudyReprocessor();
                    String reason = String.Format("User reprocesses failed {0}", item.WorkQueueTypeEnum);
                    WorkQueue reprocessEntry = reprocessor.ReprocessStudy(ctx, reason, locations[0], remainingWorkQueueUidPaths, Platform.Time);
                    if (reprocessEntry != null)
                    {
                        // Only commit once the replacement entry exists.
                        ctx.Commit();
                    }
                    return (reprocessEntry != null);
                }
            }
        }
    }
    return (false);
}
/// <summary>
/// Deletes an order, first detaching any studies on the partition that reference it.
/// </summary>
/// <param name="partitionKey">Key of the partition owning the order.</param>
/// <param name="orderKey">Key of the order to delete.</param>
/// <returns>The result of the order delete operation.</returns>
public bool DeleteOrderItem(ServerEntityKey partitionKey, ServerEntityKey orderKey)
{
    using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        // Disconnect studies from order
        var criteria = new StudySelectCriteria();
        criteria.OrderKey.EqualTo(orderKey);
        criteria.ServerPartitionKey.EqualTo(partitionKey);

        var detachColumns = new StudyUpdateColumns { OrderKey = null };
        updateContext.GetBroker<IStudyEntityBroker>().Update(criteria, detachColumns);

        // Remove the order itself, then commit both changes together.
        bool deleted = _adaptor.Delete(updateContext, orderKey);
        updateContext.Commit();
        return deleted;
    }
}
/// <summary>
/// Reset the Lock for a specific <see cref="Model.ServiceLock"/> row.
/// </summary>
/// <param name="item">The row to reset the lock for.</param>
/// <remarks>
/// Retries indefinitely with a 2-second pause on database errors so that a
/// transient outage cannot leave the row permanently locked. The entry is
/// released and rescheduled 10 minutes out.
/// </remarks>
private void ResetServiceLock(Model.ServiceLock item)
{
    // keep trying just in case of db error
    while (true)
    {
        try
        {
            using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
            {
                // Update the ServiceLock item status and times.
                IUpdateServiceLock update = updateContext.GetBroker<IUpdateServiceLock>();
                ServiceLockUpdateParameters parms = new ServiceLockUpdateParameters
                {
                    ServiceLockKey = item.GetKey(),
                    Lock = false,
                    ScheduledTime = Platform.Time.AddMinutes(10),
                    ProcessorId = item.ProcessorId
                };

                if (false == update.Execute(parms))
                {
                    Platform.Log(LogLevel.Error, "Unable to update ServiceLock GUID Status: {0}",
                                 item.GetKey().ToString());
                }

                updateContext.Commit();
            }
            break;
        }
        catch (Exception ex)
        {
            Platform.Log(LogLevel.Error, ex, "Exception has occurred when trying to reset the entry. Retry later");
            _terminationEvent.WaitOne(2000, false);
        }
    }
}
/// <summary>
/// One-time initialization of the WorkQueue manager: verifies database
/// connectivity, notifies extensions, and creates the processor.
/// </summary>
/// <returns><b>true</b> on success; <b>false</b> to have the caller retry after <see cref="ThreadRetryDelay"/>.</returns>
protected override bool Initialize()
{
    if (_theProcessor == null)
    {
        // Force a read context to be opened.  When developing the retry mechanism
        // for startup when the DB was down, there were problems when the type
        // initializer for enumerated values were failing first.  For some reason,
        // when the database went back online, they would still give exceptions.
        // changed to force the processor to open a dummy DB connect and cause an
        // exception here, instead of getting to the enumerated value initializer.
        using (IReadContext readContext = PersistentStoreRegistry.GetDefaultStore().OpenReadContext())
        {
            readContext.Dispose();
        }

        // Give every registered extension a chance to hook in before processing starts.
        var xp = new WorkQueueManagerExtensionPoint();
        IWorkQueueManagerExtensionPoint[] extensions = CollectionUtils.Cast<IWorkQueueManagerExtensionPoint>(xp.CreateExtensions()).ToArray();
        foreach (IWorkQueueManagerExtensionPoint extension in extensions)
        {
            try
            {
                extension.OnInitializing(this);
            }
            catch (Exception)
            {
                // An extension failure aborts initialization; schedule a retry.
                ThreadRetryDelay = (int)_retryDelay.TotalMilliseconds;
                return (false);
            }
        }

        _theProcessor = new WorkQueueProcessor(_threadCount, ThreadStop, Name);
    }
    return (true);
}
/// <summary>
/// Removes the study's database records via the DeleteStudyStorage stored procedure.
/// </summary>
/// <param name="item">The <see cref="Model.WorkQueue"/> item identifying the study storage to delete.</param>
protected void RemoveDatabase(Model.WorkQueue item)
{
    // NOTE: This was an IUpdateContext, however, it was modified to be an IReadContext
    // after having problems w/ locks on asystem with a fair amount of load.  The
    // updates are just automatically committed within the stored procedure when it
    // runs...
    using (IReadContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenReadContext())
    {
        // Setup the delete parameters
        DeleteStudyStorageParameters parms = new DeleteStudyStorageParameters();
        parms.ServerPartitionKey = item.ServerPartitionKey;
        parms.StudyStorageKey = item.StudyStorageKey;

        // Get the Insert Instance broker and do the insert
        IDeleteStudyStorage delete = updateContext.GetBroker<IDeleteStudyStorage>();
        if (false == delete.Execute(parms))
        {
            // Failure is logged only; there is no retry or rollback here.
            Platform.Log(LogLevel.Error, "Unexpected error when trying to delete study: {0} on partition {1}",
                         StorageLocation.StudyInstanceUid, ServerPartition.Description);
        }
    }
}
/// <summary>
/// Method for processing Series level queries.
/// </summary>
/// <param name="message">The C-FIND query attributes received from the SCU.</param>
/// <param name="del">Delegate invoked once per matching series with the populated response dataset.</param>
/// <returns></returns>
public override void Query(DicomAttributeCollection message, ServerQueryResultDelegate del)
{
    //Read context for the query.
    using (IReadContext read = PersistentStoreRegistry.GetDefaultStore().OpenReadContext())
    {
        var tagList = new List<uint>();
        var selectSeries = read.GetBroker<ISeriesEntityBroker>();

        //TODO (CR May 2010): Should change so that the Partition AE Title is passed in the RetrieveAeTitle tag in the query message.
        var criteria = new SeriesSelectCriteria();
        if (Partition != null)
        {
            criteria.ServerPartitionKey.EqualTo(Partition.Key);
        }

        DicomAttributeCollection data = message;

        // Record every requested tag (so it can be echoed in each response) and
        // translate each non-empty attribute into a select-criteria condition.
        foreach (DicomAttribute attrib in message)
        {
            tagList.Add(attrib.Tag.TagValue);
            if (!attrib.IsNull)
            {
                switch (attrib.Tag.TagValue)
                {
                    case DicomTags.StudyInstanceUid:
                        // Study UIDs are resolved to StudyKey values with a separate lookup.
                        List<ServerEntityKey> list = LoadStudyKey(read, (string[])data[DicomTags.StudyInstanceUid].Values);
                        QueryHelper.SetKeyCondition(criteria.StudyKey, list.ToArray());
                        break;
                    case DicomTags.SeriesInstanceUid:
                        QueryHelper.SetStringArrayCondition(criteria.SeriesInstanceUid,
                                                            (string[])data[DicomTags.SeriesInstanceUid].Values);
                        break;
                    case DicomTags.Modality:
                        QueryHelper.SetStringCondition(criteria.Modality,
                                                       data[DicomTags.Modality].GetString(0, string.Empty));
                        break;
                    case DicomTags.SeriesNumber:
                        QueryHelper.SetStringCondition(criteria.SeriesNumber,
                                                       data[DicomTags.SeriesNumber].GetString(0, string.Empty));
                        break;
                    case DicomTags.SeriesDescription:
                        QueryHelper.SetStringCondition(criteria.SeriesDescription,
                                                       data[DicomTags.SeriesDescription].GetString(0, string.Empty));
                        break;
                    case DicomTags.PerformedProcedureStepStartDate:
                        // Date/time attributes support DICOM range matching.
                        QueryHelper.SetRangeCondition(criteria.PerformedProcedureStepStartDate,
                                                      data[DicomTags.PerformedProcedureStepStartDate].GetString(0, string.Empty));
                        break;
                    case DicomTags.PerformedProcedureStepStartTime:
                        QueryHelper.SetRangeCondition(criteria.PerformedProcedureStepStartTime,
                                                      data[DicomTags.PerformedProcedureStepStartTime].GetString(0, string.Empty));
                        break;
                    case DicomTags.RequestAttributesSequence: // todo
                        break;
                }
            }
        }

        // Open a second read context, in case other queries are required.
        using (IReadContext subRead = PersistentStoreRegistry.GetDefaultStore().OpenReadContext())
        {
            selectSeries.Find(criteria, delegate(Series row)
            {
                // A C-CANCEL from the SCU aborts the enumeration via an exception.
                if (CancelReceived)
                    throw new DicomException("DICOM C-Cancel Received");

                var response = new DicomMessage();
                PopulateSeries(subRead, message, response, tagList, row);
                del(response.DataSet);
            });
        }

        return;
    }
}
/// <summary>
/// Restores an archived study whose storage location is still online by extracting
/// the archive over the existing folder, rebuilding the StudyXml, updating the
/// study state, and re-applying the rules engine.
/// </summary>
/// <param name="queueItem">The <see cref="RestoreQueue"/> entry being processed.</param>
/// <param name="zipFile">Path to the archived zip file.</param>
/// <param name="destinationFolder">Folder the study is restored into.</param>
private void RestoreOnlineStudy(RestoreQueue queueItem, string zipFile, string destinationFolder)
{
    try
    {
        using (var processor = new ServerCommandProcessor("HSM Restore Online Study"))
        {
            var zipService = Platform.GetService<IZipService>();
            // NOTE(review): OpenWrite is used here only to enumerate entry names;
            // presumably the IZipService writer exposes the existing archive's
            // entries -- confirm, since a read-side API would be more natural.
            using (var zipWriter = zipService.OpenWrite(zipFile))
            {
                // One extract-and-replace command per archived file.
                foreach (string file in zipWriter.EntryFileNames)
                {
                    processor.AddCommand(new ExtractZipFileAndReplaceCommand(zipFile, file, destinationFolder));
                }
            }

            // We rebuild the StudyXml, in case any settings or issues have happened since archival
            processor.AddCommand(new RebuildStudyXmlCommand(_location.StudyInstanceUid, destinationFolder));

            // Derive the study status from the archived transfer syntax.
            StudyStatusEnum status;
            if (_syntax.Encapsulated && _syntax.LosslessCompressed)
            {
                status = StudyStatusEnum.OnlineLossless;
            }
            else if (_syntax.Encapsulated && _syntax.LossyCompressed)
            {
                status = StudyStatusEnum.OnlineLossy;
            }
            else
            {
                status = StudyStatusEnum.Online;
            }

            processor.AddCommand(new UpdateStudyStateCommand(_location, status, _serverSyntax));

            // Apply the rules engine.
            var context = new ServerActionContext(null, _location.FilesystemKey, _hsmArchive.ServerPartition, queueItem.StudyStorageKey)
            {
                CommandProcessor = processor
            };
            processor.AddCommand(new ApplyRulesCommand(destinationFolder, _location.StudyInstanceUid, context));

            if (!processor.Execute())
            {
                // Command sequence failed: mark the queue entry Failed with the reason.
                Platform.Log(LogLevel.Error, "Unexpected error processing restore request for {0} on archive {1}",
                             _location.StudyInstanceUid, _hsmArchive.PartitionArchive.Description);
                queueItem.FailureDescription = processor.FailureReason;
                _hsmArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Failed, Platform.Time);
            }
            else
            {
                // Unlock the Queue Entry and set to complete
                using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
                {
                    _hsmArchive.UpdateRestoreQueue(update, queueItem, RestoreQueueStatusEnum.Completed, Platform.Time.AddSeconds(60));
                    var studyLock = update.GetBroker<ILockStudy>();
                    var parms = new LockStudyParameters
                    {
                        StudyStorageKey = queueItem.StudyStorageKey,
                        QueueStudyStateEnum = QueueStudyStateEnum.Idle
                    };
                    bool retVal = studyLock.Execute(parms);
                    if (!parms.Successful || !retVal)
                    {
                        // Unlock failure is logged but does not fail the restore; the commit still happens.
                        Platform.Log(LogLevel.Info, "Study {0} on partition {1} failed to unlock.",
                                     _location.StudyInstanceUid, _hsmArchive.ServerPartition.Description);
                    }
                    update.Commit();

                    Platform.Log(LogLevel.Info, "Successfully restored study: {0} on archive {1}",
                                 _location.StudyInstanceUid, _hsmArchive.PartitionArchive.Description);

                    // Refresh the cached location before notifying listeners.
                    _location = ReloadStorageLocation();
                    OnStudyRestored(_location);
                }
            }
        }
    }
    catch (StudyIntegrityValidationFailure ex)
    {
        // study has been restored but it seems corrupted. Need to reprocess it.
        ReprocessStudy(_location, ex.Message);
    }
    catch (Exception e)
    {
        Platform.Log(LogLevel.Error, e, "Unexpected exception processing restore request for {0} on archive {1}",
                     _location.StudyInstanceUid, _hsmArchive.PartitionArchive.Description);
        queueItem.FailureDescription = e.Message;
        _hsmArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Failed, Platform.Time);
    }
}
/// <summary>
/// Restores a nearline (offline) study from its archive zip into a newly selected
/// writable filesystem, re-inserts the filesystem study storage record, and
/// re-applies the rules engine.
/// </summary>
/// <param name="queueItem">The <see cref="RestoreQueue"/> entry being processed.</param>
/// <param name="zipFile">Path to the archived zip file.</param>
/// <param name="studyFolder">The date-based study folder name under the partition folder.</param>
public void RestoreNearlineStudy(RestoreQueue queueItem, string zipFile, string studyFolder)
{
    ServerFilesystemInfo fs = _hsmArchive.Selector.SelectFilesystem();
    if (fs == null)
    {
        // No destination available: push the queue entry back out 5 minutes.
        DateTime scheduleTime = Platform.Time.AddMinutes(5);
        Platform.Log(LogLevel.Error, "No writeable filesystem for restore, rescheduling restore request to {0}",
                     scheduleTime);
        queueItem.FailureDescription = "No writeable filesystem for restore, rescheduling restore request";
        _hsmArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Pending, scheduleTime);
        return;
    }

    string destinationFolder = Path.Combine(fs.Filesystem.FilesystemPath, _hsmArchive.ServerPartition.PartitionFolder);

    StudyStorageLocation restoredLocation = null;
    try
    {
        using (var processor = new ServerCommandProcessor("HSM Restore Offline Study"))
        {
            // Build the folder hierarchy: partition / studyFolder / studyInstanceUid.
            processor.AddCommand(new CreateDirectoryCommand(destinationFolder));
            destinationFolder = Path.Combine(destinationFolder, studyFolder);
            processor.AddCommand(new CreateDirectoryCommand(destinationFolder));
            destinationFolder = Path.Combine(destinationFolder, _studyStorage.StudyInstanceUid);
            processor.AddCommand(new CreateDirectoryCommand(destinationFolder));
            processor.AddCommand(new ExtractZipCommand(zipFile, destinationFolder));

            // We rebuild the StudyXml, in case any settings or issues have happened since archival
            processor.AddCommand(new RebuildStudyXmlCommand(_studyStorage.StudyInstanceUid, destinationFolder));

            // Apply the rules engine.
            var context = new ServerActionContext(null, fs.Filesystem.GetKey(), _hsmArchive.ServerPartition, queueItem.StudyStorageKey)
            {
                CommandProcessor = processor
            };
            processor.AddCommand(new ApplyRulesCommand(destinationFolder, _studyStorage.StudyInstanceUid, context));

            // Do the actual insert into the DB
            var insertStorageCommand = new InsertFilesystemStudyStorageCommand(
                _hsmArchive.PartitionArchive.ServerPartitionKey,
                _studyStorage.StudyInstanceUid,
                studyFolder,
                fs.Filesystem.GetKey(),
                _syntax);
            processor.AddCommand(insertStorageCommand);

            if (!processor.Execute())
            {
                // Command sequence failed: mark the queue entry Failed with the reason.
                Platform.Log(LogLevel.Error, "Unexpected error processing restore request for {0} on archive {1}",
                             _studyStorage.StudyInstanceUid, _hsmArchive.PartitionArchive.Description);
                queueItem.FailureDescription = processor.FailureReason;
                _hsmArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Failed, Platform.Time);
            }
            else
            {
                restoredLocation = insertStorageCommand.Location;

                // Unlock the Queue Entry
                using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
                {
                    bool retVal = _hsmArchive.UpdateRestoreQueue(update, queueItem, RestoreQueueStatusEnum.Completed, Platform.Time.AddSeconds(60));
                    var studyLock = update.GetBroker<ILockStudy>();
                    var parms = new LockStudyParameters
                    {
                        StudyStorageKey = queueItem.StudyStorageKey,
                        QueueStudyStateEnum = QueueStudyStateEnum.Idle
                    };
                    retVal = retVal && studyLock.Execute(parms);
                    if (!parms.Successful || !retVal)
                    {
                        // Unlike the online-restore path, failure to unlock here aborts
                        // the commit by throwing; the outer catch marks the entry Failed.
                        string message = String.Format("Study {0} on partition {1} failed to unlock.",
                                                       _studyStorage.StudyInstanceUid, _hsmArchive.ServerPartition.Description);
                        Platform.Log(LogLevel.Info, message);
                        throw new ApplicationException(message);
                    }
                    update.Commit();

                    Platform.Log(LogLevel.Info, "Successfully restored study: {0} on archive {1}",
                                 _studyStorage.StudyInstanceUid, _hsmArchive.PartitionArchive.Description);

                    OnStudyRestored(restoredLocation);
                }
            }
        }
    }
    catch (StudyIntegrityValidationFailure ex)
    {
        Debug.Assert(restoredLocation != null);
        // study has been restored but it seems corrupted. Need to reprocess it.
        ReprocessStudy(restoredLocation, ex.Message);
    }
    catch (Exception e)
    {
        Platform.Log(LogLevel.Error, e, "Unexpected exception processing restore request for {0} on archive {1}",
                     _studyStorage.StudyInstanceUid, _hsmArchive.PartitionArchive.Description);
        _hsmArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Failed, Platform.Time);
    }
}
/// <summary>
/// Process StudyCompress Candidates retrieved from the <see cref="Model.FilesystemQueue"/> table.
/// </summary>
/// <param name="candidateList">The list of candidate studies for compression.</param>
/// <param name="type">The type of compress candidate (lossy or lossless).</param>
/// <remarks>
/// Each candidate is locked, converted into a CompressStudy WorkQueue entry, and
/// committed independently; a failure for one study does not stop the others.
/// </remarks>
private void ProcessCompressCandidates(IEnumerable<FilesystemQueue> candidateList, FilesystemQueueTypeEnum type)
{
    using (ServerExecutionContext context = new ServerExecutionContext())
    {
        DateTime scheduledTime = Platform.Time.AddSeconds(10);

        foreach (FilesystemQueue queueItem in candidateList)
        {
            // Check for Shutdown/Cancel
            if (CancelPending)
                break;

            // First, get the StudyStorage locations for the study, and calculate the disk usage.
            StudyStorageLocation location;
            if (!FilesystemMonitor.Instance.GetWritableStudyStorageLocation(queueItem.StudyStorageKey, out location))
                continue;

            StudyXml studyXml;
            try
            {
                studyXml = LoadStudyXml(location);
            }
            catch (Exception e)
            {
                Platform.Log(LogLevel.Error, e, "Skipping compress candidate, unexpected exception loading StudyXml file for {0}",
                             location.GetStudyPath());
                continue;
            }

            using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
            {
                // Lock the study into the CompressScheduled state before queueing work.
                ILockStudy lockstudy = update.GetBroker<ILockStudy>();
                LockStudyParameters lockParms = new LockStudyParameters();
                lockParms.StudyStorageKey = location.Key;
                lockParms.QueueStudyStateEnum = QueueStudyStateEnum.CompressScheduled;
                if (!lockstudy.Execute(lockParms) || !lockParms.Successful)
                {
                    Platform.Log(LogLevel.Warn, "Unable to lock study for inserting Lossless Compress. Reason:{0}. Skipping study ({1})",
                                 lockParms.FailureReason, location.StudyInstanceUid);
                    continue;
                }

                // Stagger the scheduled times so the compress entries don't all fire at once.
                scheduledTime = scheduledTime.AddSeconds(3);

                IInsertWorkQueueFromFilesystemQueue workQueueInsert = update.GetBroker<IInsertWorkQueueFromFilesystemQueue>();

                // BUG FIX: the original assigned FilesystemQueueTypeEnum twice (first to
                // LosslessCompress, then to the actual candidate type) and WorkQueueTypeEnum
                // twice; the dead first assignments were misleading for lossy candidates.
                // Each parameter is now assigned exactly once.
                InsertWorkQueueFromFilesystemQueueParameters insertParms = new InsertWorkQueueFromFilesystemQueueParameters();
                insertParms.WorkQueueTypeEnum = WorkQueueTypeEnum.CompressStudy;
                insertParms.FilesystemQueueTypeEnum = type;
                insertParms.StudyStorageKey = location.GetKey();
                insertParms.ServerPartitionKey = location.ServerPartitionKey;
                insertParms.ScheduledTime = scheduledTime;
                insertParms.DeleteFilesystemQueue = true;
                insertParms.Data = queueItem.QueueXml;

                try
                {
                    WorkQueue entry = workQueueInsert.FindOne(insertParms);
                    InsertWorkQueueUidFromStudyXml(studyXml, update, entry.GetKey());
                    update.Commit();
                    _studiesInserted++;
                }
                catch (Exception e)
                {
                    Platform.Log(LogLevel.Error, e, "Skipping compress record, unexpected problem inserting 'CompressStudy' record into WorkQueue for Study {0}",
                                 location.StudyInstanceUid);
                    // throw; -- would cause abort of inserts, go ahead and try everything
                }
            }
        }
    }
}
/// <summary>
/// Process study migration candidates retrieved from the <see cref="Model.FilesystemQueue"/> table
/// </summary>
/// <param name="candidateList">The list of candidate studies for tier migration.</param>
private void ProcessStudyMigrateCandidates(IList<FilesystemQueue> candidateList)
{
    Platform.CheckForNullReference(candidateList, "candidateList");

    if (candidateList.Count > 0)
    {
        Platform.Log(LogLevel.Debug, "Scheduling tier-migration for {0} eligible studies...", candidateList.Count);
    }

    FilesystemProcessStatistics summaryStats = new FilesystemProcessStatistics("FilesystemTierMigrateInsert");
    foreach (FilesystemQueue queueItem in candidateList)
    {
        // Stop once enough bytes have been scheduled for removal or cancel was requested.
        if (_bytesToRemove < 0 || CancelPending)
        {
            Platform.Log(LogLevel.Debug, "Estimated disk space has been reached.");
            break;
        }
        StudyProcessStatistics stats = new StudyProcessStatistics("TierMigrateStudy");
        stats.TotalTime.Start();

        stats.StudyStorageTime.Start();
        // First, get the StudyStorage locations for the study, and calculate the disk usage.
        StudyStorageLocation location;
        if (!FilesystemMonitor.Instance.GetWritableStudyStorageLocation(queueItem.StudyStorageKey, out location))
        {
            continue;
        }
        stats.StudyStorageTime.End();

        stats.CalculateDirectorySizeTime.Start();
        // Get the disk usage
        float studySize = EstimateFolderSizeFromStudyXml(location);
        stats.CalculateDirectorySizeTime.End();
        stats.DirectorySize = (ulong)studySize;

        stats.DbUpdateTime.Start();
        using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
        {
            // Lock the study into MigrationScheduled before queueing the migration.
            ILockStudy lockstudy = update.GetBroker<ILockStudy>();
            LockStudyParameters lockParms = new LockStudyParameters
            {
                StudyStorageKey = location.Key,
                QueueStudyStateEnum = QueueStudyStateEnum.MigrationScheduled
            };
            if (!lockstudy.Execute(lockParms) || !lockParms.Successful)
            {
                Platform.Log(LogLevel.Warn, "Unable to lock study for inserting Tier Migration. Reason:{0}. Skipping study ({1})",
                             lockParms.FailureReason, location.StudyInstanceUid);
                continue;
            }

            IInsertWorkQueueFromFilesystemQueue broker = update.GetBroker<IInsertWorkQueueFromFilesystemQueue>();
            InsertWorkQueueFromFilesystemQueueParameters insertParms = new InsertWorkQueueFromFilesystemQueueParameters
            {
                StudyStorageKey = location.GetKey(),
                ServerPartitionKey = location.ServerPartitionKey,
                ScheduledTime = _scheduledTime,
                DeleteFilesystemQueue = true,
                WorkQueueTypeEnum = WorkQueueTypeEnum.MigrateStudy,
                FilesystemQueueTypeEnum = FilesystemQueueTypeEnum.TierMigrate
            };
            Platform.Log(LogLevel.Debug, "Scheduling tier-migration for study {0} from {1} at {2}...",
                         location.StudyInstanceUid, location.FilesystemTierEnum, _scheduledTime);
            WorkQueue insertItem = broker.FindOne(insertParms);
            if (insertItem == null)
            {
                Platform.Log(LogLevel.Error, "Unexpected problem inserting 'MigrateStudy' record into WorkQueue for Study {0}",
                             location.StudyInstanceUid);
            }
            else
            {
                update.Commit();
                _bytesToRemove -= studySize;
                _studiesMigrated++;

                // spread out the scheduled migration entries based on the size
                // assuming that the larger the study the longer it will take to migrate
                // The assumed migration speed is arbitarily chosen.
                double migrationSpeed = ServiceLockSettings.Default.TierMigrationSpeed * 1024 * 1024; // MB / sec
                TimeSpan estMigrateTime = TimeSpan.FromSeconds(studySize / migrationSpeed);
                _scheduledTime = _scheduledTime.Add(estMigrateTime);
            }
        }
        stats.DbUpdateTime.End();

        stats.TotalTime.End();

        summaryStats.AddSubStats(stats);
        StatisticsLogger.Log(LogLevel.Debug, stats);
    }
    summaryStats.CalculateAverage();
    StatisticsLogger.Log(LogLevel.Info, false, summaryStats);
}
/// <summary>
/// Method for getting next <see cref="WorkQueue"/> entry.
/// </summary>
/// <param name="processorId">The Id of the processor.</param>
/// <remarks>
/// Queries run in strict priority order: Stat, High, then any priority while
/// memory-limited thread capacity remains; when only memory-limited capacity is
/// left, memory-limited Stat and then memory-limited any. Each query executes in
/// its own update context, committed only when an entry is actually claimed.
/// The original five copy-pasted query blocks are factored into one helper.
/// </remarks>
/// <returns>
/// A <see cref="WorkQueue"/> entry if found, or else null.
/// </returns>
public Model.WorkQueue GetWorkQueueItem(string processorId)
{
    Model.WorkQueue queueListItem = null;

    // First check for Stat WorkQueue items.
    if (_threadPool.MemoryLimitedThreadsAvailable)
    {
        queueListItem = ClaimNextWorkQueueItem(new WorkQueueQueryParameters
        {
            ProcessorID = processorId,
            WorkQueuePriorityEnum = WorkQueuePriorityEnum.Stat
        });
    }

    // If we don't have the max high priority threads in use,
    // first see if there's any available
    if (queueListItem == null && _threadPool.HighPriorityThreadsAvailable)
    {
        queueListItem = ClaimNextWorkQueueItem(new WorkQueueQueryParameters
        {
            ProcessorID = processorId,
            WorkQueuePriorityEnum = WorkQueuePriorityEnum.High
        });
    }

    // If we didn't find a high priority work queue item, and we have threads
    // available for memory limited work queue items, query for the next queue item available.
    if (queueListItem == null && _threadPool.MemoryLimitedThreadsAvailable)
    {
        queueListItem = ClaimNextWorkQueueItem(new WorkQueueQueryParameters
        {
            ProcessorID = processorId
        });
    }

    // This logic only accessed if memory limited and priority threads are used up
    if (queueListItem == null && !_threadPool.MemoryLimitedThreadsAvailable)
    {
        queueListItem = ClaimNextWorkQueueItem(new WorkQueueQueryParameters
        {
            ProcessorID = processorId,
            WorkQueuePriorityEnum = WorkQueuePriorityEnum.Stat,
            MemoryLimited = true
        });
    }

    // This logic only accessed if memory limited and priority threads are used up
    if (queueListItem == null && !_threadPool.MemoryLimitedThreadsAvailable)
    {
        queueListItem = ClaimNextWorkQueueItem(new WorkQueueQueryParameters
        {
            ProcessorID = processorId,
            MemoryLimited = true
        });
    }

    return queueListItem;
}

/// <summary>
/// Runs a single claim query against the WorkQueue table, committing the update
/// context only when an entry was returned (i.e., successfully claimed).
/// </summary>
/// <param name="parms">The query parameters describing which entries are eligible.</param>
/// <returns>The claimed <see cref="Model.WorkQueue"/> entry, or null when none matched.</returns>
private static Model.WorkQueue ClaimNextWorkQueueItem(WorkQueueQueryParameters parms)
{
    using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        IQueryWorkQueue select = updateContext.GetBroker<IQueryWorkQueue>();
        Model.WorkQueue item = select.FindOne(parms);
        if (item != null)
            updateContext.Commit();
        return item;
    }
}
/// <summary>
/// Lookup the device entity in the database corresponding to the remote AE of the association.
/// </summary>
/// <param name="partition">The partition to look up the devices</param>
/// <param name="association">The association</param>
/// <param name="isNew">Indicates whether the device returned is created by the call.</param>
/// <returns>The device record corresponding to the calling AE of the association, or null when the
/// device is unknown and the partition does not accept arbitrary devices.</returns>
public static Device LookupDevice(ServerPartition partition, AssociationParameters association, out bool isNew)
{
    isNew = false;

    Device device;

    // NOTE(review): the cache is probed with CallingAE + partition.Key but populated
    // below with device.AeTitle + partition.Key; these agree only when the stored AE
    // title exactly matches the calling AE -- confirm this asymmetry is intended.
    if (DeviceCache.TryGetValue(association.CallingAE + partition.Key, out device))
    {
        return (device);
    }

    using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        var deviceEntityBroker = updateContext.GetBroker<IDeviceEntityBroker>();

        // Setup the select parameters.
        var queryParameters = new DeviceSelectCriteria();
        queryParameters.AeTitle.EqualTo(association.CallingAE);
        queryParameters.ServerPartitionKey.EqualTo(partition.GetKey());
        var devices = deviceEntityBroker.Find(queryParameters);

        // Re-check the AE title case-sensitively in memory, since the database
        // comparison may be case-insensitive.
        foreach (var d in devices)
        {
            if (string.Compare(d.AeTitle, association.CallingAE, false, CultureInfo.InvariantCulture) == 0)
            {
                device = d;
                break;
            }
        }

        if (device == null)
        {
            // Unknown device: reject, or auto-register it depending on partition settings.
            if (!partition.AcceptAnyDevice)
            {
                return (null);
            }

            if (partition.AutoInsertDevice)
            {
                // Auto-insert a new entry in the table.
                var updateColumns = new DeviceUpdateColumns
                {
                    AeTitle = association.CallingAE,
                    Enabled = true,
                    Description = String.Format("AE: {0}", association.CallingAE),
                    Dhcp = false,
                    IpAddress = association.RemoteEndPoint.Address.ToString(),
                    ServerPartitionKey = partition.GetKey(),
                    Port = partition.DefaultRemotePort,
                    AllowQuery = true,
                    AllowRetrieve = true,
                    AllowStorage = true,
                    ThrottleMaxConnections = ImageServerCommonConfiguration.Device.MaxConnections,
                    DeviceTypeEnum = DeviceTypeEnum.Workstation
                };

                var insert = updateContext.GetBroker<IDeviceEntityBroker>();
                device = insert.Insert(updateColumns);
                updateContext.Commit();
                isNew = true;
            }
        }

        if (device != null)
        {
            // For DHCP devices, we always update the remote ip address, if its changed from what is in the DB.
            if (device.Dhcp && !association.RemoteEndPoint.Address.ToString().Equals(device.IpAddress))
            {
                var updateColumns = new DeviceUpdateColumns
                {
                    IpAddress = association.RemoteEndPoint.Address.ToString(),
                    LastAccessedTime = Platform.Time
                };

                if (!deviceEntityBroker.Update(device.GetKey(), updateColumns))
                {
                    Platform.Log(LogLevel.Error, "Unable to update IP Address for DHCP device {0} on partition '{1}'",
                                 device.AeTitle, partition.Description);
                }
                else
                {
                    updateContext.Commit();
                }
            }
            else if (!isNew)
            {
                // Existing non-DHCP device: just touch its last-accessed time.
                var updateColumns = new DeviceUpdateColumns
                {
                    LastAccessedTime = Platform.Time
                };

                if (!deviceEntityBroker.Update(device.GetKey(), updateColumns))
                {
                    Platform.Log(LogLevel.Error, "Unable to update LastAccessedTime device {0} on partition '{1}'",
                                 device.AeTitle, partition.Description);
                }
                else
                {
                    updateContext.Commit();
                }
            }

            DeviceCache.Add(device.AeTitle + partition.Key, device);
        }
    }

    return (device);
}
/// <summary>
/// Traverse the filesystem directories for studies to rebuild the XML for.
/// </summary>
/// <param name="filesystem">The filesystem whose partition/date/study directory tree is scanned.</param>
/// <remarks>
/// Walks filesystem -> partition -> date -> study directories. For each study directory it resolves
/// the StudyStorageLocation (falling back to the Study Instance UID read from an actual file when
/// the directory name is stale, e.g. trailing-period UIDs), repairs database locations that point
/// at a different filesystem (defect #9673 cleanup), then rebuilds the study xml under a write
/// lock. Studies that fail to lock are retried once at the end.
/// </remarks>
private void TraverseFilesystemStudies(Filesystem filesystem)
{
    List<StudyStorageLocation> lockFailures = new List<StudyStorageLocation>();
    ServerPartition partition;

    DirectoryInfo filesystemDir = new DirectoryInfo(filesystem.FilesystemPath);

    foreach (DirectoryInfo partitionDir in filesystemDir.GetDirectories())
    {
        // Skip folders that don't correspond to a configured server partition.
        if (GetServerPartition(partitionDir.Name, out partition) == false)
        {
            continue;
        }

        foreach (DirectoryInfo dateDir in partitionDir.GetDirectories())
        {
            // Skip the special Deleted / Reconcile folders.
            if (dateDir.FullName.EndsWith("Deleted") ||
                dateDir.FullName.EndsWith(ServerPlatform.ReconcileStorageFolder))
            {
                continue;
            }

            foreach (DirectoryInfo studyDir in dateDir.GetDirectories())
            {
                // Check for Cancel message
                if (CancelPending)
                {
                    return;
                }

                String studyInstanceUid = studyDir.Name;

                StudyStorageLocation location;
                try
                {
                    FilesystemMonitor.Instance.GetWritableStudyStorageLocation(partition.Key,
                                                                               studyInstanceUid, StudyRestore.False,
                                                                               StudyCache.False, out location);
                }
                catch (StudyNotFoundException)
                {
                    // The directory name didn't resolve; look inside for an actual DICOM file.
                    List<FileInfo> fileList = LoadSopFiles(studyDir, true);
                    if (fileList.Count == 0)
                    {
                        Platform.Log(LogLevel.Warn, "Found empty study folder: {0}\\{1}", dateDir.Name, studyDir.Name);
                        continue;
                    }

                    DicomFile file = LoadFileFromList(fileList);
                    if (file == null)
                    {
                        Platform.Log(LogLevel.Warn, "Found directory with no readable files: {0}\\{1}", dateDir.Name,
                                     studyDir.Name);
                        continue;
                    }

                    // Do a second check, using the study instance uid from a file in the directory.
                    // had an issue with trailing periods on uids causing us to not find the
                    // study storage, and insert a new record into the database.
                    studyInstanceUid = file.DataSet[DicomTags.StudyInstanceUid].ToString();
                    if (!studyInstanceUid.Equals(studyDir.Name))
                    {
                        try
                        {
                            FilesystemMonitor.Instance.GetWritableStudyStorageLocation(partition.Key,
                                                                                       studyInstanceUid,
                                                                                       StudyRestore.False,
                                                                                       StudyCache.False, out location);
                        }
                        catch (Exception e)
                        {
                            Platform.Log(LogLevel.Warn, "Study {0} on filesystem partition {1} not found {2}: {3}",
                                         studyInstanceUid, partition.Description, studyDir.ToString(), e.Message);
                            continue;
                        }
                    }
                    else
                    {
                        Platform.Log(LogLevel.Warn, "Study {0} on filesystem partition {1} not found {2}",
                                     studyInstanceUid, partition.Description, studyDir.ToString());
                        continue;
                    }
                }
                catch (Exception e)
                {
                    Platform.Log(LogLevel.Warn, "Study {0} on filesystem partition {1} not found {2}: {3}",
                                 studyInstanceUid, partition.Description, studyDir.ToString(), e.Message);
                    continue;
                }

                // Location has been loaded, make sure its on the same filesystem
                if (!location.FilesystemKey.Equals(filesystem.Key))
                {
                    Platform.Log(LogLevel.Warn,
                                 "Study {0} on filesystem in directory: {1} is stored in different directory in the database: {2}",
                                 studyInstanceUid, studyDir.ToString(), location.GetStudyPath());
                    try
                    {
                        // Here due to defect #9673, attempting to cleanup errors from this ticket.
                        if (Directory.Exists(location.GetStudyPath()))
                        {
                            // The database's location has a study xml: trust it, drop the local copy.
                            if (File.Exists(location.GetStudyXmlPath()))
                            {
                                Platform.Log(LogLevel.Warn,
                                             "Deleting study {0}'s local directory. The database location has valid study: {1}",
                                             studyInstanceUid, studyDir.FullName);
                                Directory.Delete(studyDir.FullName, true);
                                continue;
                            }

                            Platform.Log(LogLevel.Warn,
                                         "Deleting study {0} directory stored in database, it does not have a study xml file: {1}",
                                         studyInstanceUid, location.GetStudyPath());
                            // Delete the Database's location, and we'll just adjust the database to point to the current directory
                            Directory.Delete(location.GetStudyPath(), true);
                        }

                        // Repoint the FilesystemStudyStorage record at the filesystem being scanned.
                        using (
                            var readContext =
                                PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(
                                    UpdateContextSyncMode.Flush))
                        {
                            var update = new FilesystemStudyStorageUpdateColumns { FilesystemKey = filesystem.Key };
                            var broker = readContext.GetBroker<IFilesystemStudyStorageEntityBroker>();
                            broker.Update(location.FilesystemStudyStorageKey, update);
                            readContext.Commit();
                        }

                        Platform.Log(LogLevel.Warn,
                                     "Updated Study {0} FilesystemStudyStorage to point to the current filesystem.",
                                     studyInstanceUid);

                        // Re-resolve the location now that the database was updated.
                        FilesystemMonitor.Instance.GetWritableStudyStorageLocation(partition.Key,
                                                                                   studyInstanceUid, StudyRestore.False,
                                                                                   StudyCache.False, out location);
                    }
                    catch (Exception x)
                    {
                        Platform.Log(LogLevel.Error, x,
                                     "Unexpected error attempting to update storage location for study: {0}",
                                     studyInstanceUid);
                    }
                }

                try
                {
                    // Rebuild requires exclusive access; failures are queued for one retry below.
                    if (!location.AcquireWriteLock())
                    {
                        Platform.Log(LogLevel.Warn, "Unable to lock study: {0}, delaying rebuild",
                                     location.StudyInstanceUid);
                        lockFailures.Add(location);
                        continue;
                    }

                    var rebuilder = new StudyXmlRebuilder(location);
                    rebuilder.RebuildXml();

                    location.ReleaseWriteLock();
                }
                catch (Exception e)
                {
                    Platform.Log(LogLevel.Error, e,
                                 "Unexpected exception when rebuilding study xml for study: {0}",
                                 location.StudyInstanceUid);
                    lockFailures.Add(location);
                }
            }

            // Cleanup the parent date directory, if its empty
            DirectoryUtility.DeleteIfEmpty(dateDir.FullName);
        }
    }

    // Re-do all studies that failed locks one time
    foreach (StudyStorageLocation location in lockFailures)
    {
        try
        {
            if (!location.AcquireWriteLock())
            {
                Platform.Log(LogLevel.Warn, "Unable to lock study: {0}, skipping rebuild",
                             location.StudyInstanceUid);
                continue;
            }

            StudyXmlRebuilder rebuilder = new StudyXmlRebuilder(location);
            rebuilder.RebuildXml();

            location.ReleaseWriteLock();
        }
        catch (Exception e)
        {
            Platform.Log(LogLevel.Error, e,
                         "Unexpected exception on retry when rebuilding study xml for study: {0}",
                         location.StudyInstanceUid);
        }
    }
}
/// <summary>
/// Ensures the series/instance counts recorded in the study xml are consistent with the files
/// actually reprocessed, pruning stale entries and pushing corrected counts to the Study record.
/// </summary>
/// <param name="studyXml">The study xml to verify; stale series/SOP entries are removed from it.</param>
/// <param name="processedSeriesMap">Map of series instance uid to the list of processed SOP instance uids.</param>
private void EnsureConsistentObjectCount(StudyXml studyXml, IDictionary<string, List<string>> processedSeriesMap)
{
    Platform.CheckForNullReference(studyXml, "studyXml");

    // We have to ensure that the counts in studyXml and what we have processed are consistent.
    // Files or folders may be reprocessed but then become missing when the entry is resumed.
    // We have to remove them from the studyXml before committing it.
    Platform.Log(LogLevel.Info, "Verifying study xml against the filesystems");
    int filesProcessed = 0;
    foreach (string seriesUid in processedSeriesMap.Keys)
    {
        filesProcessed += processedSeriesMap[seriesUid].Count;
    }

    // Used to keep track of the series to be removed.
    // We can't remove the item from the study xml while we are
    // iterating through it.
    var seriesToRemove = new List<string>();
    foreach (SeriesXml seriesXml in studyXml)
    {
        if (!processedSeriesMap.ContainsKey(seriesXml.SeriesInstanceUid))
        {
            seriesToRemove.Add(seriesXml.SeriesInstanceUid);
        }
        else
        {
            // Check all instances in the series.
            List<string> foundInstances = processedSeriesMap[seriesXml.SeriesInstanceUid];
            var instanceToRemove = new List<string>();
            foreach (InstanceXml instanceXml in seriesXml)
            {
                if (!foundInstances.Contains(instanceXml.SopInstanceUid))
                {
                    // The sop no longer exists in the filesystem.
                    instanceToRemove.Add(instanceXml.SopInstanceUid);
                }
            }

            foreach (string instanceUid in instanceToRemove)
            {
                // Assigning null removes the entry from the series xml.
                seriesXml[instanceUid] = null;
                Platform.Log(LogLevel.Info, "Removed SOP {0} in the study xml: it no longer exists.", instanceUid);
            }
        }
    }

    foreach (string seriesUid in seriesToRemove)
    {
        // Assigning null removes the series from the study xml.
        studyXml[seriesUid] = null;
        Platform.Log(LogLevel.Info, "Removed Series {0} in the study xml: it no longer exists.", seriesUid);
    }

    // BUGFIX: the failure message previously formatted NumberOfStudyRelatedInstances even
    // though the condition compares NumberOfStudyRelatedSeries, producing a misleading
    // series-count mismatch diagnostic.
    Platform.CheckTrue(studyXml.NumberOfStudyRelatedSeries == processedSeriesMap.Count,
                       String.Format("Number of series in the xml do not match number of series reprocessed: {0} vs {1}",
                                     studyXml.NumberOfStudyRelatedSeries, processedSeriesMap.Count));

    Platform.CheckTrue(studyXml.NumberOfStudyRelatedInstances == filesProcessed,
                       String.Format("Number of instances in the xml do not match number of reprocessed: {0} vs {1}",
                                     studyXml.NumberOfStudyRelatedInstances, filesProcessed));

    Platform.Log(LogLevel.Info, "Study xml has been verified.");

    if (StorageLocation.Study != null)
    {
        // Update the instance count in the db.
        using (IUpdateContext updateContext =
            PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
        {
            var broker = updateContext.GetBroker<IStudyEntityBroker>();
            var columns = new StudyUpdateColumns
            {
                NumberOfStudyRelatedInstances = studyXml.NumberOfStudyRelatedInstances,
                NumberOfStudyRelatedSeries = studyXml.NumberOfStudyRelatedSeries
            };
            broker.Update(StorageLocation.Study.GetKey(), columns);
            updateContext.Commit();
        }
    }
    else
    {
        // Alert: orphaned StudyStorage entry — reprocessed, but no Study record was created.
        RaiseAlert(WorkQueueItem, AlertLevel.Critical,
                   String.Format("Study {0} has been reprocessed but Study record was NOT created. Images reprocessed: {1}. Path={2}",
                                 StorageLocation.StudyInstanceUid, filesProcessed,
                                 StorageLocation.GetStudyPath()));
    }
}
/// <summary>
/// Inserts work queue entry to process the duplicates.
/// </summary>
/// <param name="entryKey"><see cref="ServerEntityKey"/> of the <see cref="StudyIntegrityQueue"/> entry that has <see cref="StudyIntegrityReasonEnum"/> equal to <see cref="StudyIntegrityReasonEnum.Duplicate"/> </param>
/// <param name="action">How the duplicates should be handled (carried in the work queue data).</param>
/// <remarks>
/// All work is done in one update context/transaction: the study is locked
/// (QueueStudyState -> ReconcileScheduled), a ProcessDuplicateSop work queue entry is
/// inserted, each duplicate uid is moved from the integrity queue to the work queue uid
/// table, and finally the integrity queue entry itself is deleted.
/// Throws <see cref="ApplicationException"/> if the study cannot be locked.
/// </remarks>
public void Process(ServerEntityKey entryKey, ProcessDuplicateAction action)
{
    DuplicateSopReceivedQueue entry = DuplicateSopReceivedQueue.Load(HttpContextData.Current.ReadContext, entryKey);
    Platform.CheckTrue(entry.StudyIntegrityReasonEnum == StudyIntegrityReasonEnum.Duplicate, "Invalid type of entry");

    IList<StudyIntegrityQueueUid> uids = LoadDuplicateSopUid(entry);

    using (IUpdateContext context = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        // Payload stored on the work queue entry so the processor knows what to do.
        ProcessDuplicateQueueEntryQueueData data = new ProcessDuplicateQueueEntryQueueData
        {
            Action = action,
            DuplicateSopFolder = entry.GetFolderPath(context),
            UserName = ServerHelper.CurrentUserName,
        };

        // Lock the study for reconciliation; abort if the lock cannot be taken.
        LockStudyParameters lockParms = new LockStudyParameters
        {
            QueueStudyStateEnum = QueueStudyStateEnum.ReconcileScheduled,
            StudyStorageKey = entry.StudyStorageKey
        };

        ILockStudy lockBbroker = context.GetBroker<ILockStudy>();
        lockBbroker.Execute(lockParms);
        if (!lockParms.Successful)
        {
            throw new ApplicationException(lockParms.FailureReason);
        }

        // Insert the ProcessDuplicateSop work queue entry.
        IWorkQueueProcessDuplicateSopBroker broker = context.GetBroker<IWorkQueueProcessDuplicateSopBroker>();
        WorkQueueProcessDuplicateSopUpdateColumns columns = new WorkQueueProcessDuplicateSopUpdateColumns
        {
            Data = XmlUtils.SerializeAsXmlDoc(data),
            GroupID = entry.GroupID,
            ScheduledTime = Platform.Time,
            ExpirationTime = Platform.Time.Add(TimeSpan.FromMinutes(15)),
            ServerPartitionKey = entry.ServerPartitionKey,
            WorkQueuePriorityEnum = WorkQueuePriorityEnum.Medium,
            StudyStorageKey = entry.StudyStorageKey,
            WorkQueueStatusEnum = WorkQueueStatusEnum.Pending
        };

        WorkQueueProcessDuplicateSop processDuplicateWorkQueueEntry = broker.Insert(columns);

        // Move each duplicate uid from the integrity queue to the new work queue entry.
        IWorkQueueUidEntityBroker workQueueUidBroker = context.GetBroker<IWorkQueueUidEntityBroker>();
        IStudyIntegrityQueueUidEntityBroker duplicateUidBroke = context.GetBroker<IStudyIntegrityQueueUidEntityBroker>();
        foreach (StudyIntegrityQueueUid uid in uids)
        {
            WorkQueueUidUpdateColumns uidColumns = new WorkQueueUidUpdateColumns
            {
                Duplicate = true,
                Extension = ServerPlatform.DuplicateFileExtension,
                SeriesInstanceUid = uid.SeriesInstanceUid,
                SopInstanceUid = uid.SopInstanceUid,
                RelativePath = uid.RelativePath,
                WorkQueueKey = processDuplicateWorkQueueEntry.GetKey()
            };

            workQueueUidBroker.Insert(uidColumns);
            duplicateUidBroke.Delete(uid.GetKey());
        }

        // Remove the integrity queue entry itself, then commit everything atomically.
        IDuplicateSopEntryEntityBroker duplicateEntryBroker = context.GetBroker<IDuplicateSopEntryEntityBroker>();
        duplicateEntryBroker.Delete(entry.GetKey());

        context.Commit();
    }
}
/// <summary>
/// Load the rules engine from the Persistent Store and compile the conditions and actions.
/// </summary>
/// <remarks>
/// Clears any previously loaded rules, queries for enabled rules matching the configured
/// partition/apply-time (honouring the omit/include type lists), compiles each rule, and
/// groups the compiled rules by rule type. Rules that fail to compile are logged and skipped.
/// </remarks>
public void Load()
{
    Statistics.LoadTime.Start();

    // Start from a clean slate.
    _typeList.Clear();

    using (IReadContext read = PersistentStoreRegistry.GetDefaultStore().OpenReadContext())
    {
        IServerRuleEntityBroker ruleBroker = read.GetBroker<IServerRuleEntityBroker>();

        // Only enabled rules for this partition and apply-time are loaded.
        ServerRuleSelectCriteria selectCriteria = new ServerRuleSelectCriteria();
        selectCriteria.Enabled.EqualTo(true);
        selectCriteria.ServerRuleApplyTimeEnum.EqualTo(_applyTime);
        selectCriteria.ServerPartitionKey.EqualTo(_serverPartitionKey);

        // Restrict by rule type: an omit list takes precedence over an include list.
        if (_omitList.Count > 0)
        {
            selectCriteria.ServerRuleTypeEnum.NotIn(_omitList.ToArray());
        }
        else if (_includeList.Count > 0)
        {
            selectCriteria.ServerRuleTypeEnum.In(_includeList.ToArray());
        }

        IList<ServerRule> serverRules = ruleBroker.Find(selectCriteria);

        // A single specification compiler is shared; each rule is compiled right away.
        var specCompiler = GetSpecificationCompiler();

        foreach (ServerRule serverRule in serverRules)
        {
            try
            {
                var compiledRule = new Rule<ServerActionContext>
                {
                    Name = serverRule.RuleName,
                    IsDefault = serverRule.DefaultRule,
                    IsExempt = serverRule.ExemptRule,
                    Description = serverRule.ServerRuleApplyTimeEnum.Description
                };

                // The rule definition lives in the first <rule> child of the stored xml.
                XmlNode ruleNode = CollectionUtils.SelectFirst<XmlNode>(
                    serverRule.RuleXml.ChildNodes,
                    child => child.Name.Equals("rule"));

                var actionCompiler = GetActionCompiler(serverRule.ServerRuleTypeEnum);
                compiledRule.Compile(ruleNode, specCompiler, actionCompiler);

                // Group compiled rules by rule type, creating the bucket on first use.
                RuleTypeCollection<ServerActionContext, ServerRuleTypeEnum> bucket;
                if (!_typeList.TryGetValue(serverRule.ServerRuleTypeEnum, out bucket))
                {
                    bucket = new RuleTypeCollection<ServerActionContext, ServerRuleTypeEnum>(serverRule.ServerRuleTypeEnum);
                    _typeList.Add(serverRule.ServerRuleTypeEnum, bucket);
                }

                bucket.AddRule(compiledRule);
            }
            catch (Exception e)
            {
                // something wrong with the rule...
                Platform.Log(LogLevel.Warn, e, "Unable to add rule {0} to the engine. It will be skipped",
                             serverRule.RuleName);
            }
        }
    }

    Statistics.LoadTime.End();
}
/// <summary>
/// Captures the current (old) study/patient state and derives the target (new) state from the
/// configured update commands, priming all fields needed by the study edit operation.
/// </summary>
/// <remarks>
/// The "new" values start as copies of the "old" ones and are overridden only by commands that
/// target StudyInstanceUid, PatientId, IssuerOfPatientId or PatientsName. Also computes the new
/// study path and whether the original folder must be deleted afterwards.
/// </remarks>
private void Initialize()
{
    using (IPersistenceContext readContext = PersistentStoreRegistry.GetDefaultStore().OpenReadContext())
    {
        _backupDir = ServerExecutionContext.Current.BackupDirectory;

        // Snapshot the current (pre-edit) study identity.
        _oldStudyPath = _oldStudyLocation.GetStudyPath();
        _oldStudyInstanceUid = _oldStudyLocation.StudyInstanceUid;
        _oldStudyFolder = _oldStudyLocation.StudyFolder;
        _newStudyInstanceUid = _oldStudyInstanceUid;

        _study = _oldStudyLocation.LoadStudy(readContext);
        _totalSopCount = _study.NumberOfStudyRelatedInstances;
        _curPatient = _study.LoadPatient(readContext);
        _oldPatientInfo = new PatientInfo
        {
            Name = _curPatient.PatientsName,
            PatientId = _curPatient.PatientId,
            IssuerOfPatientId = _curPatient.IssuerOfPatientId
        };

        // New patient info starts as a copy of the old and is selectively overridden below.
        _newPatientInfo = new PatientInfo(_oldPatientInfo);
        Debug.Assert(_newPatientInfo.Equals(_oldPatientInfo));

        // Apply the identity-affecting tags from the pending update commands.
        foreach (BaseImageLevelUpdateCommand command in _commands)
        {
            ImageLevelUpdateEntry imageLevelUpdate = command.UpdateEntry;
            if (imageLevelUpdate == null)
            {
                continue;
            }

            if (imageLevelUpdate.TagPath.Tag.TagValue == DicomTags.StudyInstanceUid)
            {
                _newStudyInstanceUid = imageLevelUpdate.GetStringValue();
            }
            else if (imageLevelUpdate.TagPath.Tag.TagValue == DicomTags.PatientId)
            {
                _newPatientInfo.PatientId = imageLevelUpdate.GetStringValue();
            }
            else if (imageLevelUpdate.TagPath.Tag.TagValue == DicomTags.IssuerOfPatientId)
            {
                _newPatientInfo.IssuerOfPatientId = imageLevelUpdate.GetStringValue();
            }
            else if (imageLevelUpdate.TagPath.Tag.TagValue == DicomTags.PatientsName)
            {
                _newPatientInfo.Name = imageLevelUpdate.GetStringValue();
            }
        }

        Platform.CheckForNullReference(_newStudyInstanceUid, "_newStudyInstanceUid");

        // New path: <filesystem>/<partition folder>/<old date folder>/<new study uid>.
        NewStudyPath = Path.Combine(_oldStudyLocation.FilesystemPath, _partition.PartitionFolder);
        NewStudyPath = Path.Combine(NewStudyPath, _oldStudyFolder);
        NewStudyPath = Path.Combine(NewStudyPath, _newStudyInstanceUid);

        _newPatient = FindPatient(_newPatientInfo, readContext);
        _patientInfoIsNotChanged = _newPatientInfo.Equals(_oldPatientInfo);

        Statistics.InstanceCount = _study.NumberOfStudyRelatedInstances;
        Statistics.StudySize = (ulong)_oldStudyLocation.LoadStudyXml().GetStudySize();

        // The study path will be changed. We will need to delete the original folder at the end.
        // May be too simple to test if two paths are the same. But let's assume it is good enough for 99% of the time.
        _deleteOriginalFolder = NewStudyPath != _oldStudyPath;
        _initialized = true;
    }
}
private bool ArchiveLogs(ServerFilesystemInfo archiveFs) { string archivePath = Path.Combine(archiveFs.Filesystem.FilesystemPath, "AlertLog"); DateTime cutOffTime = Platform.Time.Date.AddDays(ServiceLockSettings.Default.AlertCachedDays * -1); AlertSelectCriteria criteria = new AlertSelectCriteria(); criteria.InsertTime.LessThan(cutOffTime); criteria.InsertTime.SortAsc(0); using (ServerExecutionContext context = new ServerExecutionContext()) { IAlertEntityBroker broker = context.ReadContext.GetBroker <IAlertEntityBroker>(); ImageServerLogWriter <Alert> writer = new ImageServerLogWriter <Alert>(archivePath, "Alert"); List <ServerEntityKey> keyList = new List <ServerEntityKey>(500); try { broker.Find(criteria, delegate(Alert result) { keyList.Add(result.Key); // If configured, don't flush to disk. We just delete the contents of keyList below. if (!ServiceLockSettings.Default.AlertDelete) { if (writer.WriteLog(result, result.InsertTime)) { // The logs been flushed, delete the log entries cached. using ( IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush) ) { IApplicationLogEntityBroker updateBroker = update.GetBroker <IApplicationLogEntityBroker>(); foreach (ServerEntityKey key in keyList) { updateBroker.Delete(key); } update.Commit(); } keyList = new List <ServerEntityKey>(); } } }); writer.FlushLog(); if (keyList.Count > 0) { using ( IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { IAlertEntityBroker updateBroker = update.GetBroker <IAlertEntityBroker>(); foreach (ServerEntityKey key in keyList) { updateBroker.Delete(key); } update.Commit(); } } } catch (Exception e) { Platform.Log(LogLevel.Error, e, "Unexpected exception when purging Alert log files."); writer.Dispose(); return(false); } writer.Dispose(); return(true); } }
/// <summary>
/// Process StudyPurge <see cref="FilesystemQueue"/> entries.
/// </summary>
/// <param name="candidateList">The list of candidates for purging</param>
/// <remarks>
/// For each candidate the study is locked (QueueStudyState -> PurgeScheduled) and a
/// 'PurgeStudy' WorkQueue entry is inserted via a stored procedure in the same transaction.
/// The estimated study size is subtracted from the remaining byte target; the loop stops once
/// the target is met or a cancel is pending. Per-study and summary statistics are logged.
/// </remarks>
private void ProcessStudyPurgeCandidates(IList<FilesystemQueue> candidateList)
{
    if (candidateList.Count > 0)
    {
        Platform.Log(LogLevel.Debug, "Scheduling purge study for {0} eligible studies...", candidateList.Count);
    }

    FilesystemProcessStatistics summaryStats = new FilesystemProcessStatistics("FilesystemPurgeInsert");

    foreach (FilesystemQueue queueItem in candidateList)
    {
        // Byte target reached, or the service is shutting down.
        if (_bytesToRemove < 0 || CancelPending)
        {
            break;
        }

        StudyProcessStatistics stats = new StudyProcessStatistics("PurgeStudy");
        stats.TotalTime.Start();

        stats.StudyStorageTime.Start();
        // First, get the StudyStorage locations for the study, and calculate the disk usage.
        StudyStorageLocation location;
        if (!FilesystemMonitor.Instance.GetWritableStudyStorageLocation(queueItem.StudyStorageKey, out location))
        {
            continue;
        }
        stats.StudyStorageTime.End();

        stats.CalculateDirectorySizeTime.Start();
        // Get the disk usage
        float studySize = EstimateFolderSizeFromStudyXml(location);
        stats.CalculateDirectorySizeTime.End();
        stats.DirectorySize = (ulong)studySize;

        stats.DbUpdateTime.Start();
        // Update the DB
        using (
            IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
        {
            // Lock the study for purge; if that fails, skip it (nothing committed).
            ILockStudy lockstudy = update.GetBroker<ILockStudy>();
            LockStudyParameters lockParms = new LockStudyParameters();
            lockParms.StudyStorageKey = location.Key;
            lockParms.QueueStudyStateEnum = QueueStudyStateEnum.PurgeScheduled;
            if (!lockstudy.Execute(lockParms) || !lockParms.Successful)
            {
                Platform.Log(LogLevel.Warn, "Unable to lock study for inserting Study Purge, skipping study ({0}",
                             location.StudyInstanceUid);
                continue;
            }

            // Stored procedure: inserts the WorkQueue entry and deletes the FilesystemQueue entry.
            IInsertWorkQueueFromFilesystemQueue insertBroker = update.GetBroker<IInsertWorkQueueFromFilesystemQueue>();

            InsertWorkQueueFromFilesystemQueueParameters insertParms = new InsertWorkQueueFromFilesystemQueueParameters();
            insertParms.StudyStorageKey = location.GetKey();
            insertParms.ServerPartitionKey = location.ServerPartitionKey;
            insertParms.ScheduledTime = _scheduledTime;
            insertParms.DeleteFilesystemQueue = true;
            insertParms.WorkQueueTypeEnum = WorkQueueTypeEnum.PurgeStudy;
            insertParms.FilesystemQueueTypeEnum = FilesystemQueueTypeEnum.PurgeStudy;

            WorkQueue insertItem = insertBroker.FindOne(insertParms);
            if (insertItem == null)
            {
                // No commit here, so the study lock taken above rolls back with the context.
                Platform.Log(LogLevel.Error,
                             "Unexpected problem inserting 'PurgeStudy' record into WorkQueue for Study {0}",
                             location.StudyInstanceUid);
            }
            else
            {
                update.Commit();
                _bytesToRemove -= studySize;
                _studiesPurged++;
                // Stagger the scheduled times so entries don't all fire at once.
                _scheduledTime = _scheduledTime.AddSeconds(2);
            }
        }
        stats.DbUpdateTime.End();

        stats.TotalTime.End();

        summaryStats.AddSubStats(stats);
        StatisticsLogger.Log(LogLevel.Debug, stats);
    }
    summaryStats.CalculateAverage();
    StatisticsLogger.Log(LogLevel.Info, false, summaryStats);
}
private FilesystemMonitor() { _store = PersistentStoreRegistry.GetDefaultStore(); }
/// <summary>
/// Reprocess a specific study.
/// </summary>
/// <param name="partition">The ServerPartition the study is on.</param>
/// <param name="location">The storage location of the study to process.</param>
/// <param name="engine">The rules engine to use when processing the study.
/// NOTE(review): this parameter is not referenced anywhere in this body — confirm whether it
/// is still required by callers or can be deprecated.</param>
/// <param name="postArchivalEngine">The rules engine used for studies that have been archived.</param>
/// <param name="dataAccessEngine">The rules engine strictly used for setting data acess.</param>
/// <remarks>
/// Requires the study to be Idle and write-lockable; otherwise only logs an error. Chooses the
/// post-archival path when the study has an archive record but no pending archive queue entry
/// and no pending delete; otherwise runs the normal StudyProcessed rules.
/// </remarks>
protected static void ProcessStudy(ServerPartition partition, StudyStorageLocation location, ServerRulesEngine engine, ServerRulesEngine postArchivalEngine, ServerRulesEngine dataAccessEngine)
{
    if (!location.QueueStudyStateEnum.Equals(QueueStudyStateEnum.Idle) || !location.AcquireWriteLock())
    {
        Platform.Log(LogLevel.Error, "Unable to lock study {0}. The study is being processed. (Queue State: {1})",
                     location.StudyInstanceUid, location.QueueStudyStateEnum.Description);
    }
    else
    {
        try
        {
            DicomFile msg = LoadInstance(location);
            if (msg == null)
            {
                Platform.Log(LogLevel.Error, "Unable to load file for study {0}", location.StudyInstanceUid);
                return;
            }

            // Gather the archive/delete state of the study up front with a read-only context.
            bool archiveQueueExists;
            bool archiveStudyStorageExists;
            bool filesystemDeleteExists;
            using (IReadContext read = PersistentStoreRegistry.GetDefaultStore().OpenReadContext())
            {
                // Check for existing archive queue entries
                var archiveQueueBroker = read.GetBroker<IArchiveQueueEntityBroker>();
                var archiveQueueCriteria = new ArchiveQueueSelectCriteria();
                archiveQueueCriteria.StudyStorageKey.EqualTo(location.Key);
                archiveQueueExists = archiveQueueBroker.Count(archiveQueueCriteria) > 0;

                var archiveStorageBroker = read.GetBroker<IArchiveStudyStorageEntityBroker>();
                var archiveStudyStorageCriteria = new ArchiveStudyStorageSelectCriteria();
                archiveStudyStorageCriteria.StudyStorageKey.EqualTo(location.Key);
                archiveStudyStorageExists = archiveStorageBroker.Count(archiveStudyStorageCriteria) > 0;

                // Pending DeleteStudy entries in the filesystem queue.
                var filesystemQueueBroker = read.GetBroker<IFilesystemQueueEntityBroker>();
                var filesystemQueueCriteria = new FilesystemQueueSelectCriteria();
                filesystemQueueCriteria.StudyStorageKey.EqualTo(location.Key);
                filesystemQueueCriteria.FilesystemQueueTypeEnum.EqualTo(FilesystemQueueTypeEnum.DeleteStudy);
                filesystemDeleteExists = filesystemQueueBroker.Count(filesystemQueueCriteria) > 0;
            }

            using (var commandProcessor = new ServerCommandProcessor("Study Rule Processor")
            {
                PrimaryServerPartitionKey = partition.GetKey(),
                PrimaryStudyKey = location.Study.GetKey()
            })
            {
                var context = new ServerActionContext(msg, location.FilesystemKey, partition, location.Key, commandProcessor);

                // Check if the Study has been archived
                if (archiveStudyStorageExists && !archiveQueueExists && !filesystemDeleteExists)
                {
                    // Add a command to delete the current filesystemQueue entries, so that they can
                    // be reinserted by the rules engine.
                    context.CommandProcessor.AddCommand(new DeleteFilesystemQueueCommand(location.Key, ServerRuleApplyTimeEnum.StudyArchived));

                    // How to deal with exiting FilesystemQueue entries is problematic here. If the study
                    // has been migrated off tier 1, we probably don't want to modify the tier migration
                    // entries. Compression entries may have been entered when the Study was initially
                    // processed, we don't want to delete them, because they might still be valid.
                    // We just re-run the rules engine at this point, and delete only the StudyPurge entries,
                    // since those we know at least would only be applied for archived studies.
                    var studyRulesEngine = new StudyRulesEngine(postArchivalEngine, location, location.ServerPartition, location.LoadStudyXml());
                    studyRulesEngine.Apply(ServerRuleApplyTimeEnum.StudyArchived, commandProcessor);

                    // Post Archive doesn't allow data access rules. Force Data Access rules to be reapplied
                    // to these studies also.
                    dataAccessEngine.Execute(context);
                }
                else
                {
                    // Add a command to delete the current filesystemQueue entries, so that they can
                    // be reinserted by the rules engine.
                    context.CommandProcessor.AddCommand(new DeleteFilesystemQueueCommand(location.Key, ServerRuleApplyTimeEnum.StudyProcessed));

                    // Execute the rules engine, insert commands to update the database into the command processor.
                    // Due to ticket #11673, we create a new rules engine instance for each study, since the Study QC rules
                    // don't work right now with a single rules engine.
                    //TODO CR (Jan 2014) - Check if we can go back to caching the rules engine to reduce database hits on the rules
                    var studyRulesEngine = new StudyRulesEngine(location, location.ServerPartition, location.LoadStudyXml());
                    studyRulesEngine.Apply(ServerRuleApplyTimeEnum.StudyProcessed, commandProcessor);
                }

                // Do the actual database updates.
                if (false == context.CommandProcessor.Execute())
                {
                    Platform.Log(LogLevel.Error, "Unexpected failure processing Study level rules for study {0}",
                                 location.StudyInstanceUid);
                }

                // Log the FilesystemQueue related entries
                location.LogFilesystemQueue();
            }
        }
        finally
        {
            location.ReleaseWriteLock();
        }
    }
}
/// <summary>
/// Records a StudyDeleteRecord audit entry after a study has been deleted via the web UI.
/// </summary>
/// <remarks>
/// Only runs for WebDeleteStudy work queue items, and only when the Study record is still
/// available (otherwise the event is logged and skipped). The record captures the deletion
/// reason, study demographics, the backup zip path, any archive info, and the acting user;
/// the commit is skipped if the insert fails.
/// </remarks>
public void OnStudyDeleted()
{
    if (!Enabled)
    {
        return;
    }

    if (_context.WorkQueueItem.WorkQueueTypeEnum == WorkQueueTypeEnum.WebDeleteStudy)
    {
        Study study = _context.Study;
        if (study == null)
        {
            Platform.Log(LogLevel.Info, "Not logging Study Delete information due to missing Study record for study: {0} on partition {1}",
                         _context.StorageLocation.StudyInstanceUid,
                         _context.ServerPartition.AeTitle);
            return;
        }

        StudyStorageLocation storage = _context.StorageLocation;

        using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
        {
            // Setup the parameters
            IStudyDeleteRecordEntityBroker broker = updateContext.GetBroker<IStudyDeleteRecordEntityBroker>();

            StudyDeleteRecordUpdateColumns parms = new StudyDeleteRecordUpdateColumns();
            parms.Timestamp = Platform.Time;

            // The deletion reason travels in the work queue item's serialized data, when present.
            WebDeleteStudyLevelQueueData extendedInfo =
                XmlUtils.Deserialize<WebDeleteStudyLevelQueueData>(_context.WorkQueueItem.Data);

            parms.Reason = extendedInfo != null ? extendedInfo.Reason : _context.WorkQueueItem.WorkQueueTypeEnum.LongDescription;
            parms.ServerPartitionAE = _context.ServerPartition.AeTitle;
            parms.FilesystemKey = storage.FilesystemKey;

            parms.AccessionNumber = study.AccessionNumber;
            parms.PatientId = study.PatientId;
            parms.PatientsName = study.PatientsName;
            parms.StudyInstanceUid = study.StudyInstanceUid;
            parms.StudyDate = study.StudyDate;
            parms.StudyDescription = study.StudyDescription;
            parms.StudyTime = study.StudyTime;

            parms.BackupPath = BackupZipFileRelativePath;

            if (_archives != null && _archives.Count > 0)
            {
                parms.ArchiveInfo = XmlUtils.SerializeAsXmlDoc(_archives);
            }

            // Record which server instance and user performed the deletion.
            StudyDeleteExtendedInfo extInfo = new StudyDeleteExtendedInfo();
            extInfo.ServerInstanceId = ServerPlatform.ServerInstanceId;
            extInfo.UserId = _context.UserId;
            extInfo.UserName = _context.UserName;
            parms.ExtendedInfo = XmlUtils.SerializeAsString(extInfo);

            StudyDeleteRecord deleteRecord = broker.Insert(parms);
            if (deleteRecord == null)
            {
                Platform.Log(LogLevel.Error, "Unexpected error when trying to create study delete record: {0} on partition {1}",
                             study.StudyInstanceUid, _context.ServerPartition.Description);
            }
            else
            {
                updateContext.Commit();
            }
        }
    }
}
/// <summary>
/// Archive the specified <see cref="ArchiveQueue"/> item.
/// </summary>
/// <param name="queueItem">The ArchiveQueue item to archive.</param>
/// <remarks>
/// Workflow: resolve the storage location, verify the study is Idle, validate study integrity,
/// lock the study (QueueStudyState -> ArchiveScheduled), then run the archival through a
/// command processor (zip to HSM + mark queue item Completed + StudyArchived rules).
/// Transient failures (no location, study locked/unlockable) reschedule the queue item two
/// minutes out; validation or unexpected failures mark it Failed. The study is always
/// unlocked in the finally block.
/// </remarks>
public void Run(ArchiveQueue queueItem)
{
    using (ArchiveProcessorContext executionContext = new ArchiveProcessorContext(queueItem))
    {
        try
        {
            if (!GetStudyStorageLocation(queueItem))
            {
                Platform.Log(LogLevel.Error,
                             "Unable to find readable study storage location for archival queue request {0}. Delaying request.",
                             queueItem.Key);
                queueItem.FailureDescription = "Unable to find readable study storage location for archival queue request.";
                _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2));
                return;
            }

            // First, check to see if we can lock the study, if not just reschedule the queue entry.
            if (!_storageLocation.QueueStudyStateEnum.Equals(QueueStudyStateEnum.Idle))
            {
                Platform.Log(LogLevel.Info, "Study {0} on partition {1} is currently locked, delaying archival.",
                             _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description);
                queueItem.FailureDescription = "Study is currently locked, delaying archival.";
                _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2));
                return;
            }

            StudyIntegrityValidator validator = new StudyIntegrityValidator();
            validator.ValidateStudyState("Archive", _storageLocation, StudyIntegrityValidationModes.Default);

            // Take the study lock (ArchiveScheduled) in its own transaction.
            using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
            {
                ILockStudy studyLock = update.GetBroker<ILockStudy>();
                LockStudyParameters parms = new LockStudyParameters
                {
                    StudyStorageKey = queueItem.StudyStorageKey,
                    QueueStudyStateEnum = QueueStudyStateEnum.ArchiveScheduled
                };
                bool retVal = studyLock.Execute(parms);
                if (!parms.Successful || !retVal)
                {
                    Platform.Log(LogLevel.Info, "Study {0} on partition {1} failed to lock, delaying archival.",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description);
                    queueItem.FailureDescription = "Study failed to lock, delaying archival.";
                    _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2));
                    return;
                }
                update.Commit();
            }

            string studyXmlFile = _storageLocation.GetStudyXmlPath();

            // Load the study Xml file, this is used to generate the list of dicom files to archive.
            LoadStudyXml(studyXmlFile);

            DicomFile file = LoadFileFromStudyXml();

            string patientsName = file.DataSet[DicomTags.PatientsName].GetString(0, string.Empty);
            string patientId = file.DataSet[DicomTags.PatientId].GetString(0, string.Empty);
            string accessionNumber = file.DataSet[DicomTags.AccessionNumber].GetString(0, string.Empty);

            Platform.Log(LogLevel.Info,
                         "Starting archival of study {0} for Patient {1} (PatientId:{2} A#:{3}) on Partition {4} on archive {5}",
                         _storageLocation.StudyInstanceUid, patientsName, patientId,
                         accessionNumber, _hsmArchive.ServerPartition.Description,
                         _hsmArchive.PartitionArchive.Description);

            // Use the command processor to do the archival.
            using (ServerCommandProcessor commandProcessor = new ServerCommandProcessor("Archive"))
            {
                var archiveStudyCmd = new ArchiveStudyCommand(_storageLocation, _hsmArchive.HsmPath, executionContext.TempDirectory, _hsmArchive.PartitionArchive)
                {
                    ForceCompress = HsmSettings.Default.CompressZipFiles
                };

                commandProcessor.AddCommand(archiveStudyCmd);
                commandProcessor.AddCommand(new UpdateArchiveQueueItemCommand(queueItem.GetKey(), _storageLocation.GetKey(), ArchiveQueueStatusEnum.Completed));

                StudyRulesEngine studyEngine = new StudyRulesEngine(_storageLocation, _hsmArchive.ServerPartition, _studyXml);
                studyEngine.Apply(ServerRuleApplyTimeEnum.StudyArchived, commandProcessor);

                if (!commandProcessor.Execute())
                {
                    Platform.Log(LogLevel.Error,
                                 "Unexpected failure archiving study ({0}) to archive {1}: {2}, zip filename: {3}",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.PartitionArchive.Description,
                                 commandProcessor.FailureReason, archiveStudyCmd.OutputZipFilePath);

                    queueItem.FailureDescription = commandProcessor.FailureReason;
                    _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time);
                }
                else
                {
                    Platform.Log(LogLevel.Info, "Successfully archived study {0} on {1} to zip {2}",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.PartitionArchive.Description,
                                 archiveStudyCmd.OutputZipFilePath);
                }

                // Log the current FilesystemQueue settings
                _storageLocation.LogFilesystemQueue();
            }
        }
        catch (StudyIntegrityValidationFailure ex)
        {
            // Integrity validation failed: record the study details and mark the item Failed.
            StringBuilder error = new StringBuilder();
            error.AppendLine(String.Format("Partition : {0}", ex.ValidationStudyInfo.ServerAE));
            error.AppendLine(String.Format("Patient : {0}", ex.ValidationStudyInfo.PatientsName));
            error.AppendLine(String.Format("Study Uid : {0}", ex.ValidationStudyInfo.StudyInstaneUid));
            error.AppendLine(String.Format("Accession# : {0}", ex.ValidationStudyInfo.AccessionNumber));
            error.AppendLine(String.Format("Study Date : {0}", ex.ValidationStudyInfo.StudyDate));

            queueItem.FailureDescription = error.ToString();
            _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time);
        }
        catch (Exception e)
        {
            String msg = String.Format("Unexpected exception archiving study: {0} on {1}: {2}",
                                       _storageLocation.StudyInstanceUid, _hsmArchive.PartitionArchive.Description,
                                       e.Message);

            Platform.Log(LogLevel.Error, e, msg);
            queueItem.FailureDescription = msg;
            _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time);
        }
        finally
        {
            // Unlock the Queue Entry
            using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
            {
                ILockStudy studyLock = update.GetBroker<ILockStudy>();
                LockStudyParameters parms = new LockStudyParameters
                {
                    StudyStorageKey = queueItem.StudyStorageKey,
                    QueueStudyStateEnum = QueueStudyStateEnum.Idle
                };
                bool retVal = studyLock.Execute(parms);
                if (!parms.Successful || !retVal)
                {
                    Platform.Log(LogLevel.Info, "Study {0} on partition {1} is failed to unlock.",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description);
                }
                update.Commit();
            }
        }
    }
}
/// <summary>
/// Gets a list of authority groups that can access a given partition.
/// </summary>
/// <param name="partitionKey">The partition.</param>
/// <param name="dataAccessGrupsOnly">True to consider data-access groups only; False to consider all authority groups.</param>
/// <param name="allStudiesGroup">Returns the subset of accessible groups that can see all studies on the partition.</param>
/// <returns>
/// The groups that can access the partition but are limited to specific studies. A group qualifies when it
/// holds the AllPartitions token or has an explicit data-access mapping to this partition in the database.
/// </returns>
public IList<AuthorityGroupDetail> GetAuthorityGroupsForPartition(ServerEntityKey partitionKey,
                                                                  bool dataAccessGrupsOnly,
                                                                  out IList<AuthorityGroupDetail> allStudiesGroup)
{
    using (var service = new AuthorityRead())
    {
        IList<AuthorityGroupDetail> candidates = dataAccessGrupsOnly
                                                     ? service.ListDataAccessAuthorityGroupDetails()
                                                     : service.ListAllAuthorityGroupDetails();

        IList<AuthorityGroupDetail> partitionGroups = new List<AuthorityGroupDetail>();
        var unrestrictedStudyGroups = new List<AuthorityGroupDetail>();

        foreach (AuthorityGroupDetail group in candidates)
        {
            // Scan the group's tokens for the two blanket data-access grants.
            bool hasAllPartitions = false;
            bool hasAllStudies = false;
            foreach (var token in group.AuthorityTokens)
            {
                if (token.Name.Equals(ClearCanvas.Enterprise.Common.AuthorityTokens.DataAccess.AllPartitions))
                    hasAllPartitions = true;
                else if (token.Name.Equals(ClearCanvas.Enterprise.Common.AuthorityTokens.DataAccess.AllStudies))
                    hasAllStudies = true;

                if (hasAllPartitions && hasAllStudies)
                    break;
            }

            // Full access everywhere: report under the all-studies list only.
            if (hasAllPartitions && hasAllStudies)
            {
                unrestrictedStudyGroups.Add(group);
                continue;
            }

            // Without the AllPartitions token the group must be explicitly mapped
            // to this partition in the database; otherwise it is skipped.
            if (!hasAllPartitions)
            {
                bool mappedToPartition;
                using (IReadContext readContext = PersistentStoreRegistry.GetDefaultStore().OpenReadContext())
                {
                    var partitionCriteria = new ServerPartitionDataAccessSelectCriteria();
                    partitionCriteria.ServerPartitionKey.EqualTo(partitionKey);

                    var groupCriteria = new DataAccessGroupSelectCriteria();
                    groupCriteria.AuthorityGroupOID.EqualTo(
                        new ServerEntityKey("AuthorityGroupOID",
                                            new Guid(group.AuthorityGroupRef.ToString(false, false))));
                    groupCriteria.ServerPartitionDataAccessRelatedEntityCondition.Exists(partitionCriteria);

                    var broker = readContext.GetBroker<IDataAccessGroupEntityBroker>();
                    mappedToPartition = broker.Count(groupCriteria) > 0;
                }

                if (!mappedToPartition)
                    continue;
            }

            if (hasAllStudies)
            {
                unrestrictedStudyGroups.Add(group);
                continue;
            }

            partitionGroups.Add(group);
        }

        allStudiesGroup = unrestrictedStudyGroups;
        return partitionGroups;
    }
}
/// <summary>
/// Updates the 'State' of the filesystem associated with the 'FilesystemDelete' <see cref="ServiceLock"/> item.
/// </summary>
/// <param name="item">The ServiceLock row whose State XML is read and persisted back.</param>
/// <param name="fs">Current filesystem info used to evaluate the high-watermark condition.</param>
/// <remarks>
/// While the filesystem stays above its high watermark (and is online and readable or writeable),
/// a warning alert is raised at most once per configured interval. Dropping below the watermark —
/// or the filesystem becoming unusable — clears the tracked timestamps.
/// </remarks>
private static void UpdateState(Model.ServiceLock item, ServerFilesystemInfo fs)
{
    // Rehydrate persisted state from the database, falling back to a fresh instance.
    FilesystemState state = null;
    if (item.State != null && item.State.DocumentElement != null)
    {
        state = XmlUtils.Deserialize<FilesystemState>(item.State.DocumentElement);
    }
    state = state ?? new FilesystemState();

    // We don't want to generate an alert if the filesystem is offline or not accessible.
    bool usable = fs.Online && (fs.Readable || fs.Writeable);

    if (fs.AboveHighWatermark && usable)
    {
        TimeSpan alertInterval = TimeSpan.FromMinutes(ServiceLockSettings.Default.HighWatermarkAlertInterval);

        // Record when the filesystem first crossed the watermark.
        if (state.AboveHighWatermarkTimestamp == null)
            state.AboveHighWatermarkTimestamp = Platform.Time;

        // Throttle: measure from the last alert, or from the crossing time if none was sent yet.
        DateTime throttleBase = state.LastHighWatermarkAlertTimestamp ?? state.AboveHighWatermarkTimestamp.Value;
        TimeSpan sinceLast = Platform.Time - throttleBase;

        if (sinceLast.Duration() >= alertInterval)
        {
            ServerPlatform.Alert(AlertCategory.System, AlertLevel.Warning, "Filesystem", AlertTypeCodes.LowResources,
                                 null, TimeSpan.Zero,
                                 SR.AlertFilesystemAboveHW,
                                 fs.Filesystem.Description,
                                 TimeSpanFormatter.Format(Platform.Time - state.AboveHighWatermarkTimestamp.Value, true));

            state.LastHighWatermarkAlertTimestamp = Platform.Time;
        }
    }
    else
    {
        // Below the watermark, or the filesystem is not usable: reset the tracking timestamps.
        state.AboveHighWatermarkTimestamp = null;
        state.LastHighWatermarkAlertTimestamp = null;
    }

    // Serialize the (possibly updated) state back into an XmlDocument and persist it.
    var stateXml = new XmlDocument();
    stateXml.AppendChild(stateXml.ImportNode(XmlUtils.Serialize(state), true));

    IPersistentStore store = PersistentStoreRegistry.GetDefaultStore();
    using (IUpdateContext ctx = store.OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        var columns = new ServiceLockUpdateColumns { State = stateXml };
        IServiceLockEntityBroker broker = ctx.GetBroker<IServiceLockEntityBroker>();
        broker.Update(item.GetKey(), columns);
        ctx.Commit();
    }
}