public bool Update(ServerPartition partition, List<string> groupsWithDataAccess)
{
    using (IUpdateContext context = PersistentStore.OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        var parms = new ServerPartitionUpdateColumns
            {
                AeTitle = partition.AeTitle,
                Description = partition.Description,
                Enabled = partition.Enabled,
                PartitionFolder = partition.PartitionFolder,
                Port = partition.Port,
                AcceptAnyDevice = partition.AcceptAnyDevice,
                AutoInsertDevice = partition.AutoInsertDevice,
                DefaultRemotePort = partition.DefaultRemotePort,
                DuplicateSopPolicyEnum = partition.DuplicateSopPolicyEnum,
                MatchPatientsName = partition.MatchPatientsName,
                MatchPatientId = partition.MatchPatientId,
                MatchPatientsBirthDate = partition.MatchPatientsBirthDate,
                MatchAccessionNumber = partition.MatchAccessionNumber,
                MatchIssuerOfPatientId = partition.MatchIssuerOfPatientId,
                MatchPatientsSex = partition.MatchPatientsSex,
                AuditDeleteStudy = partition.AuditDeleteStudy,
                AcceptLatestReport = partition.AcceptLatestReport
            };

        var broker = context.GetBroker<IServerPartitionEntityBroker>();
        if (!broker.Update(partition.Key, parms))
            return false;

        UpdateDataAccess(context, partition, groupsWithDataAccess);

        context.Commit();
        return true;
    }
}
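Every example in this section follows the same unit-of-work pattern: open an IUpdateContext with UpdateContextSyncMode.Flush, obtain one or more brokers from it, make the changes, and call Commit() before the context is disposed; disposing without committing rolls the transaction back. A minimal sketch of the pattern follows. IFooEntityBroker and FooUpdateColumns are hypothetical stand-ins for any generated broker/columns pair (such as IServerPartitionEntityBroker above).

// Minimal sketch of the unit-of-work pattern used throughout this section.
// IFooEntityBroker / FooUpdateColumns are hypothetical stand-ins.
public bool UpdateFoo(ServerEntityKey key, string description)
{
    using (IUpdateContext context = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        var broker = context.GetBroker<IFooEntityBroker>();
        var columns = new FooUpdateColumns { Description = description };

        if (!broker.Update(key, columns))
            return false; // no Commit: disposing the context rolls the transaction back

        context.Commit(); // flush and commit the transaction
        return true;
    }
}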
public void Execute()
{
    // Wrap the upgrade in a single commit.
    using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        UpdateContext context = updateContext as UpdateContext;
        if (context == null)
        {
            Console.WriteLine("Unexpected error opening connection to the database.");
            throw new ApplicationException("Error opening connection to the database.");
        }

        ExecuteSql(context, GetScript());

        DatabaseVersionUpdateColumns columns = new DatabaseVersionUpdateColumns();
        DatabaseVersionSelectCriteria criteria = new DatabaseVersionSelectCriteria();
        columns.Revision = DestinationVersion.Revision.ToString();
        columns.Build = DestinationVersion.Build.ToString();
        columns.Minor = DestinationVersion.Minor.ToString();
        columns.Major = DestinationVersion.Major.ToString();

        IDatabaseVersionEntityBroker broker = context.GetBroker<IDatabaseVersionEntityBroker>();
        broker.Update(criteria, columns);

        updateContext.Commit();
    }

    if (_upgradeStoredProcs)
    {
        RunSqlScriptApplication app = new RunSqlScriptApplication();
        app.RunApplication(new string[] { "-storedprocedures" });
    }
}
public bool ReprocessWorkQueueItem(WorkQueue item)
{
    // #10620: Get a list of the remaining WorkQueueUids which need to be reprocessed.
    // Note: currently only WorkQueueUids in a failed StudyProcess will be reprocessed.
    var remainingWorkQueueUidPaths = item.GetAllWorkQueueUidPaths();

    IPersistentStore store = PersistentStoreRegistry.GetDefaultStore();
    using (IUpdateContext ctx = store.OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        // Delete the current WorkQueue entry and its uids.
        IWorkQueueUidEntityBroker uidBroker = ctx.GetBroker<IWorkQueueUidEntityBroker>();
        WorkQueueUidSelectCriteria criteria = new WorkQueueUidSelectCriteria();
        criteria.WorkQueueKey.EqualTo(item.GetKey());

        if (uidBroker.Delete(criteria) >= 0)
        {
            IWorkQueueEntityBroker workQueueBroker = ctx.GetBroker<IWorkQueueEntityBroker>();
            if (workQueueBroker.Delete(item.GetKey()))
            {
                IList<StudyStorageLocation> locations = item.LoadStudyLocations(ctx);
                if (locations != null && locations.Count > 0)
                {
                    StudyReprocessor reprocessor = new StudyReprocessor();
                    String reason = String.Format("User reprocesses failed {0}", item.WorkQueueTypeEnum);
                    WorkQueue reprocessEntry = reprocessor.ReprocessStudy(ctx, reason, locations[0],
                                                                          remainingWorkQueueUidPaths, Platform.Time);
                    if (reprocessEntry != null)
                        ctx.Commit();
                    return reprocessEntry != null;
                }
            }
        }
    }
    return false;
}
/// <summary>
/// Get the next candidate for archival on the <see cref="PartitionArchive"/>.
/// </summary>
/// <returns>The next <see cref="ArchiveQueue"/> candidate, or null if none exists.</returns>
public ArchiveQueue GetArchiveCandidate()
{
    ArchiveQueue queueItem;

    using (IUpdateContext updateContext = PersistentStore.OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        QueryArchiveQueueParameters parms = new QueryArchiveQueueParameters();
        parms.PartitionArchiveKey = _partitionArchive.GetKey();
        parms.ProcessorId = ServerPlatform.ProcessorId;

        IQueryArchiveQueue broker = updateContext.GetBroker<IQueryArchiveQueue>();

        // The stored procedure only returns 1 result.
        queueItem = broker.FindOne(parms);

        if (queueItem != null)
            updateContext.Commit();
    }

    return queueItem;
}
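Because the stored procedure stamps the returned row with this ProcessorId in the same transaction, committing only when an item was found means each candidate is claimed by exactly one processor. A hypothetical polling loop over GetArchiveCandidate might look like this (ProcessItem is a stand-in for the Run() methods shown below):

// Hypothetical caller: drain the archive queue until no candidate remains.
ArchiveQueue queueItem;
while ((queueItem = GetArchiveCandidate()) != null)
{
    ProcessItem(queueItem); // stand-in for Run(queueItem) below
}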
public bool DeleteOrderItem(ServerEntityKey partitionKey, ServerEntityKey orderKey)
{
    using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        // Disconnect the studies from the order before deleting it.
        var studyBroker = updateContext.GetBroker<IStudyEntityBroker>();

        var criteria = new StudySelectCriteria();
        criteria.OrderKey.EqualTo(orderKey);
        criteria.ServerPartitionKey.EqualTo(partitionKey);

        var updateColumns = new StudyUpdateColumns { OrderKey = null };
        studyBroker.Update(criteria, updateColumns);

        bool retValue = _adaptor.Delete(updateContext, orderKey);

        updateContext.Commit();

        return retValue;
    }
}
/// <summary>
/// Archive the specified <see cref="ArchiveQueue"/> item.
/// </summary>
/// <param name="queueItem">The ArchiveQueue item to archive.</param>
public void Run(ArchiveQueue queueItem)
{
    using (ArchiveProcessorContext executionContext = new ArchiveProcessorContext(queueItem))
    {
        try
        {
            if (!GetStudyStorageLocation(queueItem))
            {
                Platform.Log(LogLevel.Error,
                             "Unable to find readable study storage location for archival queue request {0}. Delaying request.",
                             queueItem.Key);
                queueItem.FailureDescription = "Unable to find readable study storage location for archival queue request.";
                _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2));
                return;
            }

            // First, check to see if we can lock the study; if not, just reschedule the queue entry.
            if (!_storageLocation.QueueStudyStateEnum.Equals(QueueStudyStateEnum.Idle))
            {
                Platform.Log(LogLevel.Info, "Study {0} on partition {1} is currently locked, delaying archival.",
                             _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description);
                queueItem.FailureDescription = "Study is currently locked, delaying archival.";
                _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2));
                return;
            }

            StudyIntegrityValidator validator = new StudyIntegrityValidator();
            validator.ValidateStudyState("Archive", _storageLocation, StudyIntegrityValidationModes.Default);

            using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
            {
                ILockStudy studyLock = update.GetBroker<ILockStudy>();
                LockStudyParameters parms = new LockStudyParameters
                    {
                        StudyStorageKey = queueItem.StudyStorageKey,
                        QueueStudyStateEnum = QueueStudyStateEnum.ArchiveScheduled
                    };
                bool retVal = studyLock.Execute(parms);
                if (!parms.Successful || !retVal)
                {
                    Platform.Log(LogLevel.Info, "Study {0} on partition {1} failed to lock, delaying archival.",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description);
                    queueItem.FailureDescription = "Study failed to lock, delaying archival.";
                    _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2));
                    return;
                }
                update.Commit();
            }

            string studyXmlFile = _storageLocation.GetStudyXmlPath();

            // Load the study Xml file; this is used to generate the list of dicom files to archive.
            LoadStudyXml(studyXmlFile);

            DicomFile file = LoadFileFromStudyXml();

            string patientsName = file.DataSet[DicomTags.PatientsName].GetString(0, string.Empty);
            string patientId = file.DataSet[DicomTags.PatientId].GetString(0, string.Empty);
            string accessionNumber = file.DataSet[DicomTags.AccessionNumber].GetString(0, string.Empty);

            Platform.Log(LogLevel.Info,
                         "Starting archival of study {0} for Patient {1} (PatientId:{2} A#:{3}) on Partition {4} on archive {5}",
                         _storageLocation.StudyInstanceUid, patientsName, patientId,
                         accessionNumber, _hsmArchive.ServerPartition.Description,
                         _hsmArchive.PartitionArchive.Description);

            // Use the command processor to do the archival.
            using (ServerCommandProcessor commandProcessor = new ServerCommandProcessor("Archive"))
            {
                var archiveStudyCmd = new ArchiveStudyCommand(_storageLocation, _hsmArchive.HsmPath,
                                                              executionContext.TempDirectory, _hsmArchive.PartitionArchive)
                    {
                        ForceCompress = HsmSettings.Default.CompressZipFiles
                    };

                commandProcessor.AddCommand(archiveStudyCmd);
                commandProcessor.AddCommand(new UpdateArchiveQueueItemCommand(queueItem.GetKey(),
                                                                              _storageLocation.GetKey(),
                                                                              ArchiveQueueStatusEnum.Completed));

                StudyRulesEngine studyEngine = new StudyRulesEngine(_storageLocation, _hsmArchive.ServerPartition, _studyXml);
                studyEngine.Apply(ServerRuleApplyTimeEnum.StudyArchived, commandProcessor);

                if (!commandProcessor.Execute())
                {
                    Platform.Log(LogLevel.Error,
                                 "Unexpected failure archiving study ({0}) to archive {1}: {2}, zip filename: {3}",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.PartitionArchive.Description,
                                 commandProcessor.FailureReason, archiveStudyCmd.OutputZipFilePath);

                    queueItem.FailureDescription = commandProcessor.FailureReason;
                    _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time);
                }
                else
                {
                    Platform.Log(LogLevel.Info, "Successfully archived study {0} on {1} to zip {2}",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.PartitionArchive.Description,
                                 archiveStudyCmd.OutputZipFilePath);
                }

                // Log the current FilesystemQueue settings
                _storageLocation.LogFilesystemQueue();
            }
        }
        catch (StudyIntegrityValidationFailure ex)
        {
            StringBuilder error = new StringBuilder();
            error.AppendLine(String.Format("Partition : {0}", ex.ValidationStudyInfo.ServerAE));
            error.AppendLine(String.Format("Patient : {0}", ex.ValidationStudyInfo.PatientsName));
            error.AppendLine(String.Format("Study Uid : {0}", ex.ValidationStudyInfo.StudyInstaneUid));
            error.AppendLine(String.Format("Accession# : {0}", ex.ValidationStudyInfo.AccessionNumber));
            error.AppendLine(String.Format("Study Date : {0}", ex.ValidationStudyInfo.StudyDate));

            queueItem.FailureDescription = error.ToString();
            _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time);
        }
        catch (Exception e)
        {
            String msg = String.Format("Unexpected exception archiving study: {0} on {1}: {2}",
                                       _storageLocation.StudyInstanceUid,
                                       _hsmArchive.PartitionArchive.Description, e.Message);

            Platform.Log(LogLevel.Error, e, msg);
            queueItem.FailureDescription = msg;
            _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time);
        }
        finally
        {
            // Unlock the Queue Entry
            using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
            {
                ILockStudy studyLock = update.GetBroker<ILockStudy>();
                LockStudyParameters parms = new LockStudyParameters
                    {
                        StudyStorageKey = queueItem.StudyStorageKey,
                        QueueStudyStateEnum = QueueStudyStateEnum.Idle
                    };
                bool retVal = studyLock.Execute(parms);
                if (!parms.Successful || !retVal)
                {
                    Platform.Log(LogLevel.Info, "Study {0} on partition {1} failed to unlock.",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description);
                }
                update.Commit();
            }
        }
    }
}
/// <summary>
/// Method for getting the next <see cref="WorkQueue"/> entry.
/// </summary>
/// <param name="processorId">The Id of the processor.</param>
/// <returns>
/// A <see cref="WorkQueue"/> entry if found, otherwise null.
/// </returns>
public Model.WorkQueue GetWorkQueueItem(string processorId)
{
    Model.WorkQueue queueListItem = null;

    // First check for Stat WorkQueue items.
    if (_threadPool.MemoryLimitedThreadsAvailable)
    {
        using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
        {
            IQueryWorkQueue select = updateContext.GetBroker<IQueryWorkQueue>();
            WorkQueueQueryParameters parms = new WorkQueueQueryParameters
                {
                    ProcessorID = processorId,
                    WorkQueuePriorityEnum = WorkQueuePriorityEnum.Stat
                };
            queueListItem = select.FindOne(parms);
            if (queueListItem != null)
                updateContext.Commit();
        }
    }

    // If we don't have the maximum number of high priority threads in use,
    // see if any high priority items are available.
    if (queueListItem == null && _threadPool.HighPriorityThreadsAvailable)
    {
        using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
        {
            IQueryWorkQueue select = updateContext.GetBroker<IQueryWorkQueue>();
            WorkQueueQueryParameters parms = new WorkQueueQueryParameters
                {
                    ProcessorID = processorId,
                    WorkQueuePriorityEnum = WorkQueuePriorityEnum.High
                };
            queueListItem = select.FindOne(parms);
            if (queueListItem != null)
                updateContext.Commit();
        }
    }

    // If we didn't find a high priority work queue item, and we have threads
    // available for memory limited work queue items, query for the next queue item available.
    if (queueListItem == null && _threadPool.MemoryLimitedThreadsAvailable)
    {
        using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
        {
            IQueryWorkQueue select = updateContext.GetBroker<IQueryWorkQueue>();
            WorkQueueQueryParameters parms = new WorkQueueQueryParameters
                {
                    ProcessorID = processorId
                };
            queueListItem = select.FindOne(parms);
            if (queueListItem != null)
                updateContext.Commit();
        }
    }

    // This logic is only reached if the memory limited and priority threads are used up:
    // check for Stat items that are not memory limited.
    if (queueListItem == null && !_threadPool.MemoryLimitedThreadsAvailable)
    {
        using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
        {
            IQueryWorkQueue select = updateContext.GetBroker<IQueryWorkQueue>();
            WorkQueueQueryParameters parms = new WorkQueueQueryParameters
                {
                    ProcessorID = processorId,
                    WorkQueuePriorityEnum = WorkQueuePriorityEnum.Stat,
                    MemoryLimited = true
                };
            queueListItem = select.FindOne(parms);
            if (queueListItem != null)
                updateContext.Commit();
        }
    }

    // This logic is only reached if the memory limited and priority threads are used up:
    // check for any item that is not memory limited.
    if (queueListItem == null && !_threadPool.MemoryLimitedThreadsAvailable)
    {
        using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
        {
            IQueryWorkQueue select = updateContext.GetBroker<IQueryWorkQueue>();
            WorkQueueQueryParameters parms = new WorkQueueQueryParameters
                {
                    ProcessorID = processorId,
                    MemoryLimited = true
                };
            queueListItem = select.FindOne(parms);
            if (queueListItem != null)
                updateContext.Commit();
        }
    }

    return queueListItem;
}
/// <summary>
/// Simple routine for failing a work queue item.
/// </summary>
/// <param name="item">The item to fail.</param>
/// <param name="failureDescription">The reason for the failure.</param>
private void FailQueueItem(Model.WorkQueue item, string failureDescription)
{
    // Must retry resetting the status of the entry in case of a db error.
    // Failure to do so will create a stale work queue entry (stuck in the "In Progress" state),
    // which can only be recovered by restarting the service.
    while (true)
    {
        try
        {
            WorkQueueTypeProperties prop = _propertiesDictionary[item.WorkQueueTypeEnum];
            using (IUpdateContext updateContext = _store.OpenUpdateContext(UpdateContextSyncMode.Flush))
            {
                IUpdateWorkQueue update = updateContext.GetBroker<IUpdateWorkQueue>();
                UpdateWorkQueueParameters parms = new UpdateWorkQueueParameters
                    {
                        ProcessorID = ServerPlatform.ProcessorId,
                        WorkQueueKey = item.GetKey(),
                        StudyStorageKey = item.StudyStorageKey,
                        FailureCount = item.FailureCount + 1,
                        FailureDescription = failureDescription
                    };

                var settings = WorkQueueSettings.Instance;
                if ((item.FailureCount + 1) > prop.MaxFailureCount)
                {
                    Platform.Log(LogLevel.Error,
                                 "Failing {0} WorkQueue entry ({1}), reached max retry count of {2}. Failure Reason: {3}",
                                 item.WorkQueueTypeEnum, item.GetKey(), item.FailureCount + 1, failureDescription);
                    parms.WorkQueueStatusEnum = WorkQueueStatusEnum.Failed;
                    parms.ScheduledTime = Platform.Time;
                    parms.ExpirationTime = Platform.Time.AddDays(1);

                    OnWorkQueueEntryFailed(item, failureDescription);
                }
                else
                {
                    Platform.Log(LogLevel.Error,
                                 "Resetting {0} WorkQueue entry ({1}) to Pending, current retry count {2}. Failure Reason: {3}",
                                 item.WorkQueueTypeEnum, item.GetKey(), item.FailureCount + 1, failureDescription);
                    parms.WorkQueueStatusEnum = WorkQueueStatusEnum.Pending;
                    parms.ScheduledTime = Platform.Time.AddMilliseconds(settings.WorkQueueQueryDelay);
                    parms.ExpirationTime = Platform.Time.AddSeconds((prop.MaxFailureCount - item.FailureCount) *
                                                                    prop.FailureDelaySeconds);
                }

                if (false == update.Execute(parms))
                {
                    Platform.Log(LogLevel.Error, "Unable to update {0} WorkQueue GUID: {1}",
                                 item.WorkQueueTypeEnum, item.GetKey().ToString());
                }
                else
                {
                    updateContext.Commit();
                    break; // done
                }
            }
        }
        catch (Exception ex)
        {
            Platform.Log(LogLevel.Error, "Error occurred when calling FailQueueItem. Retrying later. {0}", ex.Message);
            _terminateEvent.WaitOne(2000, false);
            if (_stop)
            {
                Platform.Log(LogLevel.Warn, "Service is stopping. The retry to fail the entry has been terminated.");
                break;
            }
        }
    }
}
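A hypothetical call site for FailQueueItem: a processor routes every failure through it, so the retry counting and backoff bookkeeping live in one place.

// Hypothetical processor loop: any exception fails the item through FailQueueItem,
// which either reschedules it (Pending) or gives up (Failed) based on the retry count.
try
{
    processor.Process(item);
}
catch (Exception e)
{
    FailQueueItem(item, String.Format("Processing failed: {0}", e.Message));
}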
private bool ArchiveLogs(ServerFilesystemInfo archiveFs)
{
    string archivePath = Path.Combine(archiveFs.Filesystem.FilesystemPath, "AlertLog");

    DateTime cutOffTime = Platform.Time.Date.AddDays(ServiceLockSettings.Default.AlertCachedDays * -1);
    AlertSelectCriteria criteria = new AlertSelectCriteria();
    criteria.InsertTime.LessThan(cutOffTime);
    criteria.InsertTime.SortAsc(0);

    using (ServerExecutionContext context = new ServerExecutionContext())
    {
        IAlertEntityBroker broker = context.ReadContext.GetBroker<IAlertEntityBroker>();

        ImageServerLogWriter<Alert> writer = new ImageServerLogWriter<Alert>(archivePath, "Alert");

        List<ServerEntityKey> keyList = new List<ServerEntityKey>(500);
        try
        {
            broker.Find(criteria, delegate(Alert result)
            {
                keyList.Add(result.Key);

                // If configured to delete only, don't flush to disk. We just delete the contents of keyList below.
                if (!ServiceLockSettings.Default.AlertDelete)
                {
                    if (writer.WriteLog(result, result.InsertTime))
                    {
                        // The log has been flushed; delete the cached log entries.
                        // Note: the keys collected above are Alert keys, so delete them through the Alert broker.
                        using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
                        {
                            IAlertEntityBroker updateBroker = update.GetBroker<IAlertEntityBroker>();
                            foreach (ServerEntityKey key in keyList)
                                updateBroker.Delete(key);
                            update.Commit();
                        }

                        keyList = new List<ServerEntityKey>();
                    }
                }
            });

            writer.FlushLog();

            if (keyList.Count > 0)
            {
                using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
                {
                    IAlertEntityBroker updateBroker = update.GetBroker<IAlertEntityBroker>();
                    foreach (ServerEntityKey key in keyList)
                        updateBroker.Delete(key);
                    update.Commit();
                }
            }
        }
        catch (Exception e)
        {
            Platform.Log(LogLevel.Error, e, "Unexpected exception when purging Alert log files.");
            writer.Dispose();
            return false;
        }

        writer.Dispose();
        return true;
    }
}
public void RestoreNearlineStudy(RestoreQueue queueItem, string zipFile, string studyFolder)
{
    ServerFilesystemInfo fs = _hsmArchive.Selector.SelectFilesystem();
    if (fs == null)
    {
        DateTime scheduleTime = Platform.Time.AddMinutes(5);
        Platform.Log(LogLevel.Error, "No writeable filesystem for restore, rescheduling restore request to {0}", scheduleTime);
        queueItem.FailureDescription = "No writeable filesystem for restore, rescheduling restore request";
        _hsmArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Pending, scheduleTime);
        return;
    }

    string destinationFolder = Path.Combine(fs.Filesystem.FilesystemPath, _hsmArchive.ServerPartition.PartitionFolder);

    StudyStorageLocation restoredLocation = null;
    try
    {
        using (var processor = new ServerCommandProcessor("HSM Restore Offline Study"))
        {
            processor.AddCommand(new CreateDirectoryCommand(destinationFolder));

            destinationFolder = Path.Combine(destinationFolder, studyFolder);
            processor.AddCommand(new CreateDirectoryCommand(destinationFolder));

            destinationFolder = Path.Combine(destinationFolder, _studyStorage.StudyInstanceUid);
            processor.AddCommand(new CreateDirectoryCommand(destinationFolder));
            processor.AddCommand(new ExtractZipCommand(zipFile, destinationFolder));

            // We rebuild the StudyXml, in case any settings or issues have happened since archival.
            processor.AddCommand(new RebuildStudyXmlCommand(_studyStorage.StudyInstanceUid, destinationFolder));

            // Apply the rules engine.
            var context = new ServerActionContext(null, fs.Filesystem.GetKey(), _hsmArchive.ServerPartition,
                                                  queueItem.StudyStorageKey)
                {
                    CommandProcessor = processor
                };
            processor.AddCommand(new ApplyRulesCommand(destinationFolder, _studyStorage.StudyInstanceUid, context));

            // Do the actual insert into the DB.
            var insertStorageCommand = new InsertFilesystemStudyStorageCommand(
                _hsmArchive.PartitionArchive.ServerPartitionKey,
                _studyStorage.StudyInstanceUid,
                studyFolder,
                fs.Filesystem.GetKey(),
                _syntax);
            processor.AddCommand(insertStorageCommand);

            if (!processor.Execute())
            {
                Platform.Log(LogLevel.Error, "Unexpected error processing restore request for {0} on archive {1}",
                             _studyStorage.StudyInstanceUid, _hsmArchive.PartitionArchive.Description);
                queueItem.FailureDescription = processor.FailureReason;
                _hsmArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Failed, Platform.Time);
            }
            else
            {
                restoredLocation = insertStorageCommand.Location;

                // Unlock the Queue Entry
                using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
                {
                    bool retVal = _hsmArchive.UpdateRestoreQueue(update, queueItem, RestoreQueueStatusEnum.Completed,
                                                                 Platform.Time.AddSeconds(60));
                    var studyLock = update.GetBroker<ILockStudy>();
                    var parms = new LockStudyParameters
                        {
                            StudyStorageKey = queueItem.StudyStorageKey,
                            QueueStudyStateEnum = QueueStudyStateEnum.Idle
                        };
                    retVal = retVal && studyLock.Execute(parms);
                    if (!parms.Successful || !retVal)
                    {
                        string message = String.Format("Study {0} on partition {1} failed to unlock.",
                                                       _studyStorage.StudyInstanceUid,
                                                       _hsmArchive.ServerPartition.Description);
                        Platform.Log(LogLevel.Info, message);
                        throw new ApplicationException(message);
                    }
                    update.Commit();

                    Platform.Log(LogLevel.Info, "Successfully restored study: {0} on archive {1}",
                                 _studyStorage.StudyInstanceUid, _hsmArchive.PartitionArchive.Description);

                    OnStudyRestored(restoredLocation);
                }
            }
        }
    }
    catch (StudyIntegrityValidationFailure ex)
    {
        Debug.Assert(restoredLocation != null);
        // The study has been restored but it appears corrupted; it needs to be reprocessed.
        ReprocessStudy(restoredLocation, ex.Message);
    }
    catch (Exception e)
    {
        Platform.Log(LogLevel.Error, e, "Unexpected exception processing restore request for {0} on archive {1}",
                     _studyStorage.StudyInstanceUid, _hsmArchive.PartitionArchive.Description);
        _hsmArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Failed, Platform.Time);
    }
}
/// <summary>
/// Process study migration candidates retrieved from the <see cref="Model.FilesystemQueue"/> table.
/// </summary>
/// <param name="candidateList">The list of candidate studies for tier migration.</param>
private void ProcessStudyMigrateCandidates(IList<FilesystemQueue> candidateList)
{
    Platform.CheckForNullReference(candidateList, "candidateList");

    if (candidateList.Count > 0)
        Platform.Log(LogLevel.Debug, "Scheduling tier-migration for {0} eligible studies...", candidateList.Count);

    FilesystemProcessStatistics summaryStats = new FilesystemProcessStatistics("FilesystemTierMigrateInsert");

    foreach (FilesystemQueue queueItem in candidateList)
    {
        if (_bytesToRemove < 0 || CancelPending)
        {
            Platform.Log(LogLevel.Debug, "Estimated disk space to remove has been reached.");
            break;
        }

        StudyProcessStatistics stats = new StudyProcessStatistics("TierMigrateStudy");
        stats.TotalTime.Start();

        stats.StudyStorageTime.Start();
        // First, get the StudyStorage locations for the study, and calculate the disk usage.
        StudyStorageLocation location;
        if (!FilesystemMonitor.Instance.GetWritableStudyStorageLocation(queueItem.StudyStorageKey, out location))
            continue;
        stats.StudyStorageTime.End();

        stats.CalculateDirectorySizeTime.Start();
        // Get the disk usage
        float studySize = EstimateFolderSizeFromStudyXml(location);
        stats.CalculateDirectorySizeTime.End();
        stats.DirectorySize = (ulong)studySize;

        stats.DbUpdateTime.Start();
        using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
        {
            ILockStudy lockstudy = update.GetBroker<ILockStudy>();
            LockStudyParameters lockParms = new LockStudyParameters
                {
                    StudyStorageKey = location.Key,
                    QueueStudyStateEnum = QueueStudyStateEnum.MigrationScheduled
                };
            if (!lockstudy.Execute(lockParms) || !lockParms.Successful)
            {
                Platform.Log(LogLevel.Warn,
                             "Unable to lock study for inserting Tier Migration. Reason:{0}. Skipping study ({1})",
                             lockParms.FailureReason, location.StudyInstanceUid);
                continue;
            }

            IInsertWorkQueueFromFilesystemQueue broker = update.GetBroker<IInsertWorkQueueFromFilesystemQueue>();

            InsertWorkQueueFromFilesystemQueueParameters insertParms = new InsertWorkQueueFromFilesystemQueueParameters
                {
                    StudyStorageKey = location.GetKey(),
                    ServerPartitionKey = location.ServerPartitionKey,
                    ScheduledTime = _scheduledTime,
                    DeleteFilesystemQueue = true,
                    WorkQueueTypeEnum = WorkQueueTypeEnum.MigrateStudy,
                    FilesystemQueueTypeEnum = FilesystemQueueTypeEnum.TierMigrate
                };

            Platform.Log(LogLevel.Debug, "Scheduling tier-migration for study {0} from {1} at {2}...",
                         location.StudyInstanceUid, location.FilesystemTierEnum, _scheduledTime);
            WorkQueue insertItem = broker.FindOne(insertParms);
            if (insertItem == null)
            {
                Platform.Log(LogLevel.Error,
                             "Unexpected problem inserting 'MigrateStudy' record into WorkQueue for Study {0}",
                             location.StudyInstanceUid);
            }
            else
            {
                update.Commit();
                _bytesToRemove -= studySize;
                _studiesMigrated++;

                // Spread out the scheduled migration entries based on the study size,
                // assuming that the larger the study, the longer it will take to migrate.
                // The assumed migration speed is arbitrarily chosen.
                double migrationSpeed = ServiceLockSettings.Default.TierMigrationSpeed * 1024 * 1024; // MB / sec
                TimeSpan estMigrateTime = TimeSpan.FromSeconds(studySize / migrationSpeed);
                _scheduledTime = _scheduledTime.Add(estMigrateTime);
            }
        }
        stats.DbUpdateTime.End();

        stats.TotalTime.End();

        summaryStats.AddSubStats(stats);
        StatisticsLogger.Log(LogLevel.Debug, stats);
    }

    summaryStats.CalculateAverage();
    StatisticsLogger.Log(LogLevel.Info, false, summaryStats);
}
private static void ReconcileStudy(string command, StudyIntegrityQueue item)
{
    // Ignore the reconcile command if the item is null.
    if (item == null)
        return;

    // Preload the change description so it's not done during the DB transaction.
    XmlDocument changeDescription = new XmlDocument();
    changeDescription.LoadXml(command);

    // The Xml in the SIQ item was generated when the images were received and put into the SIQ.
    // We now add the user info to it so that it will be logged in the history.
    ReconcileStudyWorkQueueData queueData = XmlUtils.Deserialize<ReconcileStudyWorkQueueData>(item.Details);
    queueData.TimeStamp = Platform.Time;
    queueData.UserId = ServerHelper.CurrentUserName;

    using (IUpdateContext context = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        LockStudyParameters lockParms = new LockStudyParameters
            {
                QueueStudyStateEnum = QueueStudyStateEnum.ReconcileScheduled,
                StudyStorageKey = item.StudyStorageKey
            };
        ILockStudy broker = context.GetBroker<ILockStudy>();
        broker.Execute(lockParms);
        if (!lockParms.Successful)
            throw new ApplicationException(lockParms.FailureReason);

        // Add to Study History
        StudyHistoryeAdaptor historyAdaptor = new StudyHistoryeAdaptor();
        StudyHistoryUpdateColumns parameters = new StudyHistoryUpdateColumns
            {
                StudyData = item.StudyData,
                ChangeDescription = changeDescription,
                StudyStorageKey = item.StudyStorageKey,
                StudyHistoryTypeEnum = StudyHistoryTypeEnum.StudyReconciled
            };
        StudyHistory history = historyAdaptor.Add(context, parameters);

        // Create the WorkQueue entry
        WorkQueueAdaptor workQueueAdaptor = new WorkQueueAdaptor();
        WorkQueueUpdateColumns row = new WorkQueueUpdateColumns
            {
                Data = XmlUtils.SerializeAsXmlDoc(queueData),
                ServerPartitionKey = item.ServerPartitionKey,
                StudyStorageKey = item.StudyStorageKey,
                StudyHistoryKey = history.GetKey(),
                WorkQueueTypeEnum = WorkQueueTypeEnum.ReconcileStudy,
                WorkQueueStatusEnum = WorkQueueStatusEnum.Pending,
                ScheduledTime = Platform.Time,
                ExpirationTime = Platform.Time.AddHours(1),
                GroupID = item.GroupID
            };
        WorkQueue newWorkQueueItem = workQueueAdaptor.Add(context, row);

        // Copy the SIQ uids into WorkQueue uids.
        StudyIntegrityQueueUidAdaptor studyIntegrityQueueUidAdaptor = new StudyIntegrityQueueUidAdaptor();
        StudyIntegrityQueueUidSelectCriteria crit = new StudyIntegrityQueueUidSelectCriteria();
        crit.StudyIntegrityQueueKey.EqualTo(item.GetKey());
        IList<StudyIntegrityQueueUid> uidList = studyIntegrityQueueUidAdaptor.Get(context, crit);

        WorkQueueUidAdaptor workQueueUidAdaptor = new WorkQueueUidAdaptor();
        WorkQueueUidUpdateColumns update = new WorkQueueUidUpdateColumns();
        foreach (StudyIntegrityQueueUid uid in uidList)
        {
            update.WorkQueueKey = newWorkQueueItem.GetKey();
            update.SeriesInstanceUid = uid.SeriesInstanceUid;
            update.SopInstanceUid = uid.SopInstanceUid;
            update.RelativePath = uid.RelativePath;
            workQueueUidAdaptor.Add(context, update);
        }

        // Delete the StudyIntegrityQueue item and its uids.
        StudyIntegrityQueueUidSelectCriteria criteria = new StudyIntegrityQueueUidSelectCriteria();
        criteria.StudyIntegrityQueueKey.EqualTo(item.GetKey());
        studyIntegrityQueueUidAdaptor.Delete(context, criteria);

        StudyIntegrityQueueAdaptor studyIntegrityQueueAdaptor = new StudyIntegrityQueueAdaptor();
        studyIntegrityQueueAdaptor.Delete(context, item.GetKey());

        context.Commit();
    }
}
public bool UpdateStudyAuthorityGroups(string studyInstanceUid, string accessionNumber,
                                       ServerEntityKey studyStorageKey, IList<string> assignedGroupOids)
{
    List<AuthorityGroupDetail> nonAddedAuthorityGroups;
    Dictionary<ServerEntityKey, AuthorityGroupDetail> dic = LoadAuthorityGroups(out nonAddedAuthorityGroups);
    IList<AuthorityGroupStudyAccessInfo> assignedList = ListDataAccessGroupsForStudy(dic, studyStorageKey);

    // Remove data access entries for groups that are no longer assigned.
    List<string> groupList = new List<string>();
    foreach (AuthorityGroupStudyAccessInfo group in assignedList)
    {
        bool found = false;
        foreach (var oid in assignedGroupOids)
        {
            if (group.AuthorityOID.Equals(oid))
            {
                found = true;
                break;
            }
        }

        if (!found)
        {
            using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
            {
                IStudyDataAccessEntityBroker broker = update.GetBroker<IStudyDataAccessEntityBroker>();
                broker.Delete(group.StudyDataAccess.Key);
                update.Commit();
            }
            groupList.Add(group.Description);
        }
    }

    if (groupList.Count > 0)
    {
        ServerAuditHelper.RemoveAuthorityGroupAccess(studyInstanceUid, accessionNumber, groupList);
        groupList.Clear();
    }

    // Insert data access entries for newly assigned groups.
    foreach (var oid in assignedGroupOids)
    {
        bool found = false;
        foreach (AuthorityGroupStudyAccessInfo group in assignedList)
        {
            if (group.AuthorityOID.Equals(oid))
            {
                found = true;
                break;
            }
        }

        if (!found)
        {
            DataAccessGroup accessGroup = AddDataAccessIfNotExists(oid);

            using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
            {
                StudyDataAccessUpdateColumns insertColumns = new StudyDataAccessUpdateColumns
                    {
                        DataAccessGroupKey = accessGroup.Key,
                        StudyStorageKey = studyStorageKey
                    };
                IStudyDataAccessEntityBroker insert = updateContext.GetBroker<IStudyDataAccessEntityBroker>();
                insert.Insert(insertColumns);
                updateContext.Commit();
            }

            foreach (AuthorityGroupDetail group in nonAddedAuthorityGroups)
            {
                if (group.AuthorityGroupRef.ToString(false, false).Equals(accessGroup.AuthorityGroupOID.Key.ToString()))
                    groupList.Add(group.Name);
            }
        }
    }

    if (groupList.Count > 0)
        ServerAuditHelper.AddAuthorityGroupAccess(studyInstanceUid, accessionNumber, groupList);

    return true;
}
/// <summary>
/// Migrates the study to a new tier.
/// </summary>
/// <param name="storage">The storage location of the study to migrate.</param>
/// <param name="newFilesystem">The filesystem to migrate the study to.</param>
private void DoMigrateStudy(StudyStorageLocation storage, ServerFilesystemInfo newFilesystem)
{
    Platform.CheckForNullReference(storage, "storage");
    Platform.CheckForNullReference(newFilesystem, "newFilesystem");

    TierMigrationStatistics stat = new TierMigrationStatistics { StudyInstanceUid = storage.StudyInstanceUid };
    stat.ProcessSpeed.Start();
    StudyXml studyXml = storage.LoadStudyXml();
    stat.StudySize = (ulong)studyXml.GetStudySize();

    Platform.Log(LogLevel.Info, "About to migrate study {0} from {1} to {2}",
                 storage.StudyInstanceUid, storage.FilesystemTierEnum, newFilesystem.Filesystem.Description);

    string newPath = Path.Combine(newFilesystem.Filesystem.FilesystemPath, storage.PartitionFolder);
    DateTime startTime = Platform.Time;
    DateTime lastLog = Platform.Time;
    int fileCounter = 0;
    ulong bytesCopied = 0;
    long instanceCountInXml = studyXml.NumberOfStudyRelatedInstances;

    using (ServerCommandProcessor processor = new ServerCommandProcessor("Migrate Study"))
    {
        TierMigrationContext context = new TierMigrationContext
            {
                OriginalStudyLocation = storage,
                Destination = newFilesystem
            };

        // The multiple CreateDirectoryCommands are used so that rollback of the directories being
        // created happens properly if either of the directories already exists.
        var origFolder = context.OriginalStudyLocation.GetStudyPath();
        processor.AddCommand(new CreateDirectoryCommand(newPath));

        newPath = Path.Combine(newPath, context.OriginalStudyLocation.StudyFolder);
        processor.AddCommand(new CreateDirectoryCommand(newPath));

        newPath = Path.Combine(newPath, context.OriginalStudyLocation.StudyInstanceUid);
        // Don't create this directory so that it won't be backed up by MoveDirectoryCommand.

        var copyDirCommand = new CopyDirectoryCommand(origFolder, newPath,
            delegate(string path)
            {
                // Update the progress. This is useful if the migration takes a long time to complete.
                FileInfo file = new FileInfo(path);
                bytesCopied += (ulong)file.Length;
                fileCounter++;
                if (file.Extension != null &&
                    file.Extension.Equals(ServerPlatform.DicomFileExtension, StringComparison.InvariantCultureIgnoreCase))
                {
                    TimeSpan elapsed = Platform.Time - lastLog;
                    TimeSpan totalElapsed = Platform.Time - startTime;
                    double speedInMBPerSecond = 0;
                    if (totalElapsed.TotalSeconds > 0)
                        speedInMBPerSecond = (bytesCopied / 1024f / 1024f) / totalElapsed.TotalSeconds;

                    if (elapsed > TimeSpan.FromSeconds(WorkQueueSettings.Instance.TierMigrationProgressUpdateInSeconds))
                    {
                        #region Log Progress

                        StringBuilder stats = new StringBuilder();
                        if (instanceCountInXml != 0)
                        {
                            float pct = (float)fileCounter / instanceCountInXml;
                            stats.AppendFormat("{0} files moved [{1:0.0}MB] since {2} ({3:0}% completed). Speed={4:0.00}MB/s",
                                               fileCounter, bytesCopied / 1024f / 1024f, startTime, pct * 100,
                                               speedInMBPerSecond);
                        }
                        else
                        {
                            stats.AppendFormat("{0} files moved [{1:0.0}MB] since {2}. Speed={3:0.00}MB/s",
                                               fileCounter, bytesCopied / 1024f / 1024f, startTime,
                                               speedInMBPerSecond);
                        }

                        Platform.Log(LogLevel.Info, "Tier migration for study {0}: {1}",
                                     storage.StudyInstanceUid, stats.ToString());
                        try
                        {
                            using (IUpdateContext ctx = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
                            {
                                IWorkQueueEntityBroker broker = ctx.GetBroker<IWorkQueueEntityBroker>();
                                WorkQueueUpdateColumns parameters = new WorkQueueUpdateColumns
                                    {
                                        FailureDescription = stats.ToString()
                                    };
                                broker.Update(WorkQueueItem.GetKey(), parameters);
                                ctx.Commit();
                            }
                        }
                        catch
                        {
                            // Can't log the progress so far... just ignore it.
                        }
                        finally
                        {
                            lastLog = DateTime.Now;
                        }

                        #endregion
                    }
                }
            });
        processor.AddCommand(copyDirCommand);

        DeleteDirectoryCommand delDirCommand = new DeleteDirectoryCommand(origFolder, false)
            {
                RequiresRollback = false
            };
        processor.AddCommand(delDirCommand);

        TierMigrateDatabaseUpdateCommand updateDbCommand = new TierMigrateDatabaseUpdateCommand(context);
        processor.AddCommand(updateDbCommand);

        Platform.Log(LogLevel.Info, "Start migrating study {0}.. expecting {1} to be moved",
                     storage.StudyInstanceUid, ByteCountFormatter.Format(stat.StudySize));
        if (!processor.Execute())
        {
            if (processor.FailureException != null)
                throw processor.FailureException;
            throw new ApplicationException(processor.FailureReason);
        }

        stat.DBUpdate = updateDbCommand.Statistics;
        stat.CopyFiles = copyDirCommand.CopySpeed;
        stat.DeleteDirTime = delDirCommand.Statistics;
    }

    stat.ProcessSpeed.SetData(bytesCopied);
    stat.ProcessSpeed.End();

    Platform.Log(LogLevel.Info,
                 "Successfully migrated study {0} from {1} to {2} in {3} [ {4} files, {5} @ {6}, DB Update={7}, Remove Dir={8}]",
                 storage.StudyInstanceUid, storage.FilesystemTierEnum,
                 newFilesystem.Filesystem.FilesystemTierEnum,
                 TimeSpanFormatter.Format(stat.ProcessSpeed.ElapsedTime),
                 fileCounter,
                 ByteCountFormatter.Format(bytesCopied),
                 stat.CopyFiles.FormattedValue,
                 stat.DBUpdate.FormattedValue,
                 stat.DeleteDirTime.FormattedValue);

    string originalPath = storage.GetStudyPath();
    if (Directory.Exists(storage.GetStudyPath()))
    {
        Platform.Log(LogLevel.Info, "Original study folder could not be deleted. It must be cleaned up manually: {0}",
                     originalPath);
        ServerPlatform.Alert(AlertCategory.Application, AlertLevel.Warning,
                             WorkQueueItem.WorkQueueTypeEnum.ToString(), 1000, GetWorkQueueContextData(WorkQueueItem),
                             TimeSpan.Zero,
                             "Study has been migrated to a new tier. Original study folder must be cleaned up manually: {0}",
                             originalPath);
    }

    UpdateAverageStatistics(stat);
}
/// <summary>
/// Archive the specified <see cref="ArchiveQueue"/> item.
/// </summary>
/// <param name="queueItem">The ArchiveQueue item to archive.</param>
public void Run(ArchiveQueue queueItem)
{
    using (ArchiveProcessorContext executionContext = new ArchiveProcessorContext(queueItem))
    {
        try
        {
            if (!GetStudyStorageLocation(queueItem))
            {
                Platform.Log(LogLevel.Error,
                             "Unable to find readable study storage location for archival queue request {0}. Delaying request.",
                             queueItem.Key);
                queueItem.FailureDescription = "Unable to find readable study storage location for archival queue request.";
                _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2));
                return;
            }

            // First, check to see if we can lock the study; if not, just reschedule the queue entry.
            if (!_storageLocation.QueueStudyStateEnum.Equals(QueueStudyStateEnum.Idle))
            {
                Platform.Log(LogLevel.Info, "Study {0} on partition {1} is currently locked, delaying archival.",
                             _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description);
                queueItem.FailureDescription = "Study is currently locked, delaying archival.";
                _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2));
                return;
            }

            StudyIntegrityValidator validator = new StudyIntegrityValidator();
            validator.ValidateStudyState("Archive", _storageLocation, StudyIntegrityValidationModes.Default);

            using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
            {
                ILockStudy studyLock = update.GetBroker<ILockStudy>();
                LockStudyParameters parms = new LockStudyParameters
                    {
                        StudyStorageKey = queueItem.StudyStorageKey,
                        QueueStudyStateEnum = QueueStudyStateEnum.ArchiveScheduled
                    };
                bool retVal = studyLock.Execute(parms);
                if (!parms.Successful || !retVal)
                {
                    Platform.Log(LogLevel.Info, "Study {0} on partition {1} failed to lock, delaying archival.",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description);
                    queueItem.FailureDescription = "Study failed to lock, delaying archival.";
                    _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2));
                    return;
                }
                update.Commit();
            }

            string studyFolder = _storageLocation.GetStudyPath();

            string studyXmlFile = _storageLocation.GetStudyXmlPath();

            // Load the study Xml file; this is used to generate the list of dicom files to archive.
            LoadStudyXml(studyXmlFile);

            DicomFile file = LoadFileFromStudyXml();

            string patientsName = file.DataSet[DicomTags.PatientsName].GetString(0, string.Empty);
            string patientId = file.DataSet[DicomTags.PatientId].GetString(0, string.Empty);
            string accessionNumber = file.DataSet[DicomTags.AccessionNumber].GetString(0, string.Empty);

            Platform.Log(LogLevel.Info,
                         "Starting archival of study {0} for Patient {1} (PatientId:{2} A#:{3}) on Partition {4} on archive {5}",
                         _storageLocation.StudyInstanceUid, patientsName, patientId,
                         accessionNumber, _hsmArchive.ServerPartition.Description,
                         _hsmArchive.PartitionArchive.Description);

            // Use the command processor to do the archival.
            using (ServerCommandProcessor commandProcessor = new ServerCommandProcessor("Archive"))
            {
                _archiveXml = new XmlDocument();

                // Create the study date folder
                string zipFilename = Path.Combine(_hsmArchive.HsmPath, _storageLocation.StudyFolder);
                commandProcessor.AddCommand(new CreateDirectoryCommand(zipFilename));

                // Create a folder for the study
                zipFilename = Path.Combine(zipFilename, _storageLocation.StudyInstanceUid);
                commandProcessor.AddCommand(new CreateDirectoryCommand(zipFilename));

                // Save the archive data in the study folder, based on a filename with a date / time stamp
                string filename = String.Format("{0}.zip", Platform.Time.ToString("yyyy-MM-dd-HHmm"));
                zipFilename = Path.Combine(zipFilename, filename);

                // Create the Xml data to store in the ArchiveStudyStorage table telling
                // where the archived study is located.
                XmlElement hsmArchiveElement = _archiveXml.CreateElement("HsmArchive");
                _archiveXml.AppendChild(hsmArchiveElement);
                XmlElement studyFolderElement = _archiveXml.CreateElement("StudyFolder");
                hsmArchiveElement.AppendChild(studyFolderElement);
                studyFolderElement.InnerText = _storageLocation.StudyFolder;
                XmlElement filenameElement = _archiveXml.CreateElement("Filename");
                hsmArchiveElement.AppendChild(filenameElement);
                filenameElement.InnerText = filename;
                XmlElement studyInstanceUidElement = _archiveXml.CreateElement("Uid");
                hsmArchiveElement.AppendChild(studyInstanceUidElement);
                studyInstanceUidElement.InnerText = _storageLocation.StudyInstanceUid;

                // Create the Zip file
                commandProcessor.AddCommand(new CreateStudyZipCommand(zipFilename, _studyXml, studyFolder,
                                                                      executionContext.TempDirectory));

                // Update the database.
                commandProcessor.AddCommand(new InsertArchiveStudyStorageCommand(queueItem.StudyStorageKey,
                                                                                 queueItem.PartitionArchiveKey,
                                                                                 queueItem.GetKey(),
                                                                                 _storageLocation.ServerTransferSyntaxKey,
                                                                                 _archiveXml));

                StudyRulesEngine studyEngine = new StudyRulesEngine(_storageLocation, _hsmArchive.ServerPartition, _studyXml);
                studyEngine.Apply(ServerRuleApplyTimeEnum.StudyArchived, commandProcessor);

                if (!commandProcessor.Execute())
                {
                    Platform.Log(LogLevel.Error,
                                 "Unexpected failure archiving study ({0}) to archive {1}: {2}, zip filename: {3}",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.PartitionArchive.Description,
                                 commandProcessor.FailureReason, zipFilename);

                    queueItem.FailureDescription = commandProcessor.FailureReason;
                    _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time);
                }
                else
                {
                    Platform.Log(LogLevel.Info, "Successfully archived study {0} on {1} to zip {2}",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.PartitionArchive.Description, zipFilename);
                }

                // Log the current FilesystemQueue settings
                _storageLocation.LogFilesystemQueue();
            }
        }
        catch (StudyIntegrityValidationFailure ex)
        {
            StringBuilder error = new StringBuilder();
            error.AppendLine(String.Format("Partition : {0}", ex.ValidationStudyInfo.ServerAE));
            error.AppendLine(String.Format("Patient : {0}", ex.ValidationStudyInfo.PatientsName));
            error.AppendLine(String.Format("Study Uid : {0}", ex.ValidationStudyInfo.StudyInstaneUid));
            error.AppendLine(String.Format("Accession# : {0}", ex.ValidationStudyInfo.AccessionNumber));
            error.AppendLine(String.Format("Study Date : {0}", ex.ValidationStudyInfo.StudyDate));

            queueItem.FailureDescription = error.ToString();
            _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time);
        }
        catch (Exception e)
        {
            String msg = String.Format("Unexpected exception archiving study: {0} on {1}: {2}",
                                       _storageLocation.StudyInstanceUid,
                                       _hsmArchive.PartitionArchive.Description, e.Message);

            Platform.Log(LogLevel.Error, e, msg);
            queueItem.FailureDescription = msg;
            _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time);
        }
        finally
        {
            // Unlock the Queue Entry
            using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
            {
                ILockStudy studyLock = update.GetBroker<ILockStudy>();
                LockStudyParameters parms = new LockStudyParameters
                    {
                        StudyStorageKey = queueItem.StudyStorageKey,
                        QueueStudyStateEnum = QueueStudyStateEnum.Idle
                    };
                bool retVal = studyLock.Execute(parms);
                if (!parms.Successful || !retVal)
                {
                    Platform.Log(LogLevel.Info, "Study {0} on partition {1} failed to unlock.",
                                 _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description);
                }
                update.Commit();
            }
        }
    }
}
/// <summary>
/// Updates the 'State' of the filesystem associated with the 'FilesystemDelete' <see cref="ServiceLock"/> item.
/// </summary>
/// <param name="item">The ServiceLock item whose state is updated.</param>
/// <param name="fs">The filesystem being monitored.</param>
private static void UpdateState(Model.ServiceLock item, ServerFilesystemInfo fs)
{
    FilesystemState state = null;
    if (item.State != null && item.State.DocumentElement != null)
    {
        // Load the state from the database.
        state = XmlUtils.Deserialize<FilesystemState>(item.State.DocumentElement);
    }

    if (state == null)
        state = new FilesystemState();

    if (fs.AboveHighWatermark)
    {
        // We don't want to generate an alert if the filesystem is offline or not accessible.
        if (fs.Online && (fs.Readable || fs.Writeable))
        {
            TimeSpan ALERT_INTERVAL = TimeSpan.FromMinutes(ServiceLockSettings.Default.HighWatermarkAlertInterval);

            if (state.AboveHighWatermarkTimestamp == null)
                state.AboveHighWatermarkTimestamp = Platform.Time;

            TimeSpan elapse = (state.LastHighWatermarkAlertTimestamp != null)
                                  ? Platform.Time - state.LastHighWatermarkAlertTimestamp.Value
                                  : Platform.Time - state.AboveHighWatermarkTimestamp.Value;

            if (elapse.Duration() >= ALERT_INTERVAL)
            {
                ServerPlatform.Alert(AlertCategory.System, AlertLevel.Warning, "Filesystem", AlertTypeCodes.LowResources,
                                     null, TimeSpan.Zero,
                                     SR.AlertFilesystemAboveHW,
                                     fs.Filesystem.Description,
                                     TimeSpanFormatter.Format(Platform.Time - state.AboveHighWatermarkTimestamp.Value, true));

                state.LastHighWatermarkAlertTimestamp = Platform.Time;
            }
        }
        else
        {
            state.AboveHighWatermarkTimestamp = null;
            state.LastHighWatermarkAlertTimestamp = null;
        }
    }
    else
    {
        state.AboveHighWatermarkTimestamp = null;
        state.LastHighWatermarkAlertTimestamp = null;
    }

    XmlDocument stateXml = new XmlDocument();
    stateXml.AppendChild(stateXml.ImportNode(XmlUtils.Serialize(state), true));

    IPersistentStore store = PersistentStoreRegistry.GetDefaultStore();
    using (IUpdateContext ctx = store.OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        ServiceLockUpdateColumns columns = new ServiceLockUpdateColumns();
        columns.State = stateXml;

        IServiceLockEntityBroker broker = ctx.GetBroker<IServiceLockEntityBroker>();
        broker.Update(item.GetKey(), columns);
        ctx.Commit();
    }
}
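The high-watermark alert throttle embedded above can be read in isolation: remember when the filesystem first went above the watermark, and alert at most once per configured interval after that. A self-contained sketch, using a hypothetical type that mirrors the FilesystemState fields:

// Self-contained sketch (hypothetical type) of the alert throttle above:
// track when the watermark was first exceeded and alert at most once per interval.
public sealed class WatermarkAlertThrottle
{
    private DateTime? _aboveSince; // mirrors FilesystemState.AboveHighWatermarkTimestamp
    private DateTime? _lastAlert;  // mirrors FilesystemState.LastHighWatermarkAlertTimestamp

    public bool ShouldAlert(DateTime now, TimeSpan interval)
    {
        if (_aboveSince == null)
            _aboveSince = now; // first observation above the watermark

        DateTime reference = _lastAlert ?? _aboveSince.Value;
        if ((now - reference).Duration() < interval)
            return false; // alerted (or first crossed) too recently

        _lastAlert = now;
        return true;
    }

    public void Reset() // filesystem dropped below the watermark or went offline
    {
        _aboveSince = null;
        _lastAlert = null;
    }
}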
/// <summary>
/// Process StudyPurge <see cref="FilesystemQueue"/> entries.
/// </summary>
/// <param name="candidateList">The list of candidates for purging.</param>
private void ProcessStudyPurgeCandidates(IList<FilesystemQueue> candidateList)
{
    if (candidateList.Count > 0)
        Platform.Log(LogLevel.Debug, "Scheduling purge study for {0} eligible studies...", candidateList.Count);

    FilesystemProcessStatistics summaryStats = new FilesystemProcessStatistics("FilesystemPurgeInsert");

    foreach (FilesystemQueue queueItem in candidateList)
    {
        if (_bytesToRemove < 0 || CancelPending)
            break;

        StudyProcessStatistics stats = new StudyProcessStatistics("PurgeStudy");
        stats.TotalTime.Start();

        stats.StudyStorageTime.Start();
        // First, get the StudyStorage locations for the study, and calculate the disk usage.
        StudyStorageLocation location;
        if (!FilesystemMonitor.Instance.GetWritableStudyStorageLocation(queueItem.StudyStorageKey, out location))
            continue;
        stats.StudyStorageTime.End();

        stats.CalculateDirectorySizeTime.Start();
        // Get the disk usage
        float studySize = EstimateFolderSizeFromStudyXml(location);
        stats.CalculateDirectorySizeTime.End();
        stats.DirectorySize = (ulong)studySize;

        stats.DbUpdateTime.Start();
        // Update the DB
        using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
        {
            ILockStudy lockstudy = update.GetBroker<ILockStudy>();
            LockStudyParameters lockParms = new LockStudyParameters();
            lockParms.StudyStorageKey = location.Key;
            lockParms.QueueStudyStateEnum = QueueStudyStateEnum.PurgeScheduled;
            if (!lockstudy.Execute(lockParms) || !lockParms.Successful)
            {
                Platform.Log(LogLevel.Warn, "Unable to lock study for inserting Study Purge, skipping study ({0})",
                             location.StudyInstanceUid);
                continue;
            }

            IInsertWorkQueueFromFilesystemQueue insertBroker = update.GetBroker<IInsertWorkQueueFromFilesystemQueue>();

            InsertWorkQueueFromFilesystemQueueParameters insertParms = new InsertWorkQueueFromFilesystemQueueParameters();
            insertParms.StudyStorageKey = location.GetKey();
            insertParms.ServerPartitionKey = location.ServerPartitionKey;
            insertParms.ScheduledTime = _scheduledTime;
            insertParms.DeleteFilesystemQueue = true;
            insertParms.WorkQueueTypeEnum = WorkQueueTypeEnum.PurgeStudy;
            insertParms.FilesystemQueueTypeEnum = FilesystemQueueTypeEnum.PurgeStudy;

            WorkQueue insertItem = insertBroker.FindOne(insertParms);
            if (insertItem == null)
            {
                Platform.Log(LogLevel.Error,
                             "Unexpected problem inserting 'PurgeStudy' record into WorkQueue for Study {0}",
                             location.StudyInstanceUid);
            }
            else
            {
                update.Commit();
                _bytesToRemove -= studySize;
                _studiesPurged++;
                _scheduledTime = _scheduledTime.AddSeconds(2);
            }
        }
        stats.DbUpdateTime.End();

        stats.TotalTime.End();

        summaryStats.AddSubStats(stats);
        StatisticsLogger.Log(LogLevel.Debug, stats);
    }

    summaryStats.CalculateAverage();
    StatisticsLogger.Log(LogLevel.Info, false, summaryStats);
}
private void ReinventoryFilesystem(Filesystem filesystem)
{
    ServerPartition partition;

    DirectoryInfo filesystemDir = new DirectoryInfo(filesystem.FilesystemPath);

    foreach (DirectoryInfo partitionDir in filesystemDir.GetDirectories())
    {
        if (!GetServerPartition(partitionDir.Name, out partition))
            continue;

        foreach (DirectoryInfo dateDir in partitionDir.GetDirectories())
        {
            if (dateDir.FullName.EndsWith("Deleted", StringComparison.InvariantCultureIgnoreCase) ||
                dateDir.FullName.EndsWith(ServerPlatform.ReconcileStorageFolder, StringComparison.InvariantCultureIgnoreCase))
                continue;

            List<FileInfo> fileList;

            foreach (DirectoryInfo studyDir in dateDir.GetDirectories())
            {
                if (studyDir.FullName.EndsWith("Deleted", StringComparison.InvariantCultureIgnoreCase))
                    continue;

                // Check for a Cancel message.
                if (CancelPending)
                    return;

                String studyInstanceUid = studyDir.Name;

                StudyStorageLocation location;
                if (GetStudyStorageLocation(partition.Key, studyInstanceUid, out location))
                {
                    #region Study record exists in db

                    int integrityQueueCount;
                    int workQueueCount;
                    Study theStudy = GetStudyAndQueues(location, out integrityQueueCount, out workQueueCount);
                    if (theStudy != null)
                        continue;

                    if (integrityQueueCount != 0 && workQueueCount != 0)
                        continue;

                    fileList = LoadSopFiles(studyDir, false);

                    if (fileList.Count == 0)
                    {
                        Platform.Log(LogLevel.Warn,
                                     "Found empty study folder with StorageLocation, deleting StorageLocation: {0}\\{1}",
                                     dateDir.Name, studyDir.Name);
                        studyDir.Delete(true);

                        RemoveStudyStorage(location);
                        continue;
                    }

                    // WriteLock the new study storage for study processing.
                    if (!location.QueueStudyStateEnum.Equals(QueueStudyStateEnum.ProcessingScheduled))
                    {
                        string failureReason;
                        if (!ServerHelper.LockStudy(location.Key, QueueStudyStateEnum.ProcessingScheduled, out failureReason))
                            Platform.Log(LogLevel.Error, "Unable to lock study {0} for Study Processing",
                                         location.StudyInstanceUid);
                    }

                    #endregion
                }
                else
                {
                    #region Directory not in DB

                    fileList = LoadSopFiles(studyDir, true);

                    if (fileList.Count == 0)
                    {
                        Platform.Log(LogLevel.Warn, "Found empty study folder: {0}\\{1}", dateDir.Name, studyDir.Name);
                        continue;
                    }

                    DicomFile file = LoadFileFromList(fileList);

                    if (file == null)
                    {
                        Platform.Log(LogLevel.Warn, "Found directory with no readable files: {0}\\{1}",
                                     dateDir.Name, studyDir.Name);
                        continue;
                    }

                    // Do a second check, using the study instance uid from a file in the directory.
                    // We had an issue with trailing periods on uids causing us to not find the
                    // study storage, and insert a new record into the database.
                    studyInstanceUid = file.DataSet[DicomTags.StudyInstanceUid].ToString();
                    if (GetStudyStorageLocation(partition.Key, studyInstanceUid, out location))
                        continue;

                    StudyStorage storage;
                    if (GetStudyStorage(partition, studyInstanceUid, out storage))
                    {
                        Platform.Log(LogLevel.Warn, "Study {0} on filesystem partition {1} is offline {2}",
                                     studyInstanceUid, partition.Description, studyDir.ToString());
                        continue;
                    }

                    Platform.Log(LogLevel.Info, "Reinventory inserting study storage location for {0} on partition {1}",
                                 studyInstanceUid, partition.Description);

                    // Insert StudyStorage
                    using (IUpdateContext update = _store.OpenUpdateContext(UpdateContextSyncMode.Flush))
                    {
                        IInsertStudyStorage studyInsert = update.GetBroker<IInsertStudyStorage>();
                        InsertStudyStorageParameters insertParms = new InsertStudyStorageParameters
                            {
                                ServerPartitionKey = partition.GetKey(),
                                StudyInstanceUid = studyInstanceUid,
                                Folder = dateDir.Name,
                                FilesystemKey = filesystem.GetKey(),
                                QueueStudyStateEnum = QueueStudyStateEnum.Idle
                            };
                        if (file.TransferSyntax.LosslessCompressed)
                        {
                            insertParms.TransferSyntaxUid = file.TransferSyntax.UidString;
                            insertParms.StudyStatusEnum = StudyStatusEnum.OnlineLossless;
                        }
                        else if (file.TransferSyntax.LossyCompressed)
                        {
                            insertParms.TransferSyntaxUid = file.TransferSyntax.UidString;
                            insertParms.StudyStatusEnum = StudyStatusEnum.OnlineLossy;
                        }
                        else
                        {
                            insertParms.TransferSyntaxUid = file.TransferSyntax.UidString;
                            insertParms.StudyStatusEnum = StudyStatusEnum.Online;
                        }

                        location = studyInsert.FindOne(insertParms);

                        // WriteLock the new study storage for study processing.
                        ILockStudy lockStudy = update.GetBroker<ILockStudy>();
                        LockStudyParameters lockParms = new LockStudyParameters
                            {
                                StudyStorageKey = location.Key,
                                QueueStudyStateEnum = QueueStudyStateEnum.ProcessingScheduled
                            };
                        if (!lockStudy.Execute(lockParms) || !lockParms.Successful)
                            Platform.Log(LogLevel.Error, "Unable to lock study {0} for Study Processing",
                                         location.StudyInstanceUid);

                        update.Commit();
                    }

                    #endregion
                }

                string studyXml = location.GetStudyXmlPath();
                if (File.Exists(studyXml))
                    FileUtils.Delete(studyXml);

                string studyGZipXml = location.GetCompressedStudyXmlPath();
                if (File.Exists(studyGZipXml))
                    FileUtils.Delete(studyGZipXml);

                foreach (FileInfo sopFile in fileList)
                {
                    String sopInstanceUid = sopFile.Name.Replace(sopFile.Extension, string.Empty);

                    using (ServerExecutionContext context = new ServerExecutionContext())
                    {
                        // Just use a read context here, in hopes of improving
                        // performance. Every other place in the code should use
                        // update contexts when doing transactions.
                        IInsertWorkQueue workQueueInsert = context.ReadContext.GetBroker<IInsertWorkQueue>();

                        InsertWorkQueueParameters queueInsertParms = new InsertWorkQueueParameters
                            {
                                WorkQueueTypeEnum = WorkQueueTypeEnum.StudyProcess,
                                StudyStorageKey = location.GetKey(),
                                ServerPartitionKey = partition.GetKey(),
                                SeriesInstanceUid = sopFile.Directory.Name,
                                SopInstanceUid = sopInstanceUid,
                                ScheduledTime = Platform.Time
                            };

                        if (workQueueInsert.FindOne(queueInsertParms) == null)
                            Platform.Log(LogLevel.Error,
                                         "Failure attempting to insert SOP Instance into WorkQueue during Reinventory.");
                    }
                }
            }

            // Cleanup the date directory, if it's empty.
            DirectoryUtility.DeleteIfEmpty(dateDir.FullName);
        }
    }
}
/// <summary>
/// Process StudyCompress candidates retrieved from the <see cref="Model.FilesystemQueue"/> table.
/// </summary>
/// <param name="candidateList">The list of candidate studies for compression.</param>
/// <param name="type">The type of compress candidate (lossy or lossless).</param>
private void ProcessCompressCandidates(IEnumerable<FilesystemQueue> candidateList, FilesystemQueueTypeEnum type)
{
    using (ServerExecutionContext context = new ServerExecutionContext())
    {
        DateTime scheduledTime = Platform.Time.AddSeconds(10);

        foreach (FilesystemQueue queueItem in candidateList)
        {
            // Check for Shutdown/Cancel
            if (CancelPending)
                break;

            // First, get the StudyStorage locations for the study, and calculate the disk usage.
            StudyStorageLocation location;
            if (!FilesystemMonitor.Instance.GetWritableStudyStorageLocation(queueItem.StudyStorageKey, out location))
                continue;

            StudyXml studyXml;
            try
            {
                studyXml = LoadStudyXml(location);
            }
            catch (Exception e)
            {
                Platform.Log(LogLevel.Error, e,
                             "Skipping compress candidate, unexpected exception loading StudyXml file for {0}",
                             location.GetStudyPath());
                continue;
            }

            using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
            {
                ILockStudy lockstudy = update.GetBroker<ILockStudy>();
                LockStudyParameters lockParms = new LockStudyParameters();
                lockParms.StudyStorageKey = location.Key;
                lockParms.QueueStudyStateEnum = QueueStudyStateEnum.CompressScheduled;
                if (!lockstudy.Execute(lockParms) || !lockParms.Successful)
                {
                    Platform.Log(LogLevel.Warn,
                                 "Unable to lock study for inserting Compress Study. Reason:{0}. Skipping study ({1})",
                                 lockParms.FailureReason, location.StudyInstanceUid);
                    continue;
                }

                scheduledTime = scheduledTime.AddSeconds(3);

                IInsertWorkQueueFromFilesystemQueue workQueueInsert = update.GetBroker<IInsertWorkQueueFromFilesystemQueue>();

                InsertWorkQueueFromFilesystemQueueParameters insertParms = new InsertWorkQueueFromFilesystemQueueParameters();
                insertParms.WorkQueueTypeEnum = WorkQueueTypeEnum.CompressStudy;
                insertParms.FilesystemQueueTypeEnum = type; // lossless or lossy, per the caller
                insertParms.StudyStorageKey = location.GetKey();
                insertParms.ServerPartitionKey = location.ServerPartitionKey;
                insertParms.ScheduledTime = scheduledTime;
                insertParms.DeleteFilesystemQueue = true;
                insertParms.Data = queueItem.QueueXml;

                try
                {
                    WorkQueue entry = workQueueInsert.FindOne(insertParms);

                    InsertWorkQueueUidFromStudyXml(studyXml, update, entry.GetKey());

                    update.Commit();
                    _studiesInserted++;
                }
                catch (Exception e)
                {
                    Platform.Log(LogLevel.Error, e,
                                 "Skipping compress record, unexpected problem inserting 'CompressStudy' record into WorkQueue for Study {0}",
                                 location.StudyInstanceUid);
                    // throw; -- would cause abort of inserts, go ahead and try everything
                }
            }
        }
    }
}
/// <summary>
/// Look up the device entity in the database corresponding to the remote AE of the association.
/// </summary>
/// <param name="partition">The partition on which to look up the device</param>
/// <param name="association">The association</param>
/// <param name="isNew">Indicates whether the device returned was created by the call.</param>
/// <returns>The device record corresponding to the calling AE of the association</returns>
public static Device LookupDevice(ServerPartition partition, AssociationParameters association, out bool isNew)
{
    isNew = false;

    Device device;
    if (DeviceCache.TryGetValue(association.CallingAE + partition.Key, out device))
    {
        return device;
    }

    using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        var deviceEntityBroker = updateContext.GetBroker<IDeviceEntityBroker>();

        // Set up the select parameters.
        var queryParameters = new DeviceSelectCriteria();
        queryParameters.AeTitle.EqualTo(association.CallingAE);
        queryParameters.ServerPartitionKey.EqualTo(partition.GetKey());

        // Pick the exact (case-sensitive) AE title match from the query results.
        var devices = deviceEntityBroker.Find(queryParameters);
        foreach (var d in devices)
        {
            if (string.Compare(d.AeTitle, association.CallingAE, false, CultureInfo.InvariantCulture) == 0)
            {
                device = d;
                break;
            }
        }

        if (device == null)
        {
            if (!partition.AcceptAnyDevice)
            {
                return null;
            }

            if (partition.AutoInsertDevice)
            {
                // Auto-insert a new entry in the table.
                var updateColumns = new DeviceUpdateColumns
                {
                    AeTitle = association.CallingAE,
                    Enabled = true,
                    Description = String.Format("AE: {0}", association.CallingAE),
                    Dhcp = false,
                    IpAddress = association.RemoteEndPoint.Address.ToString(),
                    ServerPartitionKey = partition.GetKey(),
                    Port = partition.DefaultRemotePort,
                    AllowQuery = true,
                    AllowRetrieve = true,
                    AllowStorage = true,
                    ThrottleMaxConnections = ImageServerCommonConfiguration.Device.MaxConnections,
                    DeviceTypeEnum = DeviceTypeEnum.Workstation
                };
                device = deviceEntityBroker.Insert(updateColumns);
                updateContext.Commit();
                isNew = true;
            }
        }

        if (device != null)
        {
            // For DHCP devices, we always update the remote IP address, if it's changed from what is in the DB.
            if (device.Dhcp && !association.RemoteEndPoint.Address.ToString().Equals(device.IpAddress))
            {
                var updateColumns = new DeviceUpdateColumns
                {
                    IpAddress = association.RemoteEndPoint.Address.ToString(),
                    LastAccessedTime = Platform.Time
                };
                if (!deviceEntityBroker.Update(device.GetKey(), updateColumns))
                {
                    Platform.Log(LogLevel.Error, "Unable to update IP Address for DHCP device {0} on partition '{1}'",
                                 device.AeTitle, partition.Description);
                }
                else
                {
                    updateContext.Commit();
                }
            }
            else if (!isNew)
            {
                var updateColumns = new DeviceUpdateColumns { LastAccessedTime = Platform.Time };
                if (!deviceEntityBroker.Update(device.GetKey(), updateColumns))
                {
                    Platform.Log(LogLevel.Error, "Unable to update LastAccessedTime for device {0} on partition '{1}'",
                                 device.AeTitle, partition.Description);
                }
                else
                {
                    updateContext.Commit();
                }
            }

            DeviceCache.Add(device.AeTitle + partition.Key, device);
        }
    }

    return device;
}
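// A minimal sketch of how LookupDevice might be used when deciding whether to
// accept an incoming association; the enclosing handler and its rejection
// mechanics are hypothetical.
bool isNewDevice;
Device device = LookupDevice(partition, association, out isNewDevice);
if (device == null)
{
    // AcceptAnyDevice is off and no matching device exists: reject.
    Platform.Log(LogLevel.Warn, "Rejecting association from unknown AE {0} on partition {1}",
                 association.CallingAE, partition.AeTitle);
}
else if (!device.Enabled)
{
    Platform.Log(LogLevel.Warn, "Device {0} is disabled; rejecting association.", device.AeTitle);
}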
private void RestoreOnlineStudy(RestoreQueue queueItem, string zipFile, string destinationFolder)
{
    try
    {
        using (var processor = new ServerCommandProcessor("HSM Restore Online Study"))
        {
            // Queue an extract command for each entry in the zip archive.
            var zipService = Platform.GetService<IZipService>();
            using (var zipReader = zipService.OpenRead(zipFile))
            {
                foreach (string file in zipReader.EntryFileNames)
                {
                    processor.AddCommand(new ExtractZipFileAndReplaceCommand(zipFile, file, destinationFolder));
                }
            }

            // Rebuild the StudyXml, in case settings have changed or problems have occurred since archival.
            processor.AddCommand(new RebuildStudyXmlCommand(_location.StudyInstanceUid, destinationFolder));

            StudyStatusEnum status;
            if (_syntax.Encapsulated && _syntax.LosslessCompressed)
            {
                status = StudyStatusEnum.OnlineLossless;
            }
            else if (_syntax.Encapsulated && _syntax.LossyCompressed)
            {
                status = StudyStatusEnum.OnlineLossy;
            }
            else
            {
                status = StudyStatusEnum.Online;
            }
            processor.AddCommand(new UpdateStudyStateCommand(_location, status, _serverSyntax));

            // Apply the rules engine.
            var context = new ServerActionContext(null, _location.FilesystemKey, _hsmArchive.ServerPartition, queueItem.StudyStorageKey)
            {
                CommandProcessor = processor
            };
            processor.AddCommand(new ApplyRulesCommand(destinationFolder, _location.StudyInstanceUid, context));

            if (!processor.Execute())
            {
                Platform.Log(LogLevel.Error, "Unexpected error processing restore request for {0} on archive {1}",
                             _location.StudyInstanceUid, _hsmArchive.PartitionArchive.Description);
                queueItem.FailureDescription = processor.FailureReason;
                _hsmArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Failed, Platform.Time);
            }
            else
            {
                // Unlock the queue entry and set it to complete
                using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
                {
                    _hsmArchive.UpdateRestoreQueue(update, queueItem, RestoreQueueStatusEnum.Completed, Platform.Time.AddSeconds(60));
                    var studyLock = update.GetBroker<ILockStudy>();
                    var parms = new LockStudyParameters
                    {
                        StudyStorageKey = queueItem.StudyStorageKey,
                        QueueStudyStateEnum = QueueStudyStateEnum.Idle
                    };
                    bool retVal = studyLock.Execute(parms);
                    if (!parms.Successful || !retVal)
                    {
                        Platform.Log(LogLevel.Warn, "Study {0} on partition {1} failed to unlock.",
                                     _location.StudyInstanceUid, _hsmArchive.ServerPartition.Description);
                    }
                    update.Commit();

                    Platform.Log(LogLevel.Info, "Successfully restored study: {0} on archive {1}",
                                 _location.StudyInstanceUid, _hsmArchive.PartitionArchive.Description);

                    _location = ReloadStorageLocation();
                    OnStudyRestored(_location);
                }
            }
        }
    }
    catch (StudyIntegrityValidationFailure ex)
    {
        // The study has been restored, but it appears to be corrupted. Need to reprocess it.
        ReprocessStudy(_location, ex.Message);
    }
    catch (Exception e)
    {
        Platform.Log(LogLevel.Error, e, "Unexpected exception processing restore request for {0} on archive {1}",
                     _location.StudyInstanceUid, _hsmArchive.PartitionArchive.Description);
        queueItem.FailureDescription = e.Message;
        _hsmArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Failed, Platform.Time);
    }
}
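// With a helper like the GetStudyStatus sketch shown earlier, the three-way
// status selection above collapses to a single call; _syntax is assumed to be
// the same TransferSyntax type used in the reinventory code, and the
// Encapsulated checks are assumed redundant since, as in DICOM generally,
// compressed transfer syntaxes are always encapsulated.
StudyStatusEnum status = GetStudyStatus(_syntax);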
private void EnsureConsistentObjectCount(StudyXml studyXml, IDictionary<string, List<string>> processedSeriesMap)
{
    Platform.CheckForNullReference(studyXml, "studyXml");

    // We have to ensure that the counts in the studyXml and what we have processed are consistent.
    // Files or folders may have been processed earlier but gone missing by the time the entry is resumed.
    // We have to remove them from the studyXml before committing it.
    Platform.Log(LogLevel.Info, "Verifying study xml against the filesystems");

    int filesProcessed = 0;
    foreach (string seriesUid in processedSeriesMap.Keys)
    {
        filesProcessed += processedSeriesMap[seriesUid].Count;
    }

    // Used to keep track of the series to be removed.
    // We can't remove the item from the study xml while we are
    // iterating through it.
    var seriesToRemove = new List<string>();
    foreach (SeriesXml seriesXml in studyXml)
    {
        if (!processedSeriesMap.ContainsKey(seriesXml.SeriesInstanceUid))
        {
            seriesToRemove.Add(seriesXml.SeriesInstanceUid);
        }
        else
        {
            // Check all instances in the series.
            List<string> foundInstances = processedSeriesMap[seriesXml.SeriesInstanceUid];
            var instanceToRemove = new List<string>();
            foreach (InstanceXml instanceXml in seriesXml)
            {
                if (!foundInstances.Contains(instanceXml.SopInstanceUid))
                {
                    // The SOP no longer exists on the filesystem.
                    instanceToRemove.Add(instanceXml.SopInstanceUid);
                }
            }

            foreach (string instanceUid in instanceToRemove)
            {
                seriesXml[instanceUid] = null;
                Platform.Log(LogLevel.Info, "Removed SOP {0} from the study xml: it no longer exists.", instanceUid);
            }
        }
    }

    foreach (string seriesUid in seriesToRemove)
    {
        studyXml[seriesUid] = null;
        Platform.Log(LogLevel.Info, "Removed Series {0} from the study xml: it no longer exists.", seriesUid);
    }

    Platform.CheckTrue(studyXml.NumberOfStudyRelatedSeries == processedSeriesMap.Count,
                       String.Format("Number of series in the xml does not match the number of series reprocessed: {0} vs {1}",
                                     studyXml.NumberOfStudyRelatedSeries, processedSeriesMap.Count));

    Platform.CheckTrue(studyXml.NumberOfStudyRelatedInstances == filesProcessed,
                       String.Format("Number of instances in the xml does not match the number of files reprocessed: {0} vs {1}",
                                     studyXml.NumberOfStudyRelatedInstances, filesProcessed));

    Platform.Log(LogLevel.Info, "Study xml has been verified.");

    if (StorageLocation.Study != null)
    {
        // Update the instance count in the db.
        using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
        {
            var broker = updateContext.GetBroker<IStudyEntityBroker>();
            var columns = new StudyUpdateColumns
            {
                NumberOfStudyRelatedInstances = studyXml.NumberOfStudyRelatedInstances,
                NumberOfStudyRelatedSeries = studyXml.NumberOfStudyRelatedSeries
            };
            broker.Update(StorageLocation.Study.GetKey(), columns);
            updateContext.Commit();
        }
    }
    else
    {
        // Alert on the orphaned StudyStorage entry.
        RaiseAlert(WorkQueueItem, AlertLevel.Critical,
                   String.Format("Study {0} has been reprocessed but the Study record was NOT created. Images reprocessed: {1}. Path={2}",
                                 StorageLocation.StudyInstanceUid, filesProcessed, StorageLocation.GetStudyPath()));
    }
}
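// EnsureConsistentObjectCount expects a map of series instance UID to the list
// of SOP instance UIDs that were actually reprocessed. A sketch of how such a
// map could be assembled from the study folder layout implied by the
// reinventory code above (series folders containing <sop uid>.dcm files);
// this helper is hypothetical.
private static IDictionary<string, List<string>> BuildProcessedSeriesMap(string studyPath)
{
    var map = new Dictionary<string, List<string>>();
    foreach (DirectoryInfo seriesDir in new DirectoryInfo(studyPath).GetDirectories())
    {
        var sopUids = new List<string>();
        foreach (FileInfo sopFile in seriesDir.GetFiles("*.dcm"))
        {
            // File names are assumed to be "<sop instance uid>.dcm".
            sopUids.Add(Path.GetFileNameWithoutExtension(sopFile.Name));
        }
        map[seriesDir.Name] = sopUids;
    }
    return map;
}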
public void OnStudyDeleted()
{
    if (!Enabled)
    {
        return;
    }

    if (_context.WorkQueueItem.WorkQueueTypeEnum == WorkQueueTypeEnum.WebDeleteStudy)
    {
        Study study = _context.Study;
        if (study == null)
        {
            Platform.Log(LogLevel.Info, "Not logging Study Delete information due to missing Study record for study: {0} on partition {1}",
                         _context.StorageLocation.StudyInstanceUid, _context.ServerPartition.AeTitle);
            return;
        }

        StudyStorageLocation storage = _context.StorageLocation;

        using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
        {
            // Set up the parameters
            IStudyDeleteRecordEntityBroker broker = updateContext.GetBroker<IStudyDeleteRecordEntityBroker>();

            StudyDeleteRecordUpdateColumns parms = new StudyDeleteRecordUpdateColumns();
            parms.Timestamp = Platform.Time;

            WebDeleteStudyLevelQueueData extendedInfo = XmlUtils.Deserialize<WebDeleteStudyLevelQueueData>(_context.WorkQueueItem.Data);
            parms.Reason = extendedInfo != null
                               ? extendedInfo.Reason
                               : _context.WorkQueueItem.WorkQueueTypeEnum.LongDescription;
            parms.ServerPartitionAE = _context.ServerPartition.AeTitle;
            parms.FilesystemKey = storage.FilesystemKey;
            parms.AccessionNumber = study.AccessionNumber;
            parms.PatientId = study.PatientId;
            parms.PatientsName = study.PatientsName;
            parms.StudyInstanceUid = study.StudyInstanceUid;
            parms.StudyDate = study.StudyDate;
            parms.StudyDescription = study.StudyDescription;
            parms.StudyTime = study.StudyTime;
            parms.BackupPath = BackupZipFileRelativePath;

            if (_archives != null && _archives.Count > 0)
            {
                parms.ArchiveInfo = XmlUtils.SerializeAsXmlDoc(_archives);
            }

            StudyDeleteExtendedInfo extInfo = new StudyDeleteExtendedInfo();
            extInfo.ServerInstanceId = ServerPlatform.ServerInstanceId;
            extInfo.UserId = _context.UserId;
            extInfo.UserName = _context.UserName;
            parms.ExtendedInfo = XmlUtils.SerializeAsString(extInfo);

            StudyDeleteRecord deleteRecord = broker.Insert(parms);
            if (deleteRecord == null)
            {
                Platform.Log(LogLevel.Error, "Unexpected error when trying to create study delete record: {0} on partition {1}",
                             study.StudyInstanceUid, _context.ServerPartition.Description);
            }
            else
            {
                updateContext.Commit();
            }
        }
    }
}
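// The ExtendedInfo column stores a StudyDeleteExtendedInfo serialized with
// XmlUtils.SerializeAsString, so audit tooling can read it back later. A
// sketch, assuming XmlUtils has a matching Deserialize overload that accepts
// the stored string:
StudyDeleteExtendedInfo extInfo = XmlUtils.Deserialize<StudyDeleteExtendedInfo>(deleteRecord.ExtendedInfo);
Platform.Log(LogLevel.Info, "Study {0} was deleted by user {1} on server {2}",
             deleteRecord.StudyInstanceUid, extInfo.UserName, extInfo.ServerInstanceId);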
/// <summary>
/// Inserts a work queue entry to process the duplicates.
/// </summary>
/// <param name="entryKey"><see cref="ServerEntityKey"/> of the <see cref="StudyIntegrityQueue"/> entry that has <see cref="StudyIntegrityReasonEnum"/> equal to <see cref="StudyIntegrityReasonEnum.Duplicate"/></param>
/// <param name="action">The action to take on the duplicates</param>
public void Process(ServerEntityKey entryKey, ProcessDuplicateAction action)
{
    DuplicateSopReceivedQueue entry = DuplicateSopReceivedQueue.Load(HttpContextData.Current.ReadContext, entryKey);
    Platform.CheckTrue(entry.StudyIntegrityReasonEnum == StudyIntegrityReasonEnum.Duplicate, "Invalid type of entry");

    IList<StudyIntegrityQueueUid> uids = LoadDuplicateSopUid(entry);

    using (IUpdateContext context = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        ProcessDuplicateQueueEntryQueueData data = new ProcessDuplicateQueueEntryQueueData
        {
            Action = action,
            DuplicateSopFolder = entry.GetFolderPath(context),
            UserName = ServerHelper.CurrentUserName
        };

        // Lock the study for reconciliation before queuing the work item.
        LockStudyParameters lockParms = new LockStudyParameters
        {
            QueueStudyStateEnum = QueueStudyStateEnum.ReconcileScheduled,
            StudyStorageKey = entry.StudyStorageKey
        };
        ILockStudy lockBroker = context.GetBroker<ILockStudy>();
        lockBroker.Execute(lockParms);
        if (!lockParms.Successful)
        {
            throw new ApplicationException(lockParms.FailureReason);
        }

        IWorkQueueProcessDuplicateSopBroker broker = context.GetBroker<IWorkQueueProcessDuplicateSopBroker>();
        WorkQueueProcessDuplicateSopUpdateColumns columns = new WorkQueueProcessDuplicateSopUpdateColumns
        {
            Data = XmlUtils.SerializeAsXmlDoc(data),
            GroupID = entry.GroupID,
            ScheduledTime = Platform.Time,
            ExpirationTime = Platform.Time.Add(TimeSpan.FromMinutes(15)),
            ServerPartitionKey = entry.ServerPartitionKey,
            WorkQueuePriorityEnum = WorkQueuePriorityEnum.Medium,
            StudyStorageKey = entry.StudyStorageKey,
            WorkQueueStatusEnum = WorkQueueStatusEnum.Pending
        };

        WorkQueueProcessDuplicateSop processDuplicateWorkQueueEntry = broker.Insert(columns);

        // Move the duplicate UIDs from the StudyIntegrityQueue to the new work queue entry.
        IWorkQueueUidEntityBroker workQueueUidBroker = context.GetBroker<IWorkQueueUidEntityBroker>();
        IStudyIntegrityQueueUidEntityBroker duplicateUidBroker = context.GetBroker<IStudyIntegrityQueueUidEntityBroker>();
        foreach (StudyIntegrityQueueUid uid in uids)
        {
            WorkQueueUidUpdateColumns uidColumns = new WorkQueueUidUpdateColumns
            {
                Duplicate = true,
                Extension = ServerPlatform.DuplicateFileExtension,
                SeriesInstanceUid = uid.SeriesInstanceUid,
                SopInstanceUid = uid.SopInstanceUid,
                RelativePath = uid.RelativePath,
                WorkQueueKey = processDuplicateWorkQueueEntry.GetKey()
            };
            workQueueUidBroker.Insert(uidColumns);
            duplicateUidBroker.Delete(uid.GetKey());
        }

        IDuplicateSopEntryEntityBroker duplicateEntryBroker = context.GetBroker<IDuplicateSopEntryEntityBroker>();
        duplicateEntryBroker.Delete(entry.GetKey());

        context.Commit();
    }
}
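// A sketch of invoking Process for a StudyIntegrityQueue entry selected in the
// UI; the host class name and the chosen ProcessDuplicateAction value are
// assumptions, and entryKey would normally come from the grid selection.
var processor = new DuplicateSopEntryProcessor();
processor.Process(entryKey, ProcessDuplicateAction.OverwriteAsIs);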