private void Apply_Click(object sender, EventArgs e)
{
    // Nothing to do when no row is selected (original indexed SelectedRows[0] unguarded).
    if (dataGridView1.SelectedRows.Count == 0)
        return;

    var view = dataGridView1.SelectedRows[0].DataBoundItem as DataRowView;
    if (view == null)
        return;

    // Parse the user-supplied XML up front so a malformed document fails fast,
    // before a database transaction is opened. The StringReader is disposed
    // (it was previously leaked).
    var doc = new XmlDocument();
    using (var reader = new StringReader(textBox1.Text))
    {
        doc.Load(reader);
    }

    var guid = (Guid) view.Row["GUID"];
    IPersistentStore store = PersistentStoreRegistry.GetDefaultStore();
    using (IUpdateContext ctx = store.OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        // Load the study the user selected in the grid.
        var studyBroker = ctx.GetBroker<IStudyEntityBroker>();
        var key = new ServerEntityKey("Study", guid);
        Model.Study study = studyBroker.Load(key);

        // Locate the StudyStorage record for the study on its partition.
        var storageBroker = ctx.GetBroker<IStudyStorageEntityBroker>();
        var parms = new StudyStorageSelectCriteria();
        parms.ServerPartitionKey.EqualTo(study.ServerPartitionKey);
        parms.StudyInstanceUid.EqualTo(study.StudyInstanceUid);
        IList<Model.StudyStorage> storageList = storageBroker.Find(parms);
        if (storageList.Count == 0)
            return; // no storage record to attach the work item to (was an unguarded [0])

        Model.StudyStorage storage = storageList[0];

        // Insert a WebEditStudy work-queue entry carrying the edit XML payload.
        var workQueueBroker = ctx.GetBroker<IWorkQueueEntityBroker>();
        var columns = new WorkQueueUpdateColumns
        {
            ServerPartitionKey = study.ServerPartitionKey,
            StudyStorageKey = storage.GetKey(),
            ExpirationTime = DateTime.Now.AddHours(1),
            ScheduledTime = DateTime.Now,
            InsertTime = DateTime.Now,
            WorkQueuePriorityEnum = Model.WorkQueuePriorityEnum.Medium,
            WorkQueueStatusEnum = Model.WorkQueueStatusEnum.Pending,
            WorkQueueTypeEnum = Model.WorkQueueTypeEnum.WebEditStudy,
            Data = doc
        };
        workQueueBroker.Insert(columns);
        ctx.Commit();
    }
}
/// <summary>
/// Persists the (updated) reprocess-study queue state back into the WorkQueue row's Data column.
/// </summary>
/// <param name="item">The WorkQueue entry whose row is updated.</param>
/// <param name="queueData">The queue state to mark as started and persist.</param>
private void SaveState(Model.WorkQueue item, ReprocessStudyQueueData queueData)
{
    // Update the queue state
    using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        // Flag the entry so a restart knows processing has already begun at least once.
        queueData.State.ExecuteAtLeastOnce = true;
        var broker = updateContext.GetBroker<IWorkQueueEntityBroker>();
        // BUG FIX: serialize the queueData argument that was just modified above,
        // not the _queueData field (the two may not reference the same instance).
        var parms = new WorkQueueUpdateColumns { Data = XmlUtils.SerializeAsXmlDoc(queueData) };
        broker.Update(item.GetKey(), parms);
        updateContext.Commit();
    }
}
/// <summary>
/// Schedules a move of a study (or a subset of its series) to the given device.
/// </summary>
/// <param name="study">The study to move.</param>
/// <param name="device">The destination device.</param>
/// <param name="seriesList">Series to move, or null to move the whole study.</param>
/// <returns>true if the move was scheduled; false if series-level scheduling failed.</returns>
public bool MoveStudy(Study study, Device device, IList<Series> seriesList)
{
    if (seriesList != null)
    {
        // Series-level move: schedule through StudyEditorHelper inside one transaction.
        using (
            IUpdateContext context = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
        {
            ServerPartition partition = ServerPartition.Load(study.ServerPartitionKey);

            List<string> seriesUids = new List<string>();
            foreach (Series series in seriesList)
            {
                seriesUids.Add(series.SeriesInstanceUid);
            }

            IList<WorkQueue> entries = StudyEditorHelper.MoveSeries(context, partition, study.StudyInstanceUid, device.Key, seriesUids);
            if (entries == null)
                return false; // BUG FIX: previously returned true even though nothing was scheduled/committed

            context.Commit();
            return true;
        }
    }

    // Study-level move: insert a WebMoveStudy work-queue entry directly.
    WorkQueueAdaptor workqueueAdaptor = new WorkQueueAdaptor();
    DateTime time = Platform.Time;
    WorkQueueUpdateColumns columns = new WorkQueueUpdateColumns
    {
        WorkQueueTypeEnum = WorkQueueTypeEnum.WebMoveStudy,
        WorkQueueStatusEnum = WorkQueueStatusEnum.Pending,
        ServerPartitionKey = study.ServerPartitionKey,
        StudyStorageKey = study.StudyStorageKey,
        FailureCount = 0,
        DeviceKey = device.Key,
        ScheduledTime = time,
        ExpirationTime = time.AddMinutes(4)
    };
    workqueueAdaptor.Add(columns);
    return true;
}
/// <summary>
/// Inserts a new WorkQueue row that copies every persisted column of
/// <paramref name="entity"/>, using the caller's update context.
/// </summary>
/// <param name="update">Open update context to obtain the broker from.</param>
/// <param name="entity">Source entity whose column values are copied.</param>
/// <returns>The newly inserted <see cref="WorkQueue"/> entity.</returns>
static public WorkQueue Insert(IUpdateContext update, WorkQueue entity)
{
    var broker = update.GetBroker<IWorkQueueEntityBroker>();

    // Copy all persisted columns from the source entity into the insert row.
    var row = new WorkQueueUpdateColumns
    {
        ServerPartitionKey = entity.ServerPartitionKey,
        StudyStorageKey = entity.StudyStorageKey,
        WorkQueueTypeEnum = entity.WorkQueueTypeEnum,
        WorkQueueStatusEnum = entity.WorkQueueStatusEnum,
        WorkQueuePriorityEnum = entity.WorkQueuePriorityEnum,
        FailureCount = entity.FailureCount,
        ScheduledTime = entity.ScheduledTime,
        InsertTime = entity.InsertTime,
        LastUpdatedTime = entity.LastUpdatedTime,
        FailureDescription = entity.FailureDescription,
        Data = entity.Data,
        ExternalRequestQueueKey = entity.ExternalRequestQueueKey,
        ProcessorID = entity.ProcessorID,
        GroupID = entity.GroupID,
        ExpirationTime = entity.ExpirationTime,
        DeviceKey = entity.DeviceKey,
        StudyHistoryKey = entity.StudyHistoryKey
    };

    return broker.Insert(row);
}
/// <summary>
/// Reschedule a list of <see cref="WorkQueue"/> items
/// </summary>
/// <param name="items">List of <see cref="WorkQueue"/> items to be rescheduled</param>
/// <param name="newScheduledTime">New schedule start date/time</param>
/// <param name="expirationTime">New expiration date/time</param>
/// <param name="priority">New priority</param>
/// <returns>A value indicating whether all <see cref="WorkQueue"/> items in <paramref name="items"/> are updated successfully.</returns>
/// <remarks>
/// If one or more <see cref="WorkQueue"/> in <paramref name="items"/> cannot be rescheduled, all changes will be
/// reverted and <b>false</b> will be returned.
/// </remarks>
public bool RescheduleWorkQueueItems(IList<WorkQueue> items, DateTime newScheduledTime, DateTime expirationTime, WorkQueuePriorityEnum priority)
{
    if (items == null || items.Count == 0)
        return false;

    // One shared set of column values applied to every item; failure counters reset.
    var columns = new WorkQueueUpdateColumns
    {
        WorkQueuePriorityEnum = priority,
        ScheduledTime = newScheduledTime,
        ExpirationTime = expirationTime,
        FailureCount = 0,
        FailureDescription = String.Empty,
        LastUpdatedTime = Platform.Time
    };

    bool allUpdated = false;
    using (IUpdateContext ctx = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        var workQueueBroker = ctx.GetBroker<IWorkQueueEntityBroker>();
        foreach (WorkQueue item in items)
        {
            allUpdated = workQueueBroker.Update(item.Key, columns);
            if (!allUpdated)
                break; // abandon the transaction; nothing is committed
        }

        // Commit only when every single item was updated successfully.
        if (allUpdated)
            ctx.Commit();
    }
    return allUpdated;
}
/// <summary>
/// Stamps the current WorkQueue item with the initiating user and time so the
/// web-move action is traceable from the queue entry's Data column.
/// </summary>
private void AddWorkQueueData()
{
    WebMoveWorkQueueEntryData data = new WebMoveWorkQueueEntryData
    {
        // CONSISTENCY FIX: use Platform.Time like the rest of the scheduling
        // code in this codebase (was DateTime.Now).
        Timestamp = Platform.Time,
        UserId = ServerHelper.CurrentUserName
    };

    using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        IWorkQueueEntityBroker broker = update.GetBroker<IWorkQueueEntityBroker>();
        WorkQueueUpdateColumns cols = new WorkQueueUpdateColumns { Data = XmlUtils.SerializeAsXmlDoc(data) };
        broker.Update(WorkQueueItem.Key, cols);
        update.Commit();
    }
}
/// <summary>
/// Writes a refreshed snapshot of the duplicate-processing state into the
/// WorkQueue row's Data column, then records that the history has been logged.
/// </summary>
private void UpdateQueueData()
{
    using (IUpdateContext ctx = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        // Build a copy of the current queue data with the up-to-date state flags.
        ProcessDuplicateQueueEntryQueueData snapshot = new ProcessDuplicateQueueEntryQueueData
        {
            Action = _processDuplicateEntry.QueueData.Action,
            DuplicateSopFolder = _processDuplicateEntry.QueueData.DuplicateSopFolder,
            UserName = _processDuplicateEntry.QueueData.UserName,
            State = new ProcessDuplicateQueueState
            {
                HistoryLogged = HistoryLogged,
                ExistingStudyUpdated = _processDuplicateEntry.QueueData.State.ExistingStudyUpdated
            }
        };

        // Push the serialized snapshot to the database.
        IWorkQueueEntityBroker broker = ctx.GetBroker<IWorkQueueEntityBroker>();
        WorkQueueUpdateColumns columns = new WorkQueueUpdateColumns
        {
            Data = XmlUtils.SerializeAsXmlDoc(snapshot)
        };

        bool updated = broker.Update(WorkQueueItem.Key, columns);
        if (updated)
        {
            ctx.Commit();
            // Only after a successful commit do we mark the history as logged.
            _processDuplicateEntry.QueueData.State.HistoryLogged = true;
            HistoryLogged = true;
        }
    }
}
/// <summary>
/// Migrates the study to new tier
/// </summary>
/// <param name="storage">Current storage location of the study being migrated.</param>
/// <param name="newFilesystem">Target filesystem (new tier) to copy the study to.</param>
private void DoMigrateStudy(StudyStorageLocation storage, ServerFilesystemInfo newFilesystem)
{
    Platform.CheckForNullReference(storage, "storage");
    Platform.CheckForNullReference(newFilesystem, "newFilesystem");

    // Track size/speed statistics for the whole migration run.
    TierMigrationStatistics stat = new TierMigrationStatistics {StudyInstanceUid = storage.StudyInstanceUid};
    stat.ProcessSpeed.Start();
    StudyXml studyXml = storage.LoadStudyXml();
    stat.StudySize = (ulong) studyXml.GetStudySize();

    Platform.Log(LogLevel.Info, "About to migrate study {0} from {1} to {2}",
                 storage.StudyInstanceUid, storage.FilesystemTierEnum, newFilesystem.Filesystem.Description);

    // Destination starts at <filesystem path>/<partition folder>; study-level
    // folders are appended below as directory-creation commands are queued.
    string newPath = Path.Combine(newFilesystem.Filesystem.FilesystemPath, storage.PartitionFolder);
    DateTime startTime = Platform.Time;
    DateTime lastLog = Platform.Time;   // last time a progress message was emitted
    int fileCounter = 0;                // files copied so far (captured by the copy callback)
    ulong bytesCopied = 0;              // bytes copied so far (captured by the copy callback)
    long instanceCountInXml = studyXml.NumberOfStudyRelatedInstances;

    using (ServerCommandProcessor processor = new ServerCommandProcessor("Migrate Study"))
    {
        TierMigrationContext context = new TierMigrationContext
                                           {
                                               OriginalStudyLocation = storage,
                                               Destination = newFilesystem
                                           };

        string origFolder = context.OriginalStudyLocation.GetStudyPath();
        processor.AddCommand(new CreateDirectoryCommand(newPath));

        newPath = Path.Combine(newPath, context.OriginalStudyLocation.StudyFolder);
        processor.AddCommand(new CreateDirectoryCommand(newPath));

        newPath = Path.Combine(newPath, context.OriginalStudyLocation.StudyInstanceUid);
        // don't create this directory so that it won't be backed up by MoveDirectoryCommand

        CopyDirectoryCommand copyDirCommand = new CopyDirectoryCommand(origFolder, newPath,
            delegate (string path)
            {
                // Update the progress. This is useful if the migration takes long time to complete.
                FileInfo file = new FileInfo(path);
                bytesCopied += (ulong)file.Length;
                fileCounter++;
                // Only consider emitting progress after DICOM files (skips xml/other artifacts).
                if (file.Extension != null && file.Extension.Equals(ServerPlatform.DicomFileExtension, StringComparison.InvariantCultureIgnoreCase))
                {
                    TimeSpan elapsed = Platform.Time - lastLog;
                    TimeSpan totalElapsed = Platform.Time - startTime;
                    double speedInMBPerSecond = 0;
                    if (totalElapsed.TotalSeconds > 0)
                    {
                        speedInMBPerSecond = (bytesCopied / 1024f / 1024f) / totalElapsed.TotalSeconds;
                    }

                    // Throttle progress reporting to the configured interval.
                    if (elapsed > TimeSpan.FromSeconds(WorkQueueSettings.Instance.TierMigrationProgressUpdateInSeconds))
                    {
                        #region Log Progress

                        StringBuilder stats = new StringBuilder();
                        if (instanceCountInXml != 0)
                        {
                            float pct = (float)fileCounter / instanceCountInXml;
                            stats.AppendFormat("{0} files moved [{1:0.0}MB] since {2} ({3:0}% completed). Speed={4:0.00}MB/s",
                                        fileCounter, bytesCopied / 1024f / 1024f, startTime, pct * 100, speedInMBPerSecond);
                        }
                        else
                        {
                            stats.AppendFormat("{0} files moved [{1:0.0}MB] since {2}. Speed={3:0.00}MB/s",
                                        fileCounter, bytesCopied / 1024f / 1024f, startTime, speedInMBPerSecond);
                        }

                        Platform.Log(LogLevel.Info, "Tier migration for study {0}: {1}", storage.StudyInstanceUid, stats.ToString());
                        try
                        {
                            // Surface the progress text in the WorkQueue row (FailureDescription
                            // doubles as a status column) so it is visible in the UI.
                            using (IUpdateContext ctx = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
                            {
                                IWorkQueueEntityBroker broker = ctx.GetBroker<IWorkQueueEntityBroker>();
                                WorkQueueUpdateColumns parameters = new WorkQueueUpdateColumns {FailureDescription = stats.ToString()};
                                broker.Update(WorkQueueItem.GetKey(), parameters);
                                ctx.Commit();
                            }
                        }
                        catch
                        {
                            // can't log the progress so far... just ignore it
                        }
                        finally
                        {
                            lastLog = DateTime.Now;
                        }
                        #endregion
                    }
                }
            });
        processor.AddCommand(copyDirCommand);

        // Best-effort delete of the original folder; failure does not roll back the copy.
        DeleteDirectoryCommand delDirCommand = new DeleteDirectoryCommand(origFolder, false) {RequiresRollback = false};
        processor.AddCommand(delDirCommand);

        TierMigrateDatabaseUpdateCommand updateDbCommand = new TierMigrateDatabaseUpdateCommand(context);
        processor.AddCommand(updateDbCommand);

        Platform.Log(LogLevel.Info, "Start migrating study {0}.. expecting {1} to be moved",
                     storage.StudyInstanceUid, ByteCountFormatter.Format(stat.StudySize));
        if (!processor.Execute())
        {
            if (processor.FailureException != null)
                throw processor.FailureException;
            throw new ApplicationException(processor.FailureReason);
        }

        stat.DBUpdate = updateDbCommand.Statistics;
        stat.CopyFiles = copyDirCommand.CopySpeed;
        stat.DeleteDirTime = delDirCommand.Statistics;
    }

    stat.ProcessSpeed.SetData(bytesCopied);
    stat.ProcessSpeed.End();

    Platform.Log(LogLevel.Info, "Successfully migrated study {0} from {1} to {2} in {3} [ {4} files, {5} @ {6}, DB Update={7}, Remove Dir={8}]",
                 storage.StudyInstanceUid, storage.FilesystemTierEnum,
                 newFilesystem.Filesystem.FilesystemTierEnum,
                 TimeSpanFormatter.Format(stat.ProcessSpeed.ElapsedTime), fileCounter,
                 ByteCountFormatter.Format(bytesCopied), stat.CopyFiles.FormattedValue,
                 stat.DBUpdate.FormattedValue, stat.DeleteDirTime.FormattedValue);

    // If the delete command could not remove the original folder, alert so an
    // operator can clean it up by hand.
    string originalPath = storage.GetStudyPath();
    if (Directory.Exists(storage.GetStudyPath()))
    {
        Platform.Log(LogLevel.Info, "Original study folder could not be deleted. It must be cleaned up manually: {0}", originalPath);
        ServerPlatform.Alert(AlertCategory.Application, AlertLevel.Warning, WorkQueueItem.WorkQueueTypeEnum.ToString(), 1000, GetWorkQueueContextData(WorkQueueItem), TimeSpan.Zero,
                             "Study has been migrated to a new tier. Original study folder must be cleaned up manually: {0}", originalPath);
    }
    UpdateAverageStatistics(stat);
}
/// <summary>
/// Serializes the given tier-migration processing state and stores it in the
/// Data column of the WorkQueue row identified by <paramref name="key"/>.
/// </summary>
/// <param name="key">Key of the WorkQueue row to update.</param>
/// <param name="state">New processing state to persist.</param>
private static void UpdateState(ServerEntityKey key, TierMigrationProcessingState state)
{
    TierMigrationWorkQueueData queueData = new TierMigrationWorkQueueData { State = state };

    using (IUpdateContext context = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        IWorkQueueEntityBroker broker = context.GetBroker<IWorkQueueEntityBroker>();
        WorkQueueUpdateColumns columns = new WorkQueueUpdateColumns
        {
            Data = XmlUtils.SerializeAsXmlDoc(queueData)
        };

        bool updated = broker.Update(key, columns);
        if (!updated)
            throw new ApplicationException("Unable to update work queue state");

        context.Commit();
    }
}
/// <summary>
/// Executes a reconcile command against a StudyIntegrityQueue item: locks the study,
/// writes a history record, schedules a ReconcileStudy work-queue entry (copying the
/// SIQ uid list into WorkQueueUid rows), and finally removes the SIQ item — all in
/// one transaction.
/// </summary>
/// <param name="command">XML change description produced by the reconcile UI.</param>
/// <param name="item">The StudyIntegrityQueue item being reconciled; ignored if null.</param>
private static void ReconcileStudy(string command,StudyIntegrityQueue item )
{
    //Ignore the reconcile command if the item is null.
    if (item == null)
        return;

    // Preload the change description so its not done during the DB transaction
    XmlDocument changeDescription = new XmlDocument();
    changeDescription.LoadXml(command);

    // The Xml in the SIQ item was generated when the images were received and put into the SIQ.
    // We now add the user info to it so that it will be logged in the history
    ReconcileStudyWorkQueueData queueData = XmlUtils.Deserialize<ReconcileStudyWorkQueueData>(item.Details);
    queueData.TimeStamp = Platform.Time;
    queueData.UserId = ServerHelper.CurrentUserName;

    using (IUpdateContext context = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        // Lock the study into the ReconcileScheduled state; abort if the lock fails.
        LockStudyParameters lockParms = new LockStudyParameters
                                            {
                                                QueueStudyStateEnum = QueueStudyStateEnum.ReconcileScheduled,
                                                StudyStorageKey = item.StudyStorageKey
                                            };
        ILockStudy broker = context.GetBroker<ILockStudy>();
        broker.Execute(lockParms);
        if (!lockParms.Successful)
        {
            throw new ApplicationException(lockParms.FailureReason);
        }

        //Add to Study History
        StudyHistoryeAdaptor historyAdaptor = new StudyHistoryeAdaptor();
        StudyHistoryUpdateColumns parameters = new StudyHistoryUpdateColumns
                                                   {
                                                       StudyData = item.StudyData,
                                                       ChangeDescription = changeDescription,
                                                       StudyStorageKey = item.StudyStorageKey,
                                                       StudyHistoryTypeEnum = StudyHistoryTypeEnum.StudyReconciled
                                                   };
        StudyHistory history = historyAdaptor.Add(context, parameters);

        //Create WorkQueue Entry
        WorkQueueAdaptor workQueueAdaptor = new WorkQueueAdaptor();
        WorkQueueUpdateColumns row = new WorkQueueUpdateColumns
                                         {
                                             Data = XmlUtils.SerializeAsXmlDoc(queueData),
                                             ServerPartitionKey = item.ServerPartitionKey,
                                             StudyStorageKey = item.StudyStorageKey,
                                             StudyHistoryKey = history.GetKey(),
                                             WorkQueueTypeEnum = WorkQueueTypeEnum.ReconcileStudy,
                                             WorkQueueStatusEnum = WorkQueueStatusEnum.Pending,
                                             ScheduledTime = Platform.Time,
                                             ExpirationTime = Platform.Time.AddHours(1),
                                             GroupID = item.GroupID
                                         };
        WorkQueue newWorkQueueItem = workQueueAdaptor.Add(context, row);

        // Copy each SIQ uid into a WorkQueueUid row attached to the new entry.
        // (The same update-columns object is reused; every field is reassigned per iteration.)
        StudyIntegrityQueueUidAdaptor studyIntegrityQueueUidAdaptor = new StudyIntegrityQueueUidAdaptor();
        StudyIntegrityQueueUidSelectCriteria crit = new StudyIntegrityQueueUidSelectCriteria();
        crit.StudyIntegrityQueueKey.EqualTo(item.GetKey());
        IList<StudyIntegrityQueueUid> uidList = studyIntegrityQueueUidAdaptor.Get(context, crit);

        WorkQueueUidAdaptor workQueueUidAdaptor = new WorkQueueUidAdaptor();
        WorkQueueUidUpdateColumns update = new WorkQueueUidUpdateColumns();
        foreach (StudyIntegrityQueueUid uid in uidList)
        {
            update.WorkQueueKey = newWorkQueueItem.GetKey();
            update.SeriesInstanceUid = uid.SeriesInstanceUid;
            update.SopInstanceUid = uid.SopInstanceUid;
            update.RelativePath = uid.RelativePath;
            workQueueUidAdaptor.Add(context, update);
        }

        //DeleteStudyIntegrityQueue Item
        StudyIntegrityQueueUidSelectCriteria criteria = new StudyIntegrityQueueUidSelectCriteria();
        criteria.StudyIntegrityQueueKey.EqualTo(item.GetKey());
        studyIntegrityQueueUidAdaptor.Delete(context, criteria);

        StudyIntegrityQueueAdaptor studyIntegrityQueueAdaptor = new StudyIntegrityQueueAdaptor();
        studyIntegrityQueueAdaptor.Delete(context, item.GetKey());

        context.Commit();
    }
}
/// <summary>
/// Insert an EditStudy request.
/// </summary>
/// <param name="context">Open update context the insert participates in.</param>
/// <param name="studyStorageKey">Key of the StudyStorage the edit applies to.</param>
/// <param name="serverPartitionKey">Key of the partition owning the study.</param>
/// <param name="type">Work-queue type of the edit request.</param>
/// <param name="updateItems">The tag/value updates to apply to the study.</param>
/// <param name="reason">User-supplied reason for the edit.</param>
/// <param name="user">Identity of the user requesting the edit.</param>
/// <param name="editType">Kind of edit being performed.</param>
/// <returns>The inserted <see cref="WorkQueue"/> entry.</returns>
private static WorkQueue InsertExternalEditStudyRequest(IUpdateContext context, ServerEntityKey studyStorageKey, ServerEntityKey serverPartitionKey,
    WorkQueueTypeEnum type, List<UpdateItem> updateItems, string reason, string user, EditType editType)
{
    // Look up the scheduling properties (expiration delay, priority) configured for this queue type.
    var propertiesBroker = context.GetBroker<IWorkQueueTypePropertiesEntityBroker>();
    var criteria = new WorkQueueTypePropertiesSelectCriteria();
    criteria.WorkQueueTypeEnum.EqualTo(type);
    WorkQueueTypeProperties properties = propertiesBroker.FindOne(criteria);
    // ROBUSTNESS: fail with a clear message instead of a NullReferenceException
    // below when the type has no properties row configured.
    if (properties == null)
        throw new ApplicationException(string.Format("No WorkQueueTypeProperties found for work queue type {0}", type));

    var broker = context.GetBroker<IWorkQueueEntityBroker>();
    var insert = new WorkQueueUpdateColumns();
    DateTime now = Platform.Time;

    // The edit request payload stored in the entry's Data column.
    var data = new EditStudyWorkQueueData
    {
        EditRequest =
        {
            TimeStamp = now,
            UserId = user,
            UpdateEntries = updateItems,
            Reason = reason,
            EditType = editType
        }
    };

    insert.WorkQueueTypeEnum = type;
    insert.StudyStorageKey = studyStorageKey;
    insert.ServerPartitionKey = serverPartitionKey;
    insert.ScheduledTime = now;
    insert.ExpirationTime = now.AddSeconds(properties.ExpireDelaySeconds);
    insert.WorkQueueStatusEnum = WorkQueueStatusEnum.Pending;
    insert.WorkQueuePriorityEnum = properties.WorkQueuePriorityEnum;
    insert.Data = XmlUtils.SerializeAsXmlDoc(data);

    WorkQueue editEntry = broker.Insert(insert);
    if (editEntry == null)
    {
        throw new ApplicationException(string.Format("Unable to insert an Edit request of type {0} for study for user {1}", type.Description, user));
    }
    return editEntry;
}