protected override void OnExecute(CommandProcessor theProcessor, IUpdateContext updateContext) { // Update StudyStatusEnum in the StudyStorageTable IStudyStorageEntityBroker studyStorageUpdate = updateContext.GetBroker<IStudyStorageEntityBroker>(); StudyStorageUpdateColumns studyStorageUpdateColumns = new StudyStorageUpdateColumns(); studyStorageUpdateColumns.StudyStatusEnum = _newStatus; studyStorageUpdate.Update(_location.Key, studyStorageUpdateColumns); // Update ServerTransferSyntaxGUID in FilesystemStudyStorage IFilesystemStudyStorageEntityBroker filesystemUpdate = updateContext.GetBroker<IFilesystemStudyStorageEntityBroker>(); FilesystemStudyStorageUpdateColumns filesystemUpdateColumns = new FilesystemStudyStorageUpdateColumns(); filesystemUpdateColumns.ServerTransferSyntaxKey = _newSyntax.Key; filesystemUpdate.Update(_location.FilesystemStudyStorageKey, filesystemUpdateColumns); }
/// <summary> /// Execute the insert. /// </summary> /// <param name="theProcessor">The command processor calling us</param> /// <param name="updateContext">The persistent store connection to use for the update.</param> protected override void OnExecute(CommandProcessor theProcessor, IUpdateContext updateContext) { var locInsert = updateContext.GetBroker<IInsertStudyStorage>(); var insertParms = new InsertStudyStorageParameters { ServerPartitionKey = _serverPartitionKey, StudyInstanceUid = _studyInstanceUid, Folder = _folder, FilesystemKey = _filesystemKey, QueueStudyStateEnum = QueueStudyStateEnum.Idle }; if (_transfersyntax.LosslessCompressed) { insertParms.TransferSyntaxUid = _transfersyntax.UidString; insertParms.StudyStatusEnum = StudyStatusEnum.OnlineLossless; } else if (_transfersyntax.LossyCompressed) { insertParms.TransferSyntaxUid = _transfersyntax.UidString; insertParms.StudyStatusEnum = StudyStatusEnum.OnlineLossy; } else { insertParms.TransferSyntaxUid = _transfersyntax.UidString; insertParms.StudyStatusEnum = StudyStatusEnum.Online; } // Find one so we don't uselessly process all the results. _location = locInsert.FindOne(insertParms); }
/// <summary>
/// Imports the specified set of authority tokens.
/// </summary>
/// <param name="tokenDefs">The token definitions to import.</param>
/// <param name="addToGroups">Names of authority groups to which each imported token is added; may be null or empty.</param>
/// <param name="context">The persistence context to use for the import.</param>
/// <returns>The complete set of authority tokens after the import.</returns>
public IList<AuthorityToken> Import(IEnumerable<AuthorityTokenDefinition> tokenDefs, IList<string> addToGroups, IUpdateContext context)
{
    // first load all the existing tokens into memory
    // there should not be that many tokens (< 500), so this should not be a problem
    var broker = context.GetBroker<IAuthorityTokenBroker>();
    var existingTokens = broker.FindAll();

    // if there are groups to add to, load the groups
    var groups = addToGroups != null && addToGroups.Count > 0
        ? LoadGroups(addToGroups, context)
        : new List<AuthorityGroup>();

    // order the input such that the renames are processed first,
    // otherwise there may be a corner case where a newly imported token is immediately renamed
    tokenDefs = tokenDefs.OrderByDescending(t => t.FormerIdentities.Length > 0);

    foreach (var tokenDef in tokenDefs)
    {
        var token = ProcessToken(tokenDef, existingTokens, context);

        // add to groups
        CollectionUtils.ForEach(groups, g => g.AuthorityTokens.Add(token));
    }

    return existingTokens;
}
protected override void OnExecute(CommandProcessor theProcessor, IUpdateContext updateContext) { var insert = updateContext.GetBroker<IInsertWorkQueue>(); var parms = new InsertWorkQueueParameters { WorkQueueTypeEnum = WorkQueueTypeEnum.StudyProcess, StudyStorageKey = _storageLocation.GetKey(), ServerPartitionKey = _storageLocation.ServerPartitionKey, SeriesInstanceUid = _message.DataSet[DicomTags.SeriesInstanceUid].GetString(0, String.Empty), SopInstanceUid = _message.DataSet[DicomTags.SopInstanceUid].GetString(0, String.Empty), ScheduledTime = Platform.Time, WorkQueueGroupID = _uidGroupId }; if (_duplicate) { parms.Duplicate = _duplicate; parms.Extension = _extension; parms.UidGroupID = _uidGroupId; } _insertedWorkQueue = insert.FindOne(parms); if (_insertedWorkQueue == null) throw new ApplicationException("UpdateWorkQueueCommand failed"); }
/// <summary> /// Execute the command /// </summary> /// <param name="updateContext">Database update context.</param> /// <param name="theProcessor">The processor executing the command.</param> protected override void OnExecute(CommandProcessor theProcessor, IUpdateContext updateContext) { var columns = new ArchiveStudyStorageUpdateColumns { ArchiveTime = Platform.Time, PartitionArchiveKey = _partitionArchiveKey, StudyStorageKey = _studyStorageKey, ArchiveXml = _archiveXml, ServerTransferSyntaxKey = _serverTransferSyntaxKey }; var insertBroker = updateContext.GetBroker<IArchiveStudyStorageEntityBroker>(); ArchiveStudyStorage storage = insertBroker.Insert(columns); var parms = new UpdateArchiveQueueParameters { ArchiveQueueKey = _archiveQueueKey, ArchiveQueueStatusEnum = ArchiveQueueStatusEnum.Completed, ScheduledTime = Platform.Time, StudyStorageKey = _studyStorageKey }; var broker = updateContext.GetBroker<IUpdateArchiveQueue>(); if (!broker.Execute(parms)) throw new ApplicationException("InsertArchiveStudyStorageCommand failed"); }
/// <summary> /// Import authority groups. /// </summary> /// <remarks> /// Creates any authority groups that do not already exist. /// This method performs an additive import. It will never remove an existing authority group or /// remove authority tokens from an existing group. /// </remarks> /// <param name="groupDefs"></param> /// <param name="context"></param> public IList<AuthorityGroup> Import(IEnumerable<AuthorityGroupDefinition> groupDefs, IUpdateContext context) { // first load all the existing tokens into memory // there should not be that many tokens ( < 500), so this should not be a problem IAuthorityTokenBroker tokenBroker = context.GetBroker<IAuthorityTokenBroker>(); IList<AuthorityToken> existingTokens = tokenBroker.FindAll(); // load existing groups IAuthorityGroupBroker groupBroker = context.GetBroker<IAuthorityGroupBroker>(); IList<AuthorityGroup> existingGroups = groupBroker.FindAll(); foreach (AuthorityGroupDefinition groupDef in groupDefs) { AuthorityGroup group = CollectionUtils.SelectFirst(existingGroups, g => g.Name == groupDef.Name); // if group does not exist, create it if (group == null) { group = new AuthorityGroup { Name = groupDef.Name, Description = groupDef.Description, DataGroup = groupDef.DataGroup }; context.Lock(group, DirtyState.New); existingGroups.Add(group); } // process all token nodes contained in group foreach (string tokenName in groupDef.Tokens) { AuthorityToken token = CollectionUtils.SelectFirst(existingTokens, t => t.Name == tokenName); // ignore non-existent tokens if (token == null) continue; // add the token to the group group.AuthorityTokens.Add(token); } } return existingGroups; }
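// Usage sketch for the group import above. This is an illustrative fragment, not code from the
// source: "importer" stands for an instance of the class declaring Import, and "groupDefs" for an
// IEnumerable<AuthorityGroupDefinition> obtained elsewhere (e.g. from plugin-declared defaults).
// The persistence-scope pattern mirrors the one used by ImportCore further below.
using (PersistenceScope scope = new PersistenceScope(PersistenceContextType.Update))
{
    var context = (IUpdateContext)PersistenceScope.CurrentContext;

    // Additive import: creates missing groups and adds tokens, never removes anything.
    importer.Import(groupDefs, context);

    scope.Complete();
}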
static public StudyDataAccess Insert(IUpdateContext update, StudyDataAccess entity) { var broker = update.GetBroker<IStudyDataAccessEntityBroker>(); var updateColumns = new StudyDataAccessUpdateColumns(); updateColumns.StudyStorageKey = entity.StudyStorageKey; updateColumns.DataAccessGroupKey = entity.DataAccessGroupKey; StudyDataAccess newEntity = broker.Insert(updateColumns); return newEntity; }
static public ServerPartitionDataAccess Insert(IUpdateContext update, ServerPartitionDataAccess entity) { var broker = update.GetBroker<IServerPartitionDataAccessEntityBroker>(); var updateColumns = new ServerPartitionDataAccessUpdateColumns(); updateColumns.ServerPartitionKey = entity.ServerPartitionKey; updateColumns.DataAccessGroupKey = entity.DataAccessGroupKey; ServerPartitionDataAccess newEntity = broker.Insert(updateColumns); return newEntity; }
static public DataAccessGroup Insert(IUpdateContext update, DataAccessGroup entity) { var broker = update.GetBroker<IDataAccessGroupEntityBroker>(); var updateColumns = new DataAccessGroupUpdateColumns(); updateColumns.AuthorityGroupOID = entity.AuthorityGroupOID; updateColumns.Deleted = entity.Deleted; DataAccessGroup newEntity = broker.Insert(updateColumns); return newEntity; }
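// The three static helpers above follow one pattern: copy the entity's values into an
// "update columns" object and insert it through the matching entity broker. A minimal usage
// sketch, with assumptions: the call is made from the class declaring the DataAccessGroup helper,
// DataAccessGroup supports object-initializer syntax, and "authorityGroupOid" is an existing
// AuthorityGroup key (ServerEntityKey) supplied by the caller.
using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
{
    var entity = new DataAccessGroup
    {
        AuthorityGroupOID = authorityGroupOid, // placeholder key, not from the source
        Deleted = false
    };

    DataAccessGroup newGroup = Insert(update, entity);
    update.Commit();
}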
/// <summary>
/// Import facilities from CSV format.
/// </summary>
/// <param name="rows">
/// Each string in the list must contain 5 CSV fields, as follows:
///     0 - Facility ID
///     1 - Facility Name
///     2 - Facility Description
///     3 - Information Authority ID
///     4 - Information Authority Name
/// </param>
/// <param name="context">The persistence context to use for the import.</param>
public override void Import(List<string> rows, IUpdateContext context)
{
    _enumBroker = context.GetBroker<IEnumBroker>();
    _authorities = new List<InformationAuthorityEnum>(_enumBroker.Load<InformationAuthorityEnum>(true));

    List<Facility> facilities = new List<Facility>();
    foreach (string line in rows)
    {
        // expect 5 fields in the row
        string[] fields = ParseCsv(line, 5);

        string facilityId = fields[0];
        string facilityName = fields[1];
        string facilityDescription = fields[2];
        string informationAuthorityId = fields[3];
        string informationAuthorityName = fields[4];

        // first check if we have it in memory
        Facility facility = CollectionUtils.SelectFirst(facilities,
            delegate(Facility f) { return f.Code == facilityId && f.Name == facilityName; });

        // if not, check the database
        if (facility == null)
        {
            FacilitySearchCriteria where = new FacilitySearchCriteria();
            where.Code.EqualTo(facilityId);
            where.Name.EqualTo(facilityName);

            IFacilityBroker broker = context.GetBroker<IFacilityBroker>();
            facility = CollectionUtils.FirstElement(broker.Find(where));

            // if not, create a new instance
            if (facility == null)
            {
                facility = new Facility(facilityId, facilityName, facilityDescription,
                    GetAuthority(informationAuthorityId, informationAuthorityName));
                context.Lock(facility, DirtyState.New);
            }

            facilities.Add(facility);
        }
    }
}
static public RequestAttributes Insert(IUpdateContext update, RequestAttributes entity) { var broker = update.GetBroker<IRequestAttributesEntityBroker>(); var updateColumns = new RequestAttributesUpdateColumns(); updateColumns.SeriesKey = entity.SeriesKey; updateColumns.RequestedProcedureId = entity.RequestedProcedureId; updateColumns.ScheduledProcedureStepId = entity.ScheduledProcedureStepId; RequestAttributes newEntity = broker.Insert(updateColumns); return newEntity; }
static public PartitionSopClass Insert(IUpdateContext update, PartitionSopClass entity) { var broker = update.GetBroker<IPartitionSopClassEntityBroker>(); var updateColumns = new PartitionSopClassUpdateColumns(); updateColumns.ServerPartitionKey = entity.ServerPartitionKey; updateColumns.ServerSopClassKey = entity.ServerSopClassKey; updateColumns.Enabled = entity.Enabled; PartitionSopClass newEntity = broker.Insert(updateColumns); return newEntity; }
static public ServerTransferSyntax Insert(IUpdateContext update, ServerTransferSyntax entity) { var broker = update.GetBroker<IServerTransferSyntaxEntityBroker>(); var updateColumns = new ServerTransferSyntaxUpdateColumns(); updateColumns.Uid = entity.Uid; updateColumns.Description = entity.Description; updateColumns.Lossless = entity.Lossless; ServerTransferSyntax newEntity = broker.Insert(updateColumns); return newEntity; }
static public CannedText Insert(IUpdateContext update, CannedText entity) { var broker = update.GetBroker<ICannedTextEntityBroker>(); var updateColumns = new CannedTextUpdateColumns(); updateColumns.Label = entity.Label; updateColumns.Category = entity.Category; updateColumns.Text = entity.Text; CannedText newEntity = broker.Insert(updateColumns); return newEntity; }
protected override void OnExecute(CommandProcessor theProcessor, IUpdateContext updateContext) { var filesystemQueueBroker= updateContext.GetBroker<IFilesystemQueueEntityBroker>(); var criteria = new FilesystemQueueSelectCriteria(); criteria.StudyStorageKey.EqualTo(_storageLocationKey); IList<FilesystemQueue> filesystemQueueItems = filesystemQueueBroker.Find(criteria); var workQueueBroker = updateContext.GetBroker<IWorkQueueEntityBroker>(); var workQueueCriteria = new WorkQueueSelectCriteria(); workQueueCriteria.StudyStorageKey.EqualTo(_storageLocationKey); workQueueCriteria.WorkQueueTypeEnum.In(new[] { WorkQueueTypeEnum.PurgeStudy, WorkQueueTypeEnum.DeleteStudy, WorkQueueTypeEnum.CompressStudy, WorkQueueTypeEnum.MigrateStudy }); IList<WorkQueue> workQueueItems = workQueueBroker.Find(workQueueCriteria); foreach (FilesystemQueue queue in filesystemQueueItems) { bool delete = false; if (_applyTime.Equals(ServerRuleApplyTimeEnum.StudyArchived)) { if (queue.FilesystemQueueTypeEnum.Equals(FilesystemQueueTypeEnum.PurgeStudy)) delete = true; } else { delete = true; } if (delete) { if (!filesystemQueueBroker.Delete(queue.GetKey())) throw new ApplicationException("Unable to delete items in the filesystem queue"); } } if (!_applyTime.Equals(ServerRuleApplyTimeEnum.StudyArchived)) { // delete work queue foreach (Model.WorkQueue item in workQueueItems) { if (!item.Delete(updateContext)) throw new ApplicationException("Unable to delete items in the work queue"); } } }
static public ServerSopClass Insert(IUpdateContext update, ServerSopClass entity) { var broker = update.GetBroker<IServerSopClassEntityBroker>(); var updateColumns = new ServerSopClassUpdateColumns(); updateColumns.SopClassUid = entity.SopClassUid; updateColumns.Description = entity.Description; updateColumns.NonImage = entity.NonImage; ServerSopClass newEntity = broker.Insert(updateColumns); return newEntity; }
static public DevicePreferredTransferSyntax Insert(IUpdateContext update, DevicePreferredTransferSyntax entity) { var broker = update.GetBroker<IDevicePreferredTransferSyntaxEntityBroker>(); var updateColumns = new DevicePreferredTransferSyntaxUpdateColumns(); updateColumns.DeviceKey = entity.DeviceKey; updateColumns.ServerSopClassKey = entity.ServerSopClassKey; updateColumns.ServerTransferSyntaxKey = entity.ServerTransferSyntaxKey; DevicePreferredTransferSyntax newEntity = broker.Insert(updateColumns); return newEntity; }
static public PartitionTransferSyntax Insert(IUpdateContext update, PartitionTransferSyntax entity) { var broker = update.GetBroker<IPartitionTransferSyntaxEntityBroker>(); var updateColumns = new PartitionTransferSyntaxUpdateColumns(); updateColumns.ServerPartitionKey = entity.ServerPartitionKey; updateColumns.ServerTransferSyntaxKey = entity.ServerTransferSyntaxKey; updateColumns.Enabled = entity.Enabled; PartitionTransferSyntax newEntity = broker.Insert(updateColumns); return newEntity; }
/// <summary> /// Do the insertion of the AutoRoute. /// </summary> protected override void OnExecute(CommandProcessor theProcessor, IUpdateContext updateContext) { DeviceSelectCriteria deviceSelectCriteria = new DeviceSelectCriteria(); deviceSelectCriteria.AeTitle.EqualTo(_deviceAe); deviceSelectCriteria.ServerPartitionKey.EqualTo(_context.ServerPartitionKey); IDeviceEntityBroker selectDevice = updateContext.GetBroker<IDeviceEntityBroker>(); Device dev = selectDevice.FindOne(deviceSelectCriteria); if (dev == null) { Platform.Log(LogLevel.Warn, "Device '{0}' on partition {1} not in database for autoroute request! Ignoring request.", _deviceAe, _context.ServerPartition.AeTitle); ServerPlatform.Alert( AlertCategory.Application, AlertLevel.Warning, SR.AlertComponentAutorouteRule, AlertTypeCodes.UnableToProcess, null, TimeSpan.FromMinutes(5), SR.AlertAutoRouteUnknownDestination, _deviceAe, _context.ServerPartition.AeTitle); return; } if (!dev.AllowAutoRoute) { Platform.Log(LogLevel.Warn, "Auto-route attempted to device {0} on partition {1} with autoroute support disabled. Ignoring request.", dev.AeTitle, _context.ServerPartition.AeTitle); ServerPlatform.Alert(AlertCategory.Application, AlertLevel.Warning, SR.AlertComponentAutorouteRule, AlertTypeCodes.UnableToProcess, null, TimeSpan.FromMinutes(5), SR.AlertAutoRouteDestinationAEDisabled, dev.AeTitle, _context.ServerPartition.AeTitle); return; } InsertWorkQueueParameters parms = new InsertWorkQueueParameters { WorkQueueTypeEnum = WorkQueueTypeEnum.AutoRoute, ScheduledTime = _scheduledTime.HasValue ? _scheduledTime.Value : Platform.Time.AddSeconds(10), StudyStorageKey = _context.StudyLocationKey, ServerPartitionKey = _context.ServerPartitionKey, DeviceKey = dev.GetKey(), SeriesInstanceUid = _context.Message.DataSet[DicomTags.SeriesInstanceUid].GetString(0, string.Empty), SopInstanceUid = _context.Message.DataSet[DicomTags.SopInstanceUid].GetString(0, string.Empty) }; IInsertWorkQueue broker = updateContext.GetBroker<IInsertWorkQueue>(); if (broker.FindOne(parms)==null) { throw new ApplicationException("InsertAutoRouteCommand failed"); } }
protected override void OnExecute(CommandProcessor theProcessor, IUpdateContext updateContext) { // update FilesystemStudyStorage if (Context != null) { Platform.Log(LogLevel.Info, "Updating database..."); IFilesystemStudyStorageEntityBroker broker = updateContext.GetBroker<IFilesystemStudyStorageEntityBroker>(); FilesystemStudyStorageSelectCriteria searchCriteria = new FilesystemStudyStorageSelectCriteria(); searchCriteria.StudyStorageKey.EqualTo(Context.OriginalStudyLocation.GetKey()); searchCriteria.FilesystemKey.EqualTo(Context.OriginalStudyLocation.FilesystemKey); FilesystemStudyStorage filesystemStudyStorage = broker.FindOne(searchCriteria); Debug.Assert(filesystemStudyStorage != null); // Update Filesystem for the StudyStorage entry filesystemStudyStorage.FilesystemKey = Context.Destination.Filesystem.GetKey(); broker.Update(filesystemStudyStorage); // Update Filesystem for the remaining FilesystemQueue entries IFilesystemQueueEntityBroker fsQueueBroker = updateContext.GetBroker<IFilesystemQueueEntityBroker>(); FilesystemQueueSelectCriteria fsQueueSearchCriteria = new FilesystemQueueSelectCriteria(); fsQueueSearchCriteria.StudyStorageKey.EqualTo(Context.OriginalStudyLocation.GetKey()); fsQueueSearchCriteria.FilesystemKey.EqualTo(Context.OriginalStudyLocation.FilesystemKey); FilesystemQueueUpdateColumns fsQueueUpdateColumns = new FilesystemQueueUpdateColumns(); fsQueueUpdateColumns.FilesystemKey = Context.Destination.Filesystem.GetKey(); fsQueueBroker.Update(fsQueueSearchCriteria, fsQueueUpdateColumns); // Insert or update Filesystem Queue table. IInsertFilesystemQueue insertFilesystemQueueBroker = updateContext.GetBroker<IInsertFilesystemQueue>(); FilesystemQueueInsertParameters parms = new FilesystemQueueInsertParameters(); parms.FilesystemKey = Context.Destination.Filesystem.GetKey(); parms.FilesystemQueueTypeEnum = FilesystemQueueTypeEnum.TierMigrate; parms.ScheduledTime = Platform.Time; parms.StudyStorageKey = Context.OriginalStudyLocation.GetKey(); insertFilesystemQueueBroker.Execute(parms); Platform.Log(LogLevel.Info, "Database is updated."); } }
static public DatabaseVersion Insert(IUpdateContext update, DatabaseVersion entity) { var broker = update.GetBroker<IDatabaseVersionEntityBroker>(); var updateColumns = new DatabaseVersionUpdateColumns(); updateColumns.Major = entity.Major; updateColumns.Minor = entity.Minor; updateColumns.Build = entity.Build; updateColumns.Revision = entity.Revision; DatabaseVersion newEntity = broker.Insert(updateColumns); return newEntity; }
static public FilesystemStudyStorage Insert(IUpdateContext update, FilesystemStudyStorage entity) { var broker = update.GetBroker<IFilesystemStudyStorageEntityBroker>(); var updateColumns = new FilesystemStudyStorageUpdateColumns(); updateColumns.StudyStorageKey = entity.StudyStorageKey; updateColumns.FilesystemKey = entity.FilesystemKey; updateColumns.ServerTransferSyntaxKey = entity.ServerTransferSyntaxKey; updateColumns.StudyFolder = entity.StudyFolder; FilesystemStudyStorage newEntity = broker.Insert(updateColumns); return newEntity; }
/// <summary>
/// Do the insertion of the GrantAccess (study data access) entry.
/// </summary>
protected override void OnExecute(CommandProcessor theProcessor, IUpdateContext updateContext)
{
    var criteria = new DataAccessGroupSelectCriteria();
    criteria.AuthorityGroupOID.EqualTo(new ServerEntityKey("AuthorityGroupOID", _authorityGroupOid));

    var authorityGroup = updateContext.GetBroker<IDataAccessGroupEntityBroker>();
    DataAccessGroup group = authorityGroup.FindOne(criteria);
    if (group == null)
    {
        Platform.Log(LogLevel.Warn,
                     "AuthorityGroupOID '{0}' on partition {1} not in database for GrantAccess request! Ignoring request.",
                     _authorityGroupOid, _context.ServerPartition.AeTitle);

        ServerPlatform.Alert(AlertCategory.Application, AlertLevel.Warning,
                             SR.AlertComponentDataAccessRule, AlertTypeCodes.UnableToProcess,
                             null, TimeSpan.FromMinutes(5),
                             SR.AlertDataAccessUnknownAuthorityGroup, _authorityGroupOid, _context.ServerPartition.AeTitle);
        return;
    }

    var entityBroker = updateContext.GetBroker<IStudyDataAccessEntityBroker>();

    var selectStudyDataAccess = new StudyDataAccessSelectCriteria();
    selectStudyDataAccess.DataAccessGroupKey.EqualTo(group.Key);
    selectStudyDataAccess.StudyStorageKey.EqualTo(_context.StudyLocationKey);

    if (entityBroker.Count(selectStudyDataAccess) == 0)
    {
        var insertColumns = new StudyDataAccessUpdateColumns
        {
            DataAccessGroupKey = group.Key,
            StudyStorageKey = _context.StudyLocationKey
        };
        entityBroker.Insert(insertColumns);
    }
}
public void Archive(IUpdateContext context) { var insertArchiveQueueBroker = context.GetBroker<IInsertArchiveQueue>(); var parms = new InsertArchiveQueueParameters { ServerPartitionKey = ServerPartitionKey, StudyStorageKey = Key }; if (!insertArchiveQueueBroker.Execute(parms)) { throw new ApplicationException("Unable to schedule study archive"); } }
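// Usage sketch for Archive above (assumption: Archive is an instance method on the StudyStorage
// entity, here referenced as "storage"). The caller owns the update context, so nothing is
// persisted unless Commit() is reached.
using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
{
    storage.Archive(update);   // throws ApplicationException if the ArchiveQueue insert fails
    update.Commit();
}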
protected override void OnExecute(CommandProcessor theProcessor, IUpdateContext updateContext) { IStudyHistoryEntityBroker historyUpdateBroker = updateContext.GetBroker<IStudyHistoryEntityBroker>(); StudyHistoryUpdateColumns parms = new StudyHistoryUpdateColumns {DestStudyStorageKey = _destStudy.Key}; if (_map != null) { // replace the mapping in the history StudyReconcileDescriptor changeLog = XmlUtils.Deserialize<StudyReconcileDescriptor>(_studyHistory.ChangeDescription); changeLog.SeriesMappings = new System.Collections.Generic.List<SeriesMapping>(_map.GetSeriesMappings()); parms.ChangeDescription = XmlUtils.SerializeAsXmlDoc(changeLog); } historyUpdateBroker.Update(_studyHistory.Key, parms); }
protected override void OnExecute(CommandProcessor theProcessor, IUpdateContext updateContext) { var deleteInstanceBroker = updateContext.GetBroker<IDeleteInstance>(); var parameters = new DeleteInstanceParameters { StudyStorageKey = _studyLocation.GetKey(), SeriesInstanceUid = _seriesInstanceUid, SOPInstanceUid = _sopInstanceUid }; if (!deleteInstanceBroker.Execute(parameters)) { throw new ApplicationException("Unable to update instance count in db"); } }
protected override void OnExecute(ServerCommandProcessor theProcessor, IUpdateContext updateContext) { Study study = _location.Study ?? Study.Find(updateContext, _location.Key); if (study.StudySizeInKB != _studySizeInKB) { IStudyEntityBroker broker = updateContext.GetBroker<IStudyEntityBroker>(); StudyUpdateColumns parameters = new StudyUpdateColumns() { StudySizeInKB = _studySizeInKB }; if (!broker.Update(study.Key, parameters)) throw new ApplicationException("Unable to update study size in the database"); } }
protected override void OnExecute(CommandProcessor theProcessor, IUpdateContext updateContext) { // Setup the insert parameters var parms = new InsertArchiveQueueParameters { ServerPartitionKey = _serverPartitionKey, StudyStorageKey = _studyStorageKey }; // Get the Insert ArchiveQueue broker and do the insert var insert = updateContext.GetBroker<IInsertArchiveQueue>(); // Do the insert if (!insert.Execute(parms)) throw new ApplicationException("InsertArchiveQueueCommand failed"); }
protected override void OnExecute(CommandProcessor theProcessor, IUpdateContext updateContext) { var broker = updateContext.GetBroker<IInsertDuplicateSopReceivedQueue>(); var parms = new InsertDuplicateSopReceivedQueueParameters { GroupID = _groupId, ServerPartitionKey = _studyLocation.ServerPartitionKey, StudyStorageKey = _studyLocation.Key, StudyInstanceUid = _file.DataSet[DicomTags.StudyInstanceUid].ToString(), SeriesDescription = _file.DataSet[DicomTags.SeriesDescription].ToString(), SeriesInstanceUid = _file.DataSet[DicomTags.SeriesInstanceUid].ToString(), SopInstanceUid = _file.MediaStorageSopInstanceUid }; ReconcileStudyQueueDescription queueDesc = CreateQueueEntryDescription(_file); parms.Description = queueDesc != null ? queueDesc.ToString() : String.Empty; var queueData = new DuplicateSIQQueueData { StoragePath = _duplicateStoragePath, Details = new ImageSetDetails(_file.DataSet), TimeStamp = Platform.Time }; if (_reasons != null && _reasons.Count>0) { queueData.ComparisonResults = _reasons; } var imageSet = new ImageSetDescriptor(_file.DataSet); parms.StudyData = XmlUtils.SerializeAsXmlDoc(imageSet); parms.Details = XmlUtils.SerializeAsXmlDoc(queueData); parms.UidRelativePath = _relativePath; IList<DuplicateSopReceivedQueue> entries = broker.Find(parms); Platform.CheckForNullReference(entries, "entries"); Platform.CheckTrue(entries.Count == 1, "entries.Count==1"); DuplicateSopReceivedQueue queueEntry = entries[0]; var data = XmlUtils.Deserialize<DuplicateSIQQueueData>(queueEntry.Details); data.Details.InsertFile(_file); queueEntry.Details = XmlUtils.SerializeAsXmlDoc(data); var siqBroker = updateContext.GetBroker<IStudyIntegrityQueueEntityBroker>(); if (!siqBroker.Update(queueEntry)) throw new ApplicationException("Unable to update duplicate queue entry"); }
protected override void OnExecute(CommandProcessor theProcessor, IUpdateContext updateContext) { if (_rebuildCommand != null) _studySizeInKB = _rebuildCommand.StudyXml.GetStudySize() / KB; Study study = _location.Study ?? Study.Find(updateContext, _location.Key); if (study != null && study.StudySizeInKB != _studySizeInKB) { var broker = updateContext.GetBroker<IStudyEntityBroker>(); var parameters = new StudyUpdateColumns { StudySizeInKB = _studySizeInKB }; if (!broker.Update(study.Key, parameters)) throw new ApplicationException("Unable to update study size in the database"); } }
/// <summary>
/// Lookup the device entity in the database corresponding to the remote AE of the association.
/// </summary>
/// <param name="partition">The partition in which to look up the device.</param>
/// <param name="association">The association.</param>
/// <param name="isNew">Indicates whether the device returned was created by the call.</param>
/// <returns>The device record corresponding to the calling AE of the association.</returns>
static public Device LookupDevice(ServerPartition partition, AssociationParameters association, out bool isNew)
{
    isNew = false;

    Device device = null;

    using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        var queryDevice = updateContext.GetBroker<IDeviceEntityBroker>();

        // Setup the select parameters.
        var queryParameters = new DeviceSelectCriteria();
        queryParameters.AeTitle.EqualTo(association.CallingAE);
        queryParameters.ServerPartitionKey.EqualTo(partition.GetKey());
        var devices = queryDevice.Find(queryParameters);
        foreach (var d in devices)
        {
            if (string.Compare(d.AeTitle, association.CallingAE, false, CultureInfo.InvariantCulture) == 0)
            {
                device = d;
                break;
            }
        }

        if (device == null)
        {
            if (!partition.AcceptAnyDevice)
            {
                return null;
            }

            if (partition.AutoInsertDevice)
            {
                // Auto-insert a new entry in the table.
                var updateColumns = new DeviceUpdateColumns
                {
                    AeTitle = association.CallingAE,
                    Enabled = true,
                    Description = String.Format("AE: {0}", association.CallingAE),
                    Dhcp = false,
                    IpAddress = association.RemoteEndPoint.Address.ToString(),
                    ServerPartitionKey = partition.GetKey(),
                    Port = partition.DefaultRemotePort,
                    AllowQuery = true,
                    AllowRetrieve = true,
                    AllowStorage = true,
                    ThrottleMaxConnections = ImageServerCommonConfiguration.Device.MaxConnections,
                    DeviceTypeEnum = DeviceTypeEnum.Workstation
                };

                var insert = updateContext.GetBroker<IDeviceEntityBroker>();
                device = insert.Insert(updateColumns);
                updateContext.Commit();

                isNew = true;
            }
        }

        if (device != null)
        {
            // For DHCP devices, we always update the remote ip address, if its changed from what is in the DB.
            if (device.Dhcp && !association.RemoteEndPoint.Address.ToString().Equals(device.IpAddress))
            {
                var updateColumns = new DeviceUpdateColumns
                {
                    IpAddress = association.RemoteEndPoint.Address.ToString(),
                    LastAccessedTime = Platform.Time
                };

                var update = updateContext.GetBroker<IDeviceEntityBroker>();
                if (!update.Update(device.GetKey(), updateColumns))
                {
                    Platform.Log(LogLevel.Error, "Unable to update IP Address for DHCP device {0} on partition '{1}'",
                                 device.AeTitle, partition.Description);
                }
                else
                {
                    updateContext.Commit();
                }
            }
            else if (!isNew)
            {
                var updateColumns = new DeviceUpdateColumns { LastAccessedTime = Platform.Time };

                var update = updateContext.GetBroker<IDeviceEntityBroker>();
                if (!update.Update(device.GetKey(), updateColumns))
                {
                    Platform.Log(LogLevel.Error, "Unable to update LastAccessedTime for device {0} on partition '{1}'",
                                 device.AeTitle, partition.Description);
                }
                else
                {
                    updateContext.Commit();
                }
            }
        }
    }

    return device;
}
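// Usage sketch for LookupDevice above, e.g. when validating an incoming association. The
// unqualified call assumes it is made from the class that declares the static method;
// "partition" and "association" stand for the values normally available at association time.
bool isNew;
Device device = LookupDevice(partition, association, out isNew);
if (device == null)
{
    // Partition does not accept unknown devices (AcceptAnyDevice is false): reject the association.
}
else if (isNew)
{
    Platform.Log(LogLevel.Info, "Auto-inserted device {0} on partition {1}", device.AeTitle, partition.AeTitle);
}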
/// <summary> /// Updates the 'State' of the filesystem associated with the 'FilesystemDelete' <see cref="ServiceLock"/> item /// </summary> /// <param name="item"></param> /// <param name="fs"></param> private static void UpdateState(Model.ServiceLock item, ServerFilesystemInfo fs) { FilesystemState state = null; if (item.State != null && item.State.DocumentElement != null) { //load from datatabase state = XmlUtils.Deserialize <FilesystemState>(item.State.DocumentElement); } if (state == null) { state = new FilesystemState(); } if (fs.AboveHighWatermark) { // we don't want to generate alert if the filesystem is offline or not accessible. if (fs.Online && (fs.Readable || fs.Writeable)) { TimeSpan ALERT_INTERVAL = TimeSpan.FromMinutes(ServiceLockSettings.Default.HighWatermarkAlertInterval); if (state.AboveHighWatermarkTimestamp == null) { state.AboveHighWatermarkTimestamp = Platform.Time; } TimeSpan elapse = (state.LastHighWatermarkAlertTimestamp != null) ? Platform.Time - state.LastHighWatermarkAlertTimestamp.Value : Platform.Time - state.AboveHighWatermarkTimestamp.Value; if (elapse.Duration() >= ALERT_INTERVAL) { ServerPlatform.Alert(AlertCategory.System, AlertLevel.Warning, "Filesystem", AlertTypeCodes.LowResources, null, TimeSpan.Zero, SR.AlertFilesystemAboveHW, fs.Filesystem.Description, TimeSpanFormatter.Format(Platform.Time - state.AboveHighWatermarkTimestamp.Value, true)); state.LastHighWatermarkAlertTimestamp = Platform.Time; } } else { state.AboveHighWatermarkTimestamp = null; state.LastHighWatermarkAlertTimestamp = null; } } else { state.AboveHighWatermarkTimestamp = null; state.LastHighWatermarkAlertTimestamp = null; } XmlDocument stateXml = new XmlDocument(); stateXml.AppendChild(stateXml.ImportNode(XmlUtils.Serialize(state), true)); IPersistentStore store = PersistentStoreRegistry.GetDefaultStore(); using (IUpdateContext ctx = store.OpenUpdateContext(UpdateContextSyncMode.Flush)) { ServiceLockUpdateColumns columns = new ServiceLockUpdateColumns(); columns.State = stateXml; IServiceLockEntityBroker broker = ctx.GetBroker <IServiceLockEntityBroker>(); broker.Update(item.GetKey(), columns); ctx.Commit(); } }
/// <summary> /// Process StudyPurge <see cref="FilesystemQueue"/> entries. /// </summary> /// <param name="candidateList">The list of candidates for purging</param> private void ProcessStudyPurgeCandidates(IList <FilesystemQueue> candidateList) { if (candidateList.Count > 0) { Platform.Log(LogLevel.Debug, "Scheduling purge study for {0} eligible studies...", candidateList.Count); } FilesystemProcessStatistics summaryStats = new FilesystemProcessStatistics("FilesystemPurgeInsert"); foreach (FilesystemQueue queueItem in candidateList) { if (_bytesToRemove < 0 || CancelPending) { break; } StudyProcessStatistics stats = new StudyProcessStatistics("PurgeStudy"); stats.TotalTime.Start(); stats.StudyStorageTime.Start(); // First, get the StudyStorage locations for the study, and calculate the disk usage. StudyStorageLocation location; if (!FilesystemMonitor.Instance.GetWritableStudyStorageLocation(queueItem.StudyStorageKey, out location)) { continue; } stats.StudyStorageTime.End(); stats.CalculateDirectorySizeTime.Start(); // Get the disk usage float studySize = EstimateFolderSizeFromStudyXml(location); stats.CalculateDirectorySizeTime.End(); stats.DirectorySize = (ulong)studySize; stats.DbUpdateTime.Start(); // Update the DB using ( IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { ILockStudy lockstudy = update.GetBroker <ILockStudy>(); LockStudyParameters lockParms = new LockStudyParameters(); lockParms.StudyStorageKey = location.Key; lockParms.QueueStudyStateEnum = QueueStudyStateEnum.PurgeScheduled; if (!lockstudy.Execute(lockParms) || !lockParms.Successful) { Platform.Log(LogLevel.Warn, "Unable to lock study for inserting Study Purge, skipping study ({0}", location.StudyInstanceUid); continue; } IInsertWorkQueueFromFilesystemQueue insertBroker = update.GetBroker <IInsertWorkQueueFromFilesystemQueue>(); InsertWorkQueueFromFilesystemQueueParameters insertParms = new InsertWorkQueueFromFilesystemQueueParameters(); insertParms.StudyStorageKey = location.GetKey(); insertParms.ServerPartitionKey = location.ServerPartitionKey; insertParms.ScheduledTime = _scheduledTime; insertParms.DeleteFilesystemQueue = true; insertParms.WorkQueueTypeEnum = WorkQueueTypeEnum.PurgeStudy; insertParms.FilesystemQueueTypeEnum = FilesystemQueueTypeEnum.PurgeStudy; WorkQueue insertItem = insertBroker.FindOne(insertParms); if (insertItem == null) { Platform.Log(LogLevel.Error, "Unexpected problem inserting 'PurgeStudy' record into WorkQueue for Study {0}", location.StudyInstanceUid); } else { update.Commit(); _bytesToRemove -= studySize; _studiesPurged++; _scheduledTime = _scheduledTime.AddSeconds(2); } } stats.DbUpdateTime.End(); stats.TotalTime.End(); summaryStats.AddSubStats(stats); StatisticsLogger.Log(LogLevel.Debug, stats); } summaryStats.CalculateAverage(); StatisticsLogger.Log(LogLevel.Info, false, summaryStats); }
/// <summary> /// Process study migration candidates retrieved from the <see cref="Model.FilesystemQueue"/> table /// </summary> /// <param name="candidateList">The list of candidate studies for deleting.</param> private void ProcessStudyMigrateCandidates(IList <FilesystemQueue> candidateList) { Platform.CheckForNullReference(candidateList, "candidateList"); if (candidateList.Count > 0) { Platform.Log(LogLevel.Debug, "Scheduling tier-migration for {0} eligible studies...", candidateList.Count); } FilesystemProcessStatistics summaryStats = new FilesystemProcessStatistics("FilesystemTierMigrateInsert"); foreach (FilesystemQueue queueItem in candidateList) { if (_bytesToRemove < 0 || CancelPending) { Platform.Log(LogLevel.Debug, "Estimated disk space has been reached."); break; } StudyProcessStatistics stats = new StudyProcessStatistics("TierMigrateStudy"); stats.TotalTime.Start(); stats.StudyStorageTime.Start(); // First, get the StudyStorage locations for the study, and calculate the disk usage. StudyStorageLocation location; if (!FilesystemMonitor.Instance.GetWritableStudyStorageLocation(queueItem.StudyStorageKey, out location)) { continue; } stats.StudyStorageTime.End(); stats.CalculateDirectorySizeTime.Start(); // Get the disk usage float studySize = EstimateFolderSizeFromStudyXml(location); stats.CalculateDirectorySizeTime.End(); stats.DirectorySize = (ulong)studySize; stats.DbUpdateTime.Start(); using ( IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { ILockStudy lockstudy = update.GetBroker <ILockStudy>(); LockStudyParameters lockParms = new LockStudyParameters { StudyStorageKey = location.Key, QueueStudyStateEnum = QueueStudyStateEnum.MigrationScheduled }; if (!lockstudy.Execute(lockParms) || !lockParms.Successful) { Platform.Log(LogLevel.Warn, "Unable to lock study for inserting Tier Migration. Reason:{0}. Skipping study ({1})", lockParms.FailureReason, location.StudyInstanceUid); continue; } IInsertWorkQueueFromFilesystemQueue broker = update.GetBroker <IInsertWorkQueueFromFilesystemQueue>(); InsertWorkQueueFromFilesystemQueueParameters insertParms = new InsertWorkQueueFromFilesystemQueueParameters { StudyStorageKey = location.GetKey(), ServerPartitionKey = location.ServerPartitionKey, ScheduledTime = _scheduledTime, DeleteFilesystemQueue = true, WorkQueueTypeEnum = WorkQueueTypeEnum.MigrateStudy, FilesystemQueueTypeEnum = FilesystemQueueTypeEnum.TierMigrate }; Platform.Log(LogLevel.Debug, "Scheduling tier-migration for study {0} from {1} at {2}...", location.StudyInstanceUid, location.FilesystemTierEnum, _scheduledTime); WorkQueue insertItem = broker.FindOne(insertParms); if (insertItem == null) { Platform.Log(LogLevel.Error, "Unexpected problem inserting 'MigrateStudy' record into WorkQueue for Study {0}", location.StudyInstanceUid); } else { update.Commit(); _bytesToRemove -= studySize; _studiesMigrated++; // spread out the scheduled migration entries based on the size // assuming that the larger the study the longer it will take to migrate // The assumed migration speed is arbitarily chosen. 
double migrationSpeed = ServiceLockSettings.Default.TierMigrationSpeed * 1024 * 1024; // MB / sec TimeSpan estMigrateTime = TimeSpan.FromSeconds(studySize / migrationSpeed); _scheduledTime = _scheduledTime.Add(estMigrateTime); } } stats.DbUpdateTime.End(); stats.TotalTime.End(); summaryStats.AddSubStats(stats); StatisticsLogger.Log(LogLevel.Debug, stats); } summaryStats.CalculateAverage(); StatisticsLogger.Log(LogLevel.Info, false, summaryStats); }
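// Worked example of the scheduling spread computed above (illustrative numbers only, not taken
// from real settings): with TierMigrationSpeed = 10 MB/sec and an estimated study size of 512 MB,
// successive MigrateStudy entries end up spaced roughly 51 seconds apart.
double tierMigrationSpeedMB = 10;                                  // assumed settings value, MB/sec
double migrationSpeedBytesPerSec = tierMigrationSpeedMB * 1024 * 1024;
float studySizeBytes = 512f * 1024 * 1024;                         // estimated study folder size in bytes
TimeSpan spacing = TimeSpan.FromSeconds(studySizeBytes / migrationSpeedBytesPerSec);
// spacing.TotalSeconds == 51.2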
/// <summary> /// Inserts work queue entry to process the duplicates. /// </summary> /// <param name="entryKey"><see cref="ServerEntityKey"/> of the <see cref="StudyIntegrityQueue"/> entry that has <see cref="StudyIntegrityReasonEnum"/> equal to <see cref="StudyIntegrityReasonEnum.Duplicate"/> </param> /// <param name="action"></param> public void Process(ServerEntityKey entryKey, ProcessDuplicateAction action) { DuplicateSopReceivedQueue entry = DuplicateSopReceivedQueue.Load(HttpContext.Current.GetSharedPersistentContext(), entryKey); Platform.CheckTrue(entry.StudyIntegrityReasonEnum == StudyIntegrityReasonEnum.Duplicate, "Invalid type of entry"); IList <StudyIntegrityQueueUid> uids = LoadDuplicateSopUid(entry); using (IUpdateContext context = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { ProcessDuplicateQueueEntryQueueData data = new ProcessDuplicateQueueEntryQueueData { Action = action, DuplicateSopFolder = entry.GetFolderPath(context), UserName = ServerHelper.CurrentUserName, }; LockStudyParameters lockParms = new LockStudyParameters { QueueStudyStateEnum = QueueStudyStateEnum.ReconcileScheduled, StudyStorageKey = entry.StudyStorageKey }; ILockStudy lockBbroker = context.GetBroker <ILockStudy>(); lockBbroker.Execute(lockParms); if (!lockParms.Successful) { throw new ApplicationException(lockParms.FailureReason); } IWorkQueueProcessDuplicateSopBroker broker = context.GetBroker <IWorkQueueProcessDuplicateSopBroker>(); WorkQueueProcessDuplicateSopUpdateColumns columns = new WorkQueueProcessDuplicateSopUpdateColumns { Data = XmlUtils.SerializeAsXmlDoc(data), GroupID = entry.GroupID, ScheduledTime = Platform.Time, ExpirationTime = Platform.Time.Add(TimeSpan.FromMinutes(15)), ServerPartitionKey = entry.ServerPartitionKey, WorkQueuePriorityEnum = WorkQueuePriorityEnum.Medium, StudyStorageKey = entry.StudyStorageKey, WorkQueueStatusEnum = WorkQueueStatusEnum.Pending }; WorkQueueProcessDuplicateSop processDuplicateWorkQueueEntry = broker.Insert(columns); IWorkQueueUidEntityBroker workQueueUidBroker = context.GetBroker <IWorkQueueUidEntityBroker>(); IStudyIntegrityQueueUidEntityBroker duplicateUidBroke = context.GetBroker <IStudyIntegrityQueueUidEntityBroker>(); foreach (StudyIntegrityQueueUid uid in uids) { WorkQueueUidUpdateColumns uidColumns = new WorkQueueUidUpdateColumns { Duplicate = true, Extension = ServerPlatform.DuplicateFileExtension, SeriesInstanceUid = uid.SeriesInstanceUid, SopInstanceUid = uid.SopInstanceUid, RelativePath = uid.RelativePath, WorkQueueKey = processDuplicateWorkQueueEntry.GetKey() }; workQueueUidBroker.Insert(uidColumns); duplicateUidBroke.Delete(uid.GetKey()); } IDuplicateSopEntryEntityBroker duplicateEntryBroker = context.GetBroker <IDuplicateSopEntryEntityBroker>(); duplicateEntryBroker.Delete(entry.GetKey()); context.Commit(); } }
/// <summary> /// Archive the specified <see cref="ArchiveQueue"/> item. /// </summary> /// <param name="queueItem">The ArchiveQueue item to archive.</param> public void Run(ArchiveQueue queueItem) { using (ArchiveProcessorContext executionContext = new ArchiveProcessorContext(queueItem)) { try { if (!GetStudyStorageLocation(queueItem)) { Platform.Log(LogLevel.Error, "Unable to find readable study storage location for archival queue request {0}. Delaying request.", queueItem.Key); queueItem.FailureDescription = "Unable to find readable study storage location for archival queue request."; _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2)); return; } // First, check to see if we can lock the study, if not just reschedule the queue entry. if (!_storageLocation.QueueStudyStateEnum.Equals(QueueStudyStateEnum.Idle)) { Platform.Log(LogLevel.Info, "Study {0} on partition {1} is currently locked, delaying archival.", _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description); queueItem.FailureDescription = "Study is currently locked, delaying archival."; _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2)); return; } StudyIntegrityValidator validator = new StudyIntegrityValidator(); validator.ValidateStudyState("Archive", _storageLocation, StudyIntegrityValidationModes.Default); using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { ILockStudy studyLock = update.GetBroker <ILockStudy>(); LockStudyParameters parms = new LockStudyParameters { StudyStorageKey = queueItem.StudyStorageKey, QueueStudyStateEnum = QueueStudyStateEnum.ArchiveScheduled }; bool retVal = studyLock.Execute(parms); if (!parms.Successful || !retVal) { Platform.Log(LogLevel.Info, "Study {0} on partition {1} failed to lock, delaying archival.", _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description); queueItem.FailureDescription = "Study failed to lock, delaying archival."; _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2)); return; } update.Commit(); } string studyFolder = _storageLocation.GetStudyPath(); string studyXmlFile = _storageLocation.GetStudyXmlPath(); // Load the study Xml file, this is used to generate the list of dicom files to archive. LoadStudyXml(studyXmlFile); DicomFile file = LoadFileFromStudyXml(); string patientsName = file.DataSet[DicomTags.PatientsName].GetString(0, string.Empty); string patientId = file.DataSet[DicomTags.PatientId].GetString(0, string.Empty); string accessionNumber = file.DataSet[DicomTags.AccessionNumber].GetString(0, string.Empty); Platform.Log(LogLevel.Info, "Starting archival of study {0} for Patient {1} (PatientId:{2} A#:{3}) on Partition {4} on archive {5}", _storageLocation.StudyInstanceUid, patientsName, patientId, accessionNumber, _hsmArchive.ServerPartition.Description, _hsmArchive.PartitionArchive.Description); // Use the command processor to do the archival. 
using (ServerCommandProcessor commandProcessor = new ServerCommandProcessor("Archive")) { _archiveXml = new XmlDocument(); // Create the study date folder string zipFilename = Path.Combine(_hsmArchive.HsmPath, _storageLocation.StudyFolder); commandProcessor.AddCommand(new CreateDirectoryCommand(zipFilename)); // Create a folder for the study zipFilename = Path.Combine(zipFilename, _storageLocation.StudyInstanceUid); commandProcessor.AddCommand(new CreateDirectoryCommand(zipFilename)); // Save the archive data in the study folder, based on a filename with a date / time stamp string filename = String.Format("{0}.zip", Platform.Time.ToString("yyyy-MM-dd-HHmm")); zipFilename = Path.Combine(zipFilename, filename); // Create the Xml data to store in the ArchiveStudyStorage table telling // where the archived study is located. XmlElement hsmArchiveElement = _archiveXml.CreateElement("HsmArchive"); _archiveXml.AppendChild(hsmArchiveElement); XmlElement studyFolderElement = _archiveXml.CreateElement("StudyFolder"); hsmArchiveElement.AppendChild(studyFolderElement); studyFolderElement.InnerText = _storageLocation.StudyFolder; XmlElement filenameElement = _archiveXml.CreateElement("Filename"); hsmArchiveElement.AppendChild(filenameElement); filenameElement.InnerText = filename; XmlElement studyInstanceUidElement = _archiveXml.CreateElement("Uid"); hsmArchiveElement.AppendChild(studyInstanceUidElement); studyInstanceUidElement.InnerText = _storageLocation.StudyInstanceUid; // Create the Zip file commandProcessor.AddCommand( new CreateStudyZipCommand(zipFilename, _studyXml, studyFolder, executionContext.TempDirectory)); // Update the database. commandProcessor.AddCommand(new InsertArchiveStudyStorageCommand(queueItem.StudyStorageKey, queueItem.PartitionArchiveKey, queueItem.GetKey(), _storageLocation.ServerTransferSyntaxKey, _archiveXml)); StudyRulesEngine studyEngine = new StudyRulesEngine(_storageLocation, _hsmArchive.ServerPartition, _studyXml); studyEngine.Apply(ServerRuleApplyTimeEnum.StudyArchived, commandProcessor); if (!commandProcessor.Execute()) { Platform.Log(LogLevel.Error, "Unexpected failure archiving study ({0}) to archive {1}: {2}, zip filename: {3}", _storageLocation.StudyInstanceUid, _hsmArchive.PartitionArchive.Description, commandProcessor.FailureReason, zipFilename); queueItem.FailureDescription = commandProcessor.FailureReason; _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time); } else { Platform.Log(LogLevel.Info, "Successfully archived study {0} on {1} to zip {2}", _storageLocation.StudyInstanceUid, _hsmArchive.PartitionArchive.Description, zipFilename); } // Log the current FilesystemQueue settings _storageLocation.LogFilesystemQueue(); } } catch (StudyIntegrityValidationFailure ex) { StringBuilder error = new StringBuilder(); error.AppendLine(String.Format("Partition : {0}", ex.ValidationStudyInfo.ServerAE)); error.AppendLine(String.Format("Patient : {0}", ex.ValidationStudyInfo.PatientsName)); error.AppendLine(String.Format("Study Uid : {0}", ex.ValidationStudyInfo.StudyInstaneUid)); error.AppendLine(String.Format("Accession# : {0}", ex.ValidationStudyInfo.AccessionNumber)); error.AppendLine(String.Format("Study Date : {0}", ex.ValidationStudyInfo.StudyDate)); queueItem.FailureDescription = error.ToString(); _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time); } catch (Exception e) { String msg = String.Format("Unexpected exception archiving study: {0} on {1}: {2}", _storageLocation.StudyInstanceUid, 
_hsmArchive.PartitionArchive.Description, e.Message); Platform.Log(LogLevel.Error, e, msg); queueItem.FailureDescription = msg; _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time); } finally { // Unlock the Queue Entry using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { ILockStudy studyLock = update.GetBroker <ILockStudy>(); LockStudyParameters parms = new LockStudyParameters { StudyStorageKey = queueItem.StudyStorageKey, QueueStudyStateEnum = QueueStudyStateEnum.Idle }; bool retVal = studyLock.Execute(parms); if (!parms.Successful || !retVal) { Platform.Log(LogLevel.Info, "Study {0} on partition {1} failed to unlock.", _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description); } update.Commit(); } } } }
public void RestoreNearlineStudy(RestoreQueue queueItem, string zipFile, string studyFolder) { ServerFilesystemInfo fs = _hsmArchive.Selector.SelectFilesystem(); if (fs == null) { DateTime scheduleTime = Platform.Time.AddMinutes(5); Platform.Log(LogLevel.Error, "No writeable filesystem for restore, rescheduling restore request to {0}", scheduleTime); queueItem.FailureDescription = "No writeable filesystem for restore, rescheduling restore request"; _hsmArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Pending, scheduleTime); return; } string destinationFolder = Path.Combine(fs.Filesystem.FilesystemPath, _hsmArchive.ServerPartition.PartitionFolder); StudyStorageLocation restoredLocation = null; try { using (var processor = new ServerCommandProcessor("HSM Restore Offline Study")) { processor.AddCommand(new CreateDirectoryCommand(destinationFolder)); destinationFolder = Path.Combine(destinationFolder, studyFolder); processor.AddCommand(new CreateDirectoryCommand(destinationFolder)); destinationFolder = Path.Combine(destinationFolder, _studyStorage.StudyInstanceUid); processor.AddCommand(new CreateDirectoryCommand(destinationFolder)); processor.AddCommand(new ExtractZipCommand(zipFile, destinationFolder)); // We rebuild the StudyXml, in case any settings or issues have happened since archival processor.AddCommand(new RebuildStudyXmlCommand(_studyStorage.StudyInstanceUid, destinationFolder)); // Apply the rules engine. var context = new ServerActionContext(null, fs.Filesystem.GetKey(), _hsmArchive.ServerPartition, queueItem.StudyStorageKey) { CommandProcessor = processor }; processor.AddCommand( new ApplyRulesCommand(destinationFolder, _studyStorage.StudyInstanceUid, context)); // Do the actual insert into the DB var insertStorageCommand = new InsertFilesystemStudyStorageCommand( _hsmArchive.PartitionArchive.ServerPartitionKey, _studyStorage.StudyInstanceUid, studyFolder, fs.Filesystem.GetKey(), _syntax); processor.AddCommand(insertStorageCommand); if (!processor.Execute()) { Platform.Log(LogLevel.Error, "Unexpected error processing restore request for {0} on archive {1}", _studyStorage.StudyInstanceUid, _hsmArchive.PartitionArchive.Description); queueItem.FailureDescription = processor.FailureReason; _hsmArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Failed, Platform.Time); } else { restoredLocation = insertStorageCommand.Location; // Unlock the Queue Entry using ( IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { bool retVal = _hsmArchive.UpdateRestoreQueue(update, queueItem, RestoreQueueStatusEnum.Completed, Platform.Time.AddSeconds(60)); var studyLock = update.GetBroker <ILockStudy>(); var parms = new LockStudyParameters { StudyStorageKey = queueItem.StudyStorageKey, QueueStudyStateEnum = QueueStudyStateEnum.Idle }; retVal = retVal && studyLock.Execute(parms); if (!parms.Successful || !retVal) { string message = String.Format("Study {0} on partition {1} failed to unlock.", _studyStorage.StudyInstanceUid, _hsmArchive.ServerPartition.Description); Platform.Log(LogLevel.Info, message); throw new ApplicationException(message); } update.Commit(); Platform.Log(LogLevel.Info, "Successfully restored study: {0} on archive {1}", _studyStorage.StudyInstanceUid, _hsmArchive.PartitionArchive.Description); OnStudyRestored(restoredLocation); } } } } catch (StudyIntegrityValidationFailure ex) { Debug.Assert(restoredLocation != null); // study has been restored but it seems corrupted. Need to reprocess it. 
ReprocessStudy(restoredLocation, ex.Message); } catch (Exception e) { Platform.Log(LogLevel.Error, e, "Unexpected exception processing restore request for {0} on archive {1}", _studyStorage.StudyInstanceUid, _hsmArchive.PartitionArchive.Description); _hsmArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Failed, Platform.Time); } }
protected override void ImportCore(IEnumerable <IImportItem> items) { using (PersistenceScope scope = new PersistenceScope(PersistenceContextType.Update)) { IUpdateContext context = (IUpdateContext)PersistenceScope.CurrentContext; context.ChangeSetRecorder.OperationName = this.GetType().FullName; IMetadataBroker metaBroker = context.GetBroker <IMetadataBroker>(); IEnumBroker enumBroker = context.GetBroker <IEnumBroker>(); IList <Type> enumClasses = metaBroker.ListEnumValueClasses(); foreach (IImportItem item in items) { EnumerationData data = (EnumerationData)Read(item.Read(), typeof(EnumerationData)); // find the enum class Type enumClass = CollectionUtils.SelectFirst(enumClasses, delegate(Type ec) { return(ec.FullName == data.EnumerationClass); }); if (enumClass == null) { Platform.Log(LogLevel.Error, string.Format("{0} is not a valid enumeration class name.", data.EnumerationClass)); continue; } IList <EnumValue> existingValues = enumBroker.Load(enumClass, true); foreach (EnumerationMemberData md in data.Members) { // check if a conflicting value exists // (this can happen if there is existing data in the db with the same value but different code) EnumValue conflict = CollectionUtils.SelectFirst(existingValues, delegate(EnumValue v) { return(v.Code != md.Code && v.Value == md.Value); }); if (conflict != null) { Platform.Log(LogLevel.Error, string.Format("{0} value {1} conflicts with existing value {2} and will not be imported.", data.EnumerationClass, md.Code, conflict.Code)); continue; } // check if the value already exists EnumValue value = CollectionUtils.SelectFirst(existingValues, delegate(EnumValue v) { return(v.Code == md.Code); }); if (value == null) { // value does not exist - add it value = enumBroker.AddValue(enumClass, md.Code, md.Value, md.Description, md.DisplayOrder, md.Deactivated); existingValues.Add(value); } else { // value exists - update it enumBroker.UpdateValue(enumClass, md.Code, md.Value, md.Description, md.DisplayOrder, md.Deactivated); } } context.SynchState(); } scope.Complete(); } }
private static void ReconcileStudy(string command, StudyIntegrityQueue item) { //Ignore the reconcile command if the item is null. if (item == null) { return; } // Preload the change description so its not done during the DB transaction XmlDocument changeDescription = new XmlDocument(); changeDescription.LoadXml(command); // The Xml in the SIQ item was generated when the images were received and put into the SIQ. // We now add the user info to it so that it will be logged in the history ReconcileStudyWorkQueueData queueData = XmlUtils.Deserialize <ReconcileStudyWorkQueueData>(item.Details); queueData.TimeStamp = Platform.Time; queueData.UserId = ServerHelper.CurrentUserName; using (IUpdateContext context = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { LockStudyParameters lockParms = new LockStudyParameters { QueueStudyStateEnum = QueueStudyStateEnum.ReconcileScheduled, StudyStorageKey = item.StudyStorageKey }; ILockStudy broker = context.GetBroker <ILockStudy>(); broker.Execute(lockParms); if (!lockParms.Successful) { throw new ApplicationException(lockParms.FailureReason); } //Add to Study History StudyHistoryeAdaptor historyAdaptor = new StudyHistoryeAdaptor(); StudyHistoryUpdateColumns parameters = new StudyHistoryUpdateColumns { StudyData = item.StudyData, ChangeDescription = changeDescription, StudyStorageKey = item.StudyStorageKey, StudyHistoryTypeEnum = StudyHistoryTypeEnum.StudyReconciled }; StudyHistory history = historyAdaptor.Add(context, parameters); //Create WorkQueue Entry WorkQueueAdaptor workQueueAdaptor = new WorkQueueAdaptor(); WorkQueueUpdateColumns row = new WorkQueueUpdateColumns { Data = XmlUtils.SerializeAsXmlDoc(queueData), ServerPartitionKey = item.ServerPartitionKey, StudyStorageKey = item.StudyStorageKey, StudyHistoryKey = history.GetKey(), WorkQueueTypeEnum = WorkQueueTypeEnum.ReconcileStudy, WorkQueueStatusEnum = WorkQueueStatusEnum.Pending, ScheduledTime = Platform.Time, ExpirationTime = Platform.Time.AddHours(1), GroupID = item.GroupID }; WorkQueue newWorkQueueItem = workQueueAdaptor.Add(context, row); StudyIntegrityQueueUidAdaptor studyIntegrityQueueUidAdaptor = new StudyIntegrityQueueUidAdaptor(); StudyIntegrityQueueUidSelectCriteria crit = new StudyIntegrityQueueUidSelectCriteria(); crit.StudyIntegrityQueueKey.EqualTo(item.GetKey()); IList <StudyIntegrityQueueUid> uidList = studyIntegrityQueueUidAdaptor.Get(context, crit); WorkQueueUidAdaptor workQueueUidAdaptor = new WorkQueueUidAdaptor(); WorkQueueUidUpdateColumns update = new WorkQueueUidUpdateColumns(); foreach (StudyIntegrityQueueUid uid in uidList) { update.WorkQueueKey = newWorkQueueItem.GetKey(); update.SeriesInstanceUid = uid.SeriesInstanceUid; update.SopInstanceUid = uid.SopInstanceUid; update.RelativePath = uid.RelativePath; workQueueUidAdaptor.Add(context, update); } //DeleteStudyIntegrityQueue Item StudyIntegrityQueueUidSelectCriteria criteria = new StudyIntegrityQueueUidSelectCriteria(); criteria.StudyIntegrityQueueKey.EqualTo(item.GetKey()); studyIntegrityQueueUidAdaptor.Delete(context, criteria); StudyIntegrityQueueAdaptor studyIntegrityQueueAdaptor = new StudyIntegrityQueueAdaptor(); studyIntegrityQueueAdaptor.Delete(context, item.GetKey()); context.Commit(); } }
private bool ArchiveLogs(ServerFilesystemInfo archiveFs)
{
    string archivePath = Path.Combine(archiveFs.Filesystem.FilesystemPath, "AlertLog");

    DateTime cutOffTime = Platform.Time.Date.AddDays(ServiceLockSettings.Default.AlertCachedDays * -1);
    AlertSelectCriteria criteria = new AlertSelectCriteria();
    criteria.InsertTime.LessThan(cutOffTime);
    criteria.InsertTime.SortAsc(0);

    using (ServerExecutionContext context = new ServerExecutionContext())
    {
        IAlertEntityBroker broker = context.ReadContext.GetBroker<IAlertEntityBroker>();

        ImageServerLogWriter<Alert> writer = new ImageServerLogWriter<Alert>(archivePath, "Alert");

        List<ServerEntityKey> keyList = new List<ServerEntityKey>(500);
        try
        {
            broker.Find(criteria, delegate(Alert result)
            {
                keyList.Add(result.Key);

                // If configured to delete without archiving, don't flush to disk; the contents of keyList are simply deleted below.
                if (!ServiceLockSettings.Default.AlertDelete)
                {
                    if (writer.WriteLog(result, result.InsertTime))
                    {
                        // The log has been flushed; delete the cached log entries.
                        using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
                        {
                            IAlertEntityBroker updateBroker = update.GetBroker<IAlertEntityBroker>();
                            foreach (ServerEntityKey key in keyList)
                            {
                                updateBroker.Delete(key);
                            }
                            update.Commit();
                        }
                        keyList = new List<ServerEntityKey>();
                    }
                }
            });

            writer.FlushLog();

            if (keyList.Count > 0)
            {
                using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
                {
                    IAlertEntityBroker updateBroker = update.GetBroker<IAlertEntityBroker>();
                    foreach (ServerEntityKey key in keyList)
                    {
                        updateBroker.Delete(key);
                    }
                    update.Commit();
                }
            }
        }
        catch (Exception e)
        {
            Platform.Log(LogLevel.Error, e, "Unexpected exception when purging Alert log files.");
            writer.Dispose();
            return false;
        }
        writer.Dispose();
        return true;
    }
}
public bool UpdateStudyAuthorityGroups(string studyInstanceUid, string accessionNumber, ServerEntityKey studyStorageKey, IList <string> assignedGroupOids) { List <AuthorityGroupDetail> nonAddedAuthorityGroups; Dictionary <ServerEntityKey, AuthorityGroupDetail> dic = LoadAuthorityGroups(out nonAddedAuthorityGroups); IList <AuthorityGroupStudyAccessInfo> assignedList = ListDataAccessGroupsForStudy(dic, studyStorageKey); List <string> groupList = new List <string>(); foreach (AuthorityGroupStudyAccessInfo group in assignedList) { bool found = false; foreach (var oid in assignedGroupOids) { if (group.AuthorityOID.Equals(oid)) { found = true; break; } } if (!found) { using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { IStudyDataAccessEntityBroker broker = update.GetBroker <IStudyDataAccessEntityBroker>(); broker.Delete(group.StudyDataAccess.Key); update.Commit(); } groupList.Add(group.Description); } } if (groupList.Count > 0) { ServerAuditHelper.RemoveAuthorityGroupAccess(studyInstanceUid, accessionNumber, groupList); groupList.Clear(); } foreach (var oid in assignedGroupOids) { bool found = false; foreach (AuthorityGroupStudyAccessInfo group in assignedList) { if (group.AuthorityOID.Equals(oid)) { found = true; break; } } if (!found) { DataAccessGroup accessGroup = AddDataAccessIfNotExists(oid); using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { StudyDataAccessUpdateColumns insertColumns = new StudyDataAccessUpdateColumns { DataAccessGroupKey = accessGroup.Key, StudyStorageKey = studyStorageKey }; IStudyDataAccessEntityBroker insert = updateContext.GetBroker <IStudyDataAccessEntityBroker>(); insert.Insert(insertColumns); updateContext.Commit(); } foreach (AuthorityGroupDetail group in nonAddedAuthorityGroups) { if (group.AuthorityGroupRef.ToString(false, false).Equals(accessGroup.AuthorityGroupOID.Key.ToString())) { groupList.Add(group.Name); } } } } if (groupList.Count > 0) { ServerAuditHelper.AddAuthorityGroupAccess(studyInstanceUid, accessionNumber, groupList); } return(true); }
public void OnStudyDeleted() { if (!Enabled) { return; } if (_context.WorkQueueItem.WorkQueueTypeEnum == WorkQueueTypeEnum.WebDeleteStudy) { Study study = _context.Study; if (study == null) { Platform.Log(LogLevel.Info, "Not logging Study Delete information due to missing Study record for study: {0} on partition {1}", _context.StorageLocation.StudyInstanceUid, _context.ServerPartition.AeTitle); return; } StudyStorageLocation storage = _context.StorageLocation; using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { // Setup the parameters IStudyDeleteRecordEntityBroker broker = updateContext.GetBroker <IStudyDeleteRecordEntityBroker>(); StudyDeleteRecordUpdateColumns parms = new StudyDeleteRecordUpdateColumns(); parms.Timestamp = Platform.Time; WebDeleteStudyLevelQueueData extendedInfo = XmlUtils.Deserialize <WebDeleteStudyLevelQueueData>(_context.WorkQueueItem.Data); parms.Reason = extendedInfo != null? extendedInfo.Reason:_context.WorkQueueItem.WorkQueueTypeEnum.LongDescription; parms.ServerPartitionAE = _context.ServerPartition.AeTitle; parms.FilesystemKey = storage.FilesystemKey; parms.AccessionNumber = study.AccessionNumber; parms.PatientId = study.PatientId; parms.PatientsName = study.PatientsName; parms.StudyInstanceUid = study.StudyInstanceUid; parms.StudyDate = study.StudyDate; parms.StudyDescription = study.StudyDescription; parms.StudyTime = study.StudyTime; parms.BackupPath = BackupZipFileRelativePath; if (_archives != null && _archives.Count > 0) { parms.ArchiveInfo = XmlUtils.SerializeAsXmlDoc(_archives); } StudyDeleteExtendedInfo extInfo = new StudyDeleteExtendedInfo(); extInfo.ServerInstanceId = ServerPlatform.ServerInstanceId; extInfo.UserId = _context.UserId; extInfo.UserName = _context.UserName; parms.ExtendedInfo = XmlUtils.SerializeAsString(extInfo); StudyDeleteRecord deleteRecord = broker.Insert(parms); if (deleteRecord == null) { Platform.Log(LogLevel.Error, "Unexpected error when trying to create study delete record: {0} on partition {1}", study.StudyInstanceUid, _context.ServerPartition.Description); } else { updateContext.Commit(); } } } }
protected override void OnExecute(CommandProcessor theProcessor, IUpdateContext updateContext)
{
    var delete = updateContext.GetBroker<IWorkQueueUidEntityBroker>();
    delete.Delete(_uid.GetKey());
}
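For context, commands like the one above are not run directly; they are queued on a command processor, which supplies the IUpdateContext when it executes. A minimal usage sketch, assuming a wrapper command class (here called DeleteWorkQueueUidCommand, a hypothetical name) and following the ServerCommandProcessor pattern used in the archive/restore examples below:

using (var processor = new ServerCommandProcessor("Delete WorkQueue Uid"))
{
    // DeleteWorkQueueUidCommand is assumed to wrap the OnExecute shown above.
    processor.AddCommand(new DeleteWorkQueueUidCommand(uid));
    if (!processor.Execute())
        Platform.Log(LogLevel.Error, "Failed to delete WorkQueueUid: {0}", processor.FailureReason);
}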
/// <summary>
/// Process StudyCompress candidates retrieved from the <see cref="Model.FilesystemQueue"/> table
/// </summary>
/// <param name="candidateList">The list of candidate studies for compression.</param>
/// <param name="type">The type of compress candidate (lossy or lossless)</param>
private void ProcessCompressCandidates(IEnumerable<FilesystemQueue> candidateList, FilesystemQueueTypeEnum type)
{
    using (ServerExecutionContext context = new ServerExecutionContext())
    {
        DateTime scheduledTime = Platform.Time.AddSeconds(10);

        foreach (FilesystemQueue queueItem in candidateList)
        {
            // Check for Shutdown/Cancel
            if (CancelPending)
            {
                break;
            }

            // First, get a writable StudyStorage location for the study.
            StudyStorageLocation location;
            if (!FilesystemMonitor.Instance.GetWritableStudyStorageLocation(queueItem.StudyStorageKey, out location))
            {
                continue;
            }

            StudyXml studyXml;
            try
            {
                studyXml = LoadStudyXml(location);
            }
            catch (Exception e)
            {
                Platform.Log(LogLevel.Error, e, "Skipping compress candidate, unexpected exception loading StudyXml file for {0}", location.GetStudyPath());
                continue;
            }

            using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
            {
                ILockStudy lockstudy = update.GetBroker<ILockStudy>();
                LockStudyParameters lockParms = new LockStudyParameters();
                lockParms.StudyStorageKey = location.Key;
                lockParms.QueueStudyStateEnum = QueueStudyStateEnum.CompressScheduled;
                if (!lockstudy.Execute(lockParms) || !lockParms.Successful)
                {
                    Platform.Log(LogLevel.Warn, "Unable to lock study for inserting Compress Study request. Reason:{0}. Skipping study ({1})", lockParms.FailureReason, location.StudyInstanceUid);
                    continue;
                }

                scheduledTime = scheduledTime.AddSeconds(3);

                IInsertWorkQueueFromFilesystemQueue workQueueInsert = update.GetBroker<IInsertWorkQueueFromFilesystemQueue>();

                InsertWorkQueueFromFilesystemQueueParameters insertParms = new InsertWorkQueueFromFilesystemQueueParameters();
                insertParms.WorkQueueTypeEnum = WorkQueueTypeEnum.CompressStudy;
                insertParms.FilesystemQueueTypeEnum = type;
                insertParms.StudyStorageKey = location.GetKey();
                insertParms.ServerPartitionKey = location.ServerPartitionKey;
                insertParms.ScheduledTime = scheduledTime;
                insertParms.DeleteFilesystemQueue = true;
                insertParms.Data = queueItem.QueueXml;

                try
                {
                    WorkQueue entry = workQueueInsert.FindOne(insertParms);

                    InsertWorkQueueUidFromStudyXml(studyXml, update, entry.GetKey());

                    update.Commit();
                    _studiesInserted++;
                }
                catch (Exception e)
                {
                    Platform.Log(LogLevel.Error, e, "Skipping compress record, unexpected problem inserting 'CompressStudy' record into WorkQueue for Study {0}", location.StudyInstanceUid);
                    // throw; -- would cause abort of inserts, go ahead and try everything
                }
            }
        }
    }
}
/// <summary> /// Method for getting next <see cref="WorkQueue"/> entry. /// </summary> /// <param name="processorId">The Id of the processor.</param> /// <remarks> /// </remarks> /// <returns> /// A <see cref="WorkQueue"/> entry if found, or else null; /// </returns> public Model.WorkQueue GetWorkQueueItem(string processorId) { Model.WorkQueue queueListItem = null; // First check for Stat WorkQueue items. if (_threadPool.MemoryLimitedThreadsAvailable) { using ( IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { IQueryWorkQueue select = updateContext.GetBroker <IQueryWorkQueue>(); WorkQueueQueryParameters parms = new WorkQueueQueryParameters { ProcessorID = processorId, WorkQueuePriorityEnum = WorkQueuePriorityEnum.Stat }; queueListItem = select.FindOne(parms); if (queueListItem != null) { updateContext.Commit(); } } } // If we don't have the max high priority threads in use, // first see if there's any available if (queueListItem == null && _threadPool.HighPriorityThreadsAvailable) { using ( IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { IQueryWorkQueue select = updateContext.GetBroker <IQueryWorkQueue>(); WorkQueueQueryParameters parms = new WorkQueueQueryParameters { ProcessorID = processorId, WorkQueuePriorityEnum = WorkQueuePriorityEnum.High }; queueListItem = select.FindOne(parms); if (queueListItem != null) { updateContext.Commit(); } } } // If we didn't find a high priority work queue item, and we have threads // available for memory limited work queue items, query for the next queue item available. if (queueListItem == null && _threadPool.MemoryLimitedThreadsAvailable) { using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { IQueryWorkQueue select = updateContext.GetBroker <IQueryWorkQueue>(); WorkQueueQueryParameters parms = new WorkQueueQueryParameters { ProcessorID = processorId }; queueListItem = select.FindOne(parms); if (queueListItem != null) { updateContext.Commit(); } } } // This logic only accessed if memory limited and priority threads are used up if (queueListItem == null && !_threadPool.MemoryLimitedThreadsAvailable) { using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { IQueryWorkQueue select = updateContext.GetBroker <IQueryWorkQueue>(); WorkQueueQueryParameters parms = new WorkQueueQueryParameters { ProcessorID = processorId, WorkQueuePriorityEnum = WorkQueuePriorityEnum.Stat, MemoryLimited = true }; queueListItem = select.FindOne(parms); if (queueListItem != null) { updateContext.Commit(); } } } // This logic only accessed if memory limited and priority threads are used up if (queueListItem == null && !_threadPool.MemoryLimitedThreadsAvailable) { using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { IQueryWorkQueue select = updateContext.GetBroker <IQueryWorkQueue>(); WorkQueueQueryParameters parms = new WorkQueueQueryParameters { ProcessorID = processorId, MemoryLimited = true }; queueListItem = select.FindOne(parms); if (queueListItem != null) { updateContext.Commit(); } } } return(queueListItem); }
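The five query blocks above differ only in the WorkQueueQueryParameters they build; the open-context/FindOne/Commit boilerplate is identical in each. A sketch of how that boilerplate could be factored into a small helper (the helper itself is not part of the original code; it reuses only the brokers and parameters already shown):

private static Model.WorkQueue TryGetQueueItem(WorkQueueQueryParameters parms)
{
    using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
    {
        IQueryWorkQueue select = updateContext.GetBroker<IQueryWorkQueue>();
        Model.WorkQueue item = select.FindOne(parms);

        // Commit only when a row was actually claimed for this processor.
        if (item != null)
            updateContext.Commit();

        return item;
    }
}

Each tier then reduces to building its parameters (for example, ProcessorID plus WorkQueuePriorityEnum.Stat) and calling the helper until a non-null entry is returned.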
private void RestoreOnlineStudy(RestoreQueue queueItem, string zipFile, string destinationFolder) { try { using (var processor = new ServerCommandProcessor("HSM Restore Online Study")) { var zipService = Platform.GetService <IZipService>(); using (var zipWriter = zipService.OpenWrite(zipFile)) { foreach (string file in zipWriter.EntryFileNames) { processor.AddCommand(new ExtractZipFileAndReplaceCommand(zipFile, file, destinationFolder)); } } // We rebuild the StudyXml, in case any settings or issues have happened since archival processor.AddCommand(new RebuildStudyXmlCommand(_location.StudyInstanceUid, destinationFolder)); StudyStatusEnum status; if (_syntax.Encapsulated && _syntax.LosslessCompressed) { status = StudyStatusEnum.OnlineLossless; } else if (_syntax.Encapsulated && _syntax.LossyCompressed) { status = StudyStatusEnum.OnlineLossy; } else { status = StudyStatusEnum.Online; } processor.AddCommand(new UpdateStudyStateCommand(_location, status, _serverSyntax)); // Apply the rules engine. var context = new ServerActionContext(null, _location.FilesystemKey, _hsmArchive.ServerPartition, queueItem.StudyStorageKey) { CommandProcessor = processor }; processor.AddCommand( new ApplyRulesCommand(destinationFolder, _location.StudyInstanceUid, context)); if (!processor.Execute()) { Platform.Log(LogLevel.Error, "Unexpected error processing restore request for {0} on archive {1}", _location.StudyInstanceUid, _hsmArchive.PartitionArchive.Description); queueItem.FailureDescription = processor.FailureReason; _hsmArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Failed, Platform.Time); } else { // Unlock the Queue Entry and set to complete using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { _hsmArchive.UpdateRestoreQueue(update, queueItem, RestoreQueueStatusEnum.Completed, Platform.Time.AddSeconds(60)); var studyLock = update.GetBroker <ILockStudy>(); var parms = new LockStudyParameters { StudyStorageKey = queueItem.StudyStorageKey, QueueStudyStateEnum = QueueStudyStateEnum.Idle }; bool retVal = studyLock.Execute(parms); if (!parms.Successful || !retVal) { Platform.Log(LogLevel.Info, "Study {0} on partition {1} failed to unlock.", _location.StudyInstanceUid, _hsmArchive.ServerPartition.Description); } update.Commit(); Platform.Log(LogLevel.Info, "Successfully restored study: {0} on archive {1}", _location.StudyInstanceUid, _hsmArchive.PartitionArchive.Description); _location = ReloadStorageLocation(); OnStudyRestored(_location); } } } } catch (StudyIntegrityValidationFailure ex) { // study has been restored but it seems corrupted. Need to reprocess it. ReprocessStudy(_location, ex.Message); } catch (Exception e) { Platform.Log(LogLevel.Error, e, "Unexpected exception processing restore request for {0} on archive {1}", _location.StudyInstanceUid, _hsmArchive.PartitionArchive.Description); queueItem.FailureDescription = e.Message; _hsmArchive.UpdateRestoreQueue(queueItem, RestoreQueueStatusEnum.Failed, Platform.Time); } }
public bool Delete(IUpdateContext context, ServerEntityKey key)
{
    TIEntity update = context.GetBroker<TIEntity>();
    return update.Delete(key);
}
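For reference, this generic adaptor method is invoked the same way as the adaptors in the reconcile example above. A minimal usage sketch, assuming a StudyIntegrityQueueAdaptor instance and an already-known ServerEntityKey (the transaction handling around the call is an assumption):

using (IUpdateContext context = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
{
    var adaptor = new StudyIntegrityQueueAdaptor();
    // Delete returns false when the broker could not remove the row; only commit on success.
    if (adaptor.Delete(context, itemKey))
        context.Commit();
}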
/// <summary> /// Inserts a <see cref="WorkQueue"/> request to reprocess the study /// </summary> /// <param name="ctx"></param> /// <param name="reason"></param> /// <param name="location"></param> /// <param name="additionalPaths"></param> /// <param name="scheduleTime"></param> /// <returns></returns> /// <exception cref="InvalidStudyStateOperationException">Study is in a state that reprocessing is not allowed</exception> /// public WorkQueue ReprocessStudy(IUpdateContext ctx, String reason, StudyStorageLocation location, List <FilesystemDynamicPath> additionalPaths, DateTime scheduleTime) { Platform.CheckForNullReference(location, "location"); if (location.StudyStatusEnum.Equals(StudyStatusEnum.OnlineLossy)) { if (location.IsLatestArchiveLossless) { string message = String.Format("Study has been archived as lossless and is currently lossy. It must be restored first"); throw new InvalidStudyStateOperationException(message); } } Study study = location.LoadStudy(ctx); // Unlock first. ILockStudy lockStudy = ctx.GetBroker <ILockStudy>(); LockStudyParameters lockParms = new LockStudyParameters(); lockParms.StudyStorageKey = location.Key; lockParms.QueueStudyStateEnum = QueueStudyStateEnum.Idle; if (!lockStudy.Execute(lockParms) || !lockParms.Successful) { // Note: according to the stored proc, setting study state to Idle always succeeds so // this will never happen return(null); } // Now relock into ReprocessScheduled state. If another process locks the study before this occurs, // lockParms.QueueStudyStateEnum = QueueStudyStateEnum.ReprocessScheduled; if (!lockStudy.Execute(lockParms) || !lockParms.Successful) { throw new InvalidStudyStateOperationException(lockParms.FailureReason); } InsertWorkQueueParameters columns = new InsertWorkQueueParameters(); columns.ScheduledTime = scheduleTime; columns.ServerPartitionKey = location.ServerPartitionKey; columns.StudyStorageKey = location.Key; columns.WorkQueueTypeEnum = WorkQueueTypeEnum.ReprocessStudy; ReprocessStudyQueueData queueData = new ReprocessStudyQueueData(); queueData.State = new ReprocessStudyState(); queueData.State.ExecuteAtLeastOnce = false; queueData.ChangeLog = new ReprocessStudyChangeLog(); queueData.ChangeLog.Reason = reason; queueData.ChangeLog.TimeStamp = Platform.Time; queueData.ChangeLog.User = (Thread.CurrentPrincipal is CustomPrincipal) ? (Thread.CurrentPrincipal as CustomPrincipal).Identity.Name : String.Empty; if (additionalPaths != null) { queueData.AdditionalFiles = additionalPaths.ConvertAll <string>(path => path.ToString()); } columns.WorkQueueData = XmlUtils.SerializeAsXmlDoc(queueData); IInsertWorkQueue insertBroker = ctx.GetBroker <IInsertWorkQueue>(); WorkQueue reprocessEntry = insertBroker.FindOne(columns); if (reprocessEntry != null) { if (study != null) { Platform.Log(LogLevel.Info, "Study Reprocess Scheduled for Study {0}, A#: {1}, Patient: {2}, ID={3}", study.StudyInstanceUid, study.AccessionNumber, study.PatientsName, study.PatientId); } else { Platform.Log(LogLevel.Info, "Study Reprocess Scheduled for Study {0}.", location.StudyInstanceUid); } } return(reprocessEntry); }
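A minimal sketch of how a caller might drive the method above; the StudyReprocessor class name, the in-scope StudyStorageLocation variable, and the surrounding commit logic are assumptions, while the parameters mirror the signature shown:

using (IUpdateContext ctx = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
{
    // Schedule the reprocess to start immediately; no additional paths are supplied.
    WorkQueue entry = new StudyReprocessor().ReprocessStudy(ctx, "Manual reprocess requested by administrator", location, null, Platform.Time);
    if (entry != null)
        ctx.Commit();
}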
protected override void Import(WorklistData data, IUpdateContext context) { var worklist = LoadOrCreateWorklist(data.Name, data.Class, context); worklist.Description = data.Description; if (data.StaffSubscribers != null) { foreach (var s in data.StaffSubscribers) { var criteria = new StaffSearchCriteria(); criteria.Id.EqualTo(s.StaffId); var staff = context.GetBroker <IStaffBroker>().Find(criteria); if (staff.Count == 1) { worklist.StaffSubscribers.Add(CollectionUtils.FirstElement(staff)); } } } if (data.GroupSubscribers != null) { foreach (var s in data.GroupSubscribers) { var criteria = new StaffGroupSearchCriteria(); criteria.Name.EqualTo(s.StaffGroupName); var groups = context.GetBroker <IStaffGroupBroker>().Find(criteria); if (groups.Count == 1) { worklist.GroupSubscribers.Add(CollectionUtils.FirstElement(groups)); } } } // proc type filter ImportFilter( worklist.ProcedureTypeFilter, data.Filters.ProcedureTypes, delegate(WorklistData.ProcedureTypeData s) { var criteria = new ProcedureTypeSearchCriteria(); criteria.Id.EqualTo(s.Id); var broker = context.GetBroker <IProcedureTypeBroker>(); return(CollectionUtils.FirstElement(broker.Find(criteria))); }); // proc type group filter ImportFilter( worklist.ProcedureTypeGroupFilter, data.Filters.ProcedureTypeGroups, delegate(WorklistData.ProcedureTypeGroupData s) { var criteria = new ProcedureTypeGroupSearchCriteria(); criteria.Name.EqualTo(s.Name); var broker = context.GetBroker <IProcedureTypeGroupBroker>(); return(CollectionUtils.FirstElement(broker.Find(criteria, ProcedureTypeGroup.GetSubClass(s.Class, context)))); }); //Bug #2284: don't forget to set the IncludeWorkingFacility property // facility filter worklist.FacilityFilter.IncludeWorkingFacility = data.Filters.Facilities.IncludeWorkingFacility; ImportFilter( worklist.FacilityFilter, data.Filters.Facilities, delegate(WorklistData.EnumValueData s) { var criteria = new FacilitySearchCriteria(); criteria.Code.EqualTo(s.Code); var broker = context.GetBroker <IFacilityBroker>(); return(CollectionUtils.FirstElement(broker.Find(criteria))); }); // department filter ImportFilter( worklist.DepartmentFilter, data.Filters.Departments, delegate(WorklistData.DepartmentData s) { var criteria = new DepartmentSearchCriteria(); criteria.Id.EqualTo(s.Id); var broker = context.GetBroker <IDepartmentBroker>(); return(CollectionUtils.FirstElement(broker.Find(criteria))); }); // priority filter ImportFilter( worklist.OrderPriorityFilter, data.Filters.OrderPriorities, delegate(WorklistData.EnumValueData s) { var broker = context.GetBroker <IEnumBroker>(); return(broker.Find <OrderPriorityEnum>(s.Code)); }); // ordering prac filter ImportFilter( worklist.OrderingPractitionerFilter, data.Filters.OrderingPractitioners, delegate(WorklistData.PractitionerData s) { var criteria = new ExternalPractitionerSearchCriteria(); criteria.Name.FamilyName.EqualTo(s.FamilyName); criteria.Name.GivenName.EqualTo(s.GivenName); // these criteria may not be provided (the data may not existed when exported), // but if available, they help to ensure the correct practitioner is being mapped if (!string.IsNullOrEmpty(s.BillingNumber)) { criteria.BillingNumber.EqualTo(s.BillingNumber); } if (!string.IsNullOrEmpty(s.LicenseNumber)) { criteria.LicenseNumber.EqualTo(s.LicenseNumber); } var broker = context.GetBroker <IExternalPractitionerBroker>(); return(CollectionUtils.FirstElement(broker.Find(criteria))); }); // patient class filter ImportFilter( worklist.PatientClassFilter, data.Filters.PatientClasses, 
delegate(WorklistData.EnumValueData s) { var broker = context.GetBroker <IEnumBroker>(); return(broker.Find <PatientClassEnum>(s.Code)); }); // patient location filter ImportFilter( worklist.PatientLocationFilter, data.Filters.PatientLocations, delegate(WorklistData.LocationData s) { var criteria = new LocationSearchCriteria(); criteria.Id.EqualTo(s.Id); var broker = context.GetBroker <ILocationBroker>(); return(CollectionUtils.FirstElement(broker.Find(criteria))); }); // portable filter worklist.PortableFilter.IsEnabled = data.Filters.Portable.Enabled; worklist.PortableFilter.Value = data.Filters.Portable.Value; //Bug #2429: don't forget to include the time filter // time filter worklist.TimeFilter.IsEnabled = data.Filters.TimeWindow.Enabled; worklist.TimeFilter.Value = data.Filters.TimeWindow == null || data.Filters.TimeWindow.Value == null ? null : data.Filters.TimeWindow.Value.CreateTimeRange(); // reporting filters if (Worklist.GetSupportsReportingStaffRoleFilter(worklist.GetClass())) { ImportReportingWorklistFilters(data, worklist.As <ReportingWorklist>(), context); } }
/// <summary> /// Simple routine for failing a work queue item. /// </summary> /// <param name="item">The item to fail.</param> /// <param name="failureDescription">The reason for the failure.</param> private void FailQueueItem(Model.WorkQueue item, string failureDescription) { // Must retry to reset the status of the entry in case of db error // Failure to do so will create stale work queue entry (stuck in "In Progress" state) // which can only be recovered by restarting the service. while (true) { try { WorkQueueTypeProperties prop = _propertiesDictionary[item.WorkQueueTypeEnum]; using (IUpdateContext updateContext = _store.OpenUpdateContext(UpdateContextSyncMode.Flush)) { IUpdateWorkQueue update = updateContext.GetBroker <IUpdateWorkQueue>(); UpdateWorkQueueParameters parms = new UpdateWorkQueueParameters { ProcessorID = ServerPlatform.ProcessorId, WorkQueueKey = item.GetKey(), StudyStorageKey = item.StudyStorageKey, FailureCount = item.FailureCount + 1, FailureDescription = failureDescription }; var settings = WorkQueueSettings.Instance; if ((item.FailureCount + 1) > prop.MaxFailureCount) { Platform.Log(LogLevel.Error, "Failing {0} WorkQueue entry ({1}), reached max retry count of {2}. Failure Reason: {3}", item.WorkQueueTypeEnum, item.GetKey(), item.FailureCount + 1, failureDescription); parms.WorkQueueStatusEnum = WorkQueueStatusEnum.Failed; parms.ScheduledTime = Platform.Time; parms.ExpirationTime = Platform.Time.AddDays(1); OnWorkQueueEntryFailed(item, failureDescription); } else { Platform.Log(LogLevel.Error, "Resetting {0} WorkQueue entry ({1}) to Pending, current retry count {2}. Failure Reason: {3}", item.WorkQueueTypeEnum, item.GetKey(), item.FailureCount + 1, failureDescription); parms.WorkQueueStatusEnum = WorkQueueStatusEnum.Pending; parms.ScheduledTime = Platform.Time.AddMilliseconds(settings.WorkQueueQueryDelay); parms.ExpirationTime = Platform.Time.AddSeconds((prop.MaxFailureCount - item.FailureCount) * prop.FailureDelaySeconds); } if (false == update.Execute(parms)) { Platform.Log(LogLevel.Error, "Unable to update {0} WorkQueue GUID: {1}", item.WorkQueueTypeEnum, item.GetKey().ToString()); } else { updateContext.Commit(); break; // done } } } catch (Exception ex) { Platform.Log(LogLevel.Error, "Error occurred when calling FailQueueItem. Retry later. {0}", ex.Message); _terminateEvent.WaitOne(2000, false); if (_stop) { Platform.Log(LogLevel.Warn, "Service is stopping. Retry to fail the entry is terminated."); break; } } } }
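As a concrete illustration of the retry arithmetic above (using assumed values of MaxFailureCount = 3 and FailureDelaySeconds = 60 for this work queue type): on the first failure of an entry (item.FailureCount = 0), FailureCount + 1 = 1 does not exceed 3, so the entry is reset to Pending, rescheduled WorkQueueQueryDelay milliseconds out, and given an expiration of (3 - 0) * 60 = 180 seconds; by the fourth failure (item.FailureCount = 3), FailureCount + 1 exceeds 3, so the entry is instead marked Failed and expires one day later.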
/// <summary> /// Migrates the study to new tier /// </summary> /// <param name="storage"></param> /// <param name="newFilesystem"></param> private void DoMigrateStudy(StudyStorageLocation storage, ServerFilesystemInfo newFilesystem) { Platform.CheckForNullReference(storage, "storage"); Platform.CheckForNullReference(newFilesystem, "newFilesystem"); TierMigrationStatistics stat = new TierMigrationStatistics { StudyInstanceUid = storage.StudyInstanceUid }; stat.ProcessSpeed.Start(); StudyXml studyXml = storage.LoadStudyXml(); stat.StudySize = (ulong)studyXml.GetStudySize(); Platform.Log(LogLevel.Info, "About to migrate study {0} from {1} to {2}", storage.StudyInstanceUid, storage.FilesystemTierEnum, newFilesystem.Filesystem.Description); string newPath = Path.Combine(newFilesystem.Filesystem.FilesystemPath, storage.PartitionFolder); DateTime startTime = Platform.Time; DateTime lastLog = Platform.Time; int fileCounter = 0; ulong bytesCopied = 0; long instanceCountInXml = studyXml.NumberOfStudyRelatedInstances; using (ServerCommandProcessor processor = new ServerCommandProcessor("Migrate Study")) { TierMigrationContext context = new TierMigrationContext { OriginalStudyLocation = storage, Destination = newFilesystem }; string origFolder = context.OriginalStudyLocation.GetStudyPath(); processor.AddCommand(new CreateDirectoryCommand(newPath)); newPath = Path.Combine(newPath, context.OriginalStudyLocation.StudyFolder); processor.AddCommand(new CreateDirectoryCommand(newPath)); newPath = Path.Combine(newPath, context.OriginalStudyLocation.StudyInstanceUid); // don't create this directory so that it won't be backed up by MoveDirectoryCommand CopyDirectoryCommand copyDirCommand = new CopyDirectoryCommand(origFolder, newPath, delegate(string path) { // Update the progress. This is useful if the migration takes long time to complete. FileInfo file = new FileInfo(path); bytesCopied += (ulong)file.Length; fileCounter++; if (file.Extension != null && file.Extension.Equals(ServerPlatform.DicomFileExtension, StringComparison.InvariantCultureIgnoreCase)) { TimeSpan elapsed = Platform.Time - lastLog; TimeSpan totalElapsed = Platform.Time - startTime; double speedInMBPerSecond = 0; if (totalElapsed.TotalSeconds > 0) { speedInMBPerSecond = (bytesCopied / 1024f / 1024f) / totalElapsed.TotalSeconds; } if (elapsed > TimeSpan.FromSeconds(WorkQueueSettings.Instance.TierMigrationProgressUpdateInSeconds)) { #region Log Progress StringBuilder stats = new StringBuilder(); if (instanceCountInXml != 0) { float pct = (float)fileCounter / instanceCountInXml; stats.AppendFormat("{0} files moved [{1:0.0}MB] since {2} ({3:0}% completed). Speed={4:0.00}MB/s", fileCounter, bytesCopied / 1024f / 1024f, startTime, pct * 100, speedInMBPerSecond); } else { stats.AppendFormat("{0} files moved [{1:0.0}MB] since {2}. Speed={3:0.00}MB/s", fileCounter, bytesCopied / 1024f / 1024f, startTime, speedInMBPerSecond); } Platform.Log(LogLevel.Info, "Tier migration for study {0}: {1}", storage.StudyInstanceUid, stats.ToString()); try { using (IUpdateContext ctx = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { IWorkQueueEntityBroker broker = ctx.GetBroker <IWorkQueueEntityBroker>(); WorkQueueUpdateColumns parameters = new WorkQueueUpdateColumns { FailureDescription = stats.ToString() }; broker.Update(WorkQueueItem.GetKey(), parameters); ctx.Commit(); } } catch { // can't log the progress so far... 
just ignore it } finally { lastLog = DateTime.Now; } #endregion } } }); processor.AddCommand(copyDirCommand); DeleteDirectoryCommand delDirCommand = new DeleteDirectoryCommand(origFolder, false) { RequiresRollback = false }; processor.AddCommand(delDirCommand); TierMigrateDatabaseUpdateCommand updateDbCommand = new TierMigrateDatabaseUpdateCommand(context); processor.AddCommand(updateDbCommand); Platform.Log(LogLevel.Info, "Start migrating study {0}.. expecting {1} to be moved", storage.StudyInstanceUid, ByteCountFormatter.Format(stat.StudySize)); if (!processor.Execute()) { if (processor.FailureException != null) { throw processor.FailureException; } throw new ApplicationException(processor.FailureReason); } stat.DBUpdate = updateDbCommand.Statistics; stat.CopyFiles = copyDirCommand.CopySpeed; stat.DeleteDirTime = delDirCommand.Statistics; } stat.ProcessSpeed.SetData(bytesCopied); stat.ProcessSpeed.End(); Platform.Log(LogLevel.Info, "Successfully migrated study {0} from {1} to {2} in {3} [ {4} files, {5} @ {6}, DB Update={7}, Remove Dir={8}]", storage.StudyInstanceUid, storage.FilesystemTierEnum, newFilesystem.Filesystem.FilesystemTierEnum, TimeSpanFormatter.Format(stat.ProcessSpeed.ElapsedTime), fileCounter, ByteCountFormatter.Format(bytesCopied), stat.CopyFiles.FormattedValue, stat.DBUpdate.FormattedValue, stat.DeleteDirTime.FormattedValue); string originalPath = storage.GetStudyPath(); if (Directory.Exists(storage.GetStudyPath())) { Platform.Log(LogLevel.Info, "Original study folder could not be deleted. It must be cleaned up manually: {0}", originalPath); ServerPlatform.Alert(AlertCategory.Application, AlertLevel.Warning, WorkQueueItem.WorkQueueTypeEnum.ToString(), 1000, GetWorkQueueContextData(WorkQueueItem), TimeSpan.Zero, "Study has been migrated to a new tier. Original study folder must be cleaned up manually: {0}", originalPath); } UpdateAverageStatistics(stat); }
/// <summary> /// Archive the specified <see cref="ArchiveQueue"/> item. /// </summary> /// <param name="queueItem">The ArchiveQueue item to archive.</param> public void Run(ArchiveQueue queueItem) { using (ArchiveProcessorContext executionContext = new ArchiveProcessorContext(queueItem)) { try { if (!GetStudyStorageLocation(queueItem)) { Platform.Log(LogLevel.Error, "Unable to find readable study storage location for archival queue request {0}. Delaying request.", queueItem.Key); queueItem.FailureDescription = "Unable to find readable study storage location for archival queue request."; _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2)); return; } // First, check to see if we can lock the study, if not just reschedule the queue entry. if (!_storageLocation.QueueStudyStateEnum.Equals(QueueStudyStateEnum.Idle)) { Platform.Log(LogLevel.Info, "Study {0} on partition {1} is currently locked, delaying archival.", _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description); queueItem.FailureDescription = "Study is currently locked, delaying archival."; _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2)); return; } StudyIntegrityValidator validator = new StudyIntegrityValidator(); validator.ValidateStudyState("Archive", _storageLocation, StudyIntegrityValidationModes.Default); using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { ILockStudy studyLock = update.GetBroker <ILockStudy>(); LockStudyParameters parms = new LockStudyParameters { StudyStorageKey = queueItem.StudyStorageKey, QueueStudyStateEnum = QueueStudyStateEnum.ArchiveScheduled }; bool retVal = studyLock.Execute(parms); if (!parms.Successful || !retVal) { Platform.Log(LogLevel.Info, "Study {0} on partition {1} failed to lock, delaying archival.", _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description); queueItem.FailureDescription = "Study failed to lock, delaying archival."; _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Pending, Platform.Time.AddMinutes(2)); return; } update.Commit(); } string studyXmlFile = _storageLocation.GetStudyXmlPath(); // Load the study Xml file, this is used to generate the list of dicom files to archive. LoadStudyXml(studyXmlFile); DicomFile file = LoadFileFromStudyXml(); string patientsName = file.DataSet[DicomTags.PatientsName].GetString(0, string.Empty); string patientId = file.DataSet[DicomTags.PatientId].GetString(0, string.Empty); string accessionNumber = file.DataSet[DicomTags.AccessionNumber].GetString(0, string.Empty); Platform.Log(LogLevel.Info, "Starting archival of study {0} for Patient {1} (PatientId:{2} A#:{3}) on Partition {4} on archive {5}", _storageLocation.StudyInstanceUid, patientsName, patientId, accessionNumber, _hsmArchive.ServerPartition.Description, _hsmArchive.PartitionArchive.Description); // Use the command processor to do the archival. 
using (ServerCommandProcessor commandProcessor = new ServerCommandProcessor("Archive")) { var archiveStudyCmd = new ArchiveStudyCommand(_storageLocation, _hsmArchive.HsmPath, executionContext.TempDirectory, _hsmArchive.PartitionArchive) { ForceCompress = HsmSettings.Default.CompressZipFiles }; commandProcessor.AddCommand(archiveStudyCmd); commandProcessor.AddCommand(new UpdateArchiveQueueItemCommand(queueItem.GetKey(), _storageLocation.GetKey(), ArchiveQueueStatusEnum.Completed)); StudyRulesEngine studyEngine = new StudyRulesEngine(_storageLocation, _hsmArchive.ServerPartition, _studyXml); studyEngine.Apply(ServerRuleApplyTimeEnum.StudyArchived, commandProcessor); if (!commandProcessor.Execute()) { Platform.Log(LogLevel.Error, "Unexpected failure archiving study ({0}) to archive {1}: {2}, zip filename: {3}", _storageLocation.StudyInstanceUid, _hsmArchive.PartitionArchive.Description, commandProcessor.FailureReason, archiveStudyCmd.OutputZipFilePath); queueItem.FailureDescription = commandProcessor.FailureReason; _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time); } else { Platform.Log(LogLevel.Info, "Successfully archived study {0} on {1} to zip {2}", _storageLocation.StudyInstanceUid, _hsmArchive.PartitionArchive.Description, archiveStudyCmd.OutputZipFilePath); } // Log the current FilesystemQueue settings _storageLocation.LogFilesystemQueue(); } } catch (StudyIntegrityValidationFailure ex) { StringBuilder error = new StringBuilder(); error.AppendLine(String.Format("Partition : {0}", ex.ValidationStudyInfo.ServerAE)); error.AppendLine(String.Format("Patient : {0}", ex.ValidationStudyInfo.PatientsName)); error.AppendLine(String.Format("Study Uid : {0}", ex.ValidationStudyInfo.StudyInstaneUid)); error.AppendLine(String.Format("Accession# : {0}", ex.ValidationStudyInfo.AccessionNumber)); error.AppendLine(String.Format("Study Date : {0}", ex.ValidationStudyInfo.StudyDate)); queueItem.FailureDescription = error.ToString(); _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time); } catch (Exception e) { String msg = String.Format("Unexpected exception archiving study: {0} on {1}: {2}", _storageLocation.StudyInstanceUid, _hsmArchive.PartitionArchive.Description, e.Message); Platform.Log(LogLevel.Error, e, msg); queueItem.FailureDescription = msg; _hsmArchive.UpdateArchiveQueue(queueItem, ArchiveQueueStatusEnum.Failed, Platform.Time); } finally { // Unlock the Queue Entry using (IUpdateContext update = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush)) { ILockStudy studyLock = update.GetBroker <ILockStudy>(); LockStudyParameters parms = new LockStudyParameters { StudyStorageKey = queueItem.StudyStorageKey, QueueStudyStateEnum = QueueStudyStateEnum.Idle }; bool retVal = studyLock.Execute(parms); if (!parms.Successful || !retVal) { Platform.Log(LogLevel.Info, "Study {0} on partition {1} is failed to unlock.", _storageLocation.StudyInstanceUid, _hsmArchive.ServerPartition.Description); } update.Commit(); } } } }
private void EnsureConsistentObjectCount(StudyXml studyXml, IDictionary<string, List<string>> processedSeriesMap)
{
    Platform.CheckForNullReference(studyXml, "studyXml");

    // We have to ensure that the counts in the studyXml and what we have processed are consistent.
    // Files or folders may have been reprocessed but then gone missing by the time the entry is resumed.
    // We have to remove them from the studyXml before committing it.
    Platform.Log(LogLevel.Info, "Verifying study xml against the filesystems");
    int filesProcessed = 0;
    foreach (string seriesUid in processedSeriesMap.Keys)
    {
        filesProcessed += processedSeriesMap[seriesUid].Count;
    }

    // Used to keep track of the series to be removed.
    // We can't remove an item from the study xml while we are iterating through it.
    var seriesToRemove = new List<string>();
    foreach (SeriesXml seriesXml in studyXml)
    {
        if (!processedSeriesMap.ContainsKey(seriesXml.SeriesInstanceUid))
        {
            seriesToRemove.Add(seriesXml.SeriesInstanceUid);
        }
        else
        {
            // check all instances in the series
            List<string> foundInstances = processedSeriesMap[seriesXml.SeriesInstanceUid];
            var instanceToRemove = new List<string>();
            foreach (InstanceXml instanceXml in seriesXml)
            {
                if (!foundInstances.Contains(instanceXml.SopInstanceUid))
                {
                    // the SOP no longer exists in the filesystem
                    instanceToRemove.Add(instanceXml.SopInstanceUid);
                }
            }

            foreach (string instanceUid in instanceToRemove)
            {
                seriesXml[instanceUid] = null;
                Platform.Log(LogLevel.Info, "Removed SOP {0} from the study xml: it no longer exists.", instanceUid);
            }
        }
    }

    foreach (string seriesUid in seriesToRemove)
    {
        studyXml[seriesUid] = null;
        Platform.Log(LogLevel.Info, "Removed Series {0} from the study xml: it no longer exists.", seriesUid);
    }

    Platform.CheckTrue(studyXml.NumberOfStudyRelatedSeries == processedSeriesMap.Count,
        String.Format("Number of series in the xml does not match the number of series reprocessed: {0} vs {1}",
            studyXml.NumberOfStudyRelatedSeries, processedSeriesMap.Count));

    Platform.CheckTrue(studyXml.NumberOfStudyRelatedInstances == filesProcessed,
        String.Format("Number of instances in the xml does not match the number of instances reprocessed: {0} vs {1}",
            studyXml.NumberOfStudyRelatedInstances, filesProcessed));

    Platform.Log(LogLevel.Info, "Study xml has been verified.");

    if (StorageLocation.Study != null)
    {
        // update the instance count in the db
        using (IUpdateContext updateContext = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
        {
            var broker = updateContext.GetBroker<IStudyEntityBroker>();
            var columns = new StudyUpdateColumns
            {
                NumberOfStudyRelatedInstances = studyXml.NumberOfStudyRelatedInstances,
                NumberOfStudyRelatedSeries = studyXml.NumberOfStudyRelatedSeries
            };
            broker.Update(StorageLocation.Study.GetKey(), columns);
            updateContext.Commit();
        }
    }
    else
    {
        // alert orphaned StudyStorage entry
        RaiseAlert(WorkQueueItem, AlertLevel.Critical,
            String.Format("Study {0} has been reprocessed but the Study record was NOT created. Images reprocessed: {1}. Path={2}",
                StorageLocation.StudyInstanceUid, filesProcessed, StorageLocation.GetStudyPath()));
    }
}
/// <summary> /// The processing thread. /// </summary> /// <remarks> /// This method queries the database for ServiceLock entries to work on, and then uses /// a thread pool to process the entries. /// </remarks> public void Run() { // Start the thread pool if (!_threadPool.Active) { _threadPool.Start(); } // Reset any queue items related to this service that are have the Lock bit set. try { ResetLocked(); } catch (Exception e) { Platform.Log(LogLevel.Fatal, e, "Unable to reset ServiceLock items on startup. There may be ServiceLock items orphaned in the queue."); } Platform.Log(LogLevel.Info, "ServiceLock Processor is running"); while (true) { try { if (_threadPool.CanQueueItem) { Model.ServiceLock queueListItem; using (IUpdateContext updateContext = _store.OpenUpdateContext(UpdateContextSyncMode.Flush)) { IQueryServiceLock select = updateContext.GetBroker <IQueryServiceLock>(); ServiceLockQueryParameters parms = new ServiceLockQueryParameters(); parms.ProcessorId = ServerPlatform.ProcessorId; queueListItem = select.FindOne(parms); updateContext.Commit(); } if (queueListItem == null) { WaitHandle.WaitAny(new WaitHandle[] { _terminationEvent, _threadStop }, TimeSpan.FromSeconds(30), false); _threadStop.Reset(); } else { if (!_extensions.ContainsKey(queueListItem.ServiceLockTypeEnum)) { Platform.Log(LogLevel.Error, "No extensions loaded for ServiceLockTypeEnum item type: {0}. Failing item.", queueListItem.ServiceLockTypeEnum); //Just fail the ServiceLock item, not much else we can do ResetServiceLock(queueListItem); continue; } IServiceLockProcessorFactory factory = _extensions[queueListItem.ServiceLockTypeEnum]; IServiceLockItemProcessor processor; try { processor = factory.GetItemProcessor(); } catch (Exception e) { Platform.Log(LogLevel.Error, e, "Unexpected exception creating ServiceLock processor."); ResetServiceLock(queueListItem); continue; } _threadPool.Enqueue(processor, queueListItem, delegate(IServiceLockItemProcessor queueProcessor, Model.ServiceLock queueItem) { try { queueProcessor.Process(queueItem); } catch (Exception e) { Platform.Log(LogLevel.Error, e, "Unexpected exception when processing ServiceLock item of type {0}. Failing Queue item. (GUID: {1})", queueItem.ServiceLockTypeEnum, queueItem.GetKey()); ServerPlatform.Alert(AlertCategory.Application, AlertLevel.Error, "ServiceLockProcessor", AlertTypeCodes.UnableToProcess, null, TimeSpan.Zero, "Exception thrown when processing {0} ServiceLock item : {1}", queueItem.ServiceLockTypeEnum.Description, e.Message); ResetServiceLock(queueItem); } // Cleanup the processor queueProcessor.Dispose(); // Signal the thread to come out of sleep mode _threadStop.Set(); }); } } else { // Wait for only 5 seconds when the thread pool is all in use. WaitHandle.WaitAny(new WaitHandle[] { _terminationEvent, _threadStop }, TimeSpan.FromSeconds(5), false); _threadStop.Reset(); } } catch (Exception ex) { Platform.Log(LogLevel.Error, ex, "Exception has occurred : {0}. Retry later.", ex.Message); WaitHandle.WaitAny(new WaitHandle[] { _terminationEvent, _threadStop }, TimeSpan.FromSeconds(5), false); _threadStop.Reset(); } if (_stop) { return; } } }
/// <summary> /// Do the insertion of the AutoRoute. /// </summary> protected override void OnExecute(CommandProcessor theProcessor, IUpdateContext updateContext) { var deviceSelectCriteria = new DeviceSelectCriteria(); deviceSelectCriteria.AeTitle.EqualTo(_deviceAe); deviceSelectCriteria.ServerPartitionKey.EqualTo(_context.ServerPartitionKey); var selectDevice = updateContext.GetBroker <IDeviceEntityBroker>(); var dev = selectDevice.FindOne(deviceSelectCriteria); if (dev == null) { Platform.Log(LogLevel.Warn, "Device '{0}' on partition {1} not in database for autoroute request! Ignoring request.", _deviceAe, _context.ServerPartition.AeTitle); ServerPlatform.Alert( AlertCategory.Application, AlertLevel.Warning, SR.AlertComponentAutorouteRule, AlertTypeCodes.UnableToProcess, null, TimeSpan.FromMinutes(5), SR.AlertAutoRouteUnknownDestination, _deviceAe, _context.ServerPartition.AeTitle); return; } if (!dev.AllowAutoRoute) { Platform.Log(LogLevel.Warn, "Study Auto-route attempted to device {0} on partition {1} with autoroute support disabled. Ignoring request.", dev.AeTitle, _context.ServerPartition.AeTitle); ServerPlatform.Alert(AlertCategory.Application, AlertLevel.Warning, SR.AlertComponentAutorouteRule, AlertTypeCodes.UnableToProcess, null, TimeSpan.FromMinutes(5), SR.AlertAutoRouteDestinationAEDisabled, dev.AeTitle, _context.ServerPartition.AeTitle); return; } if (_qcStatus != null) { var studyBroker = updateContext.GetBroker <IStudyEntityBroker>(); var studySelect = new StudySelectCriteria(); studySelect.StudyStorageKey.EqualTo(_context.StudyLocationKey); studySelect.ServerPartitionKey.EqualTo(_context.ServerPartitionKey); var study = studyBroker.FindOne(studySelect); if (!study.QCStatusEnum.Equals(_qcStatus)) { Platform.Log(LogLevel.Debug, "Ignoring Auto-route where the QCStatusEnum status must be {0}, but database has {1} for study {2}", _qcStatus.Description, study.QCStatusEnum.Description, study.StudyInstanceUid); return; } } var parms = new InsertWorkQueueParameters { WorkQueueTypeEnum = WorkQueueTypeEnum.StudyAutoRoute, ScheduledTime = _scheduledTime.HasValue ? _scheduledTime.Value : Platform.Time.AddSeconds(30), StudyStorageKey = _context.StudyLocationKey, ServerPartitionKey = _context.ServerPartitionKey, DeviceKey = dev.GetKey() }; var broker = updateContext.GetBroker <IInsertWorkQueue>(); if (broker.FindOne(parms) == null) { throw new ApplicationException("InsertWorkQueue for Study Auto-Route failed"); } }
protected override void OnExecute(CommandProcessor theProcessor, IUpdateContext updateContext) { // Check if the File is the same syntax as the TransferSyntax fileSyntax = _file.TransferSyntax; TransferSyntax dbSyntax = TransferSyntax.GetTransferSyntax(_location.TransferSyntaxUid); // Check if the syntaxes match the location if ((!fileSyntax.Encapsulated && !dbSyntax.Encapsulated) || (fileSyntax.LosslessCompressed && dbSyntax.LosslessCompressed) || (fileSyntax.LossyCompressed && dbSyntax.LossyCompressed)) { // no changes necessary, just return; return; } // Select the Server Transfer Syntax var syntaxCriteria = new ServerTransferSyntaxSelectCriteria(); var syntaxBroker = updateContext.GetBroker <IServerTransferSyntaxEntityBroker>(); syntaxCriteria.Uid.EqualTo(fileSyntax.UidString); ServerTransferSyntax serverSyntax = syntaxBroker.FindOne(syntaxCriteria); if (serverSyntax == null) { Platform.Log(LogLevel.Error, "Unable to load ServerTransferSyntax for {0}. Unable to update study status.", fileSyntax.Name); return; } // Get the FilesystemStudyStorage update broker ready var filesystemStudyStorageEntityBroker = updateContext.GetBroker <IFilesystemStudyStorageEntityBroker>(); var filesystemStorageUpdate = new FilesystemStudyStorageUpdateColumns(); var filesystemStorageCritiera = new FilesystemStudyStorageSelectCriteria(); filesystemStorageUpdate.ServerTransferSyntaxKey = serverSyntax.Key; filesystemStorageCritiera.StudyStorageKey.EqualTo(_location.Key); // Get the StudyStorage update broker ready var studyStorageBroker = updateContext.GetBroker <IStudyStorageEntityBroker>(); var studyStorageUpdate = new StudyStorageUpdateColumns(); StudyStatusEnum statusEnum = _location.StudyStatusEnum; if (fileSyntax.LossyCompressed) { studyStorageUpdate.StudyStatusEnum = statusEnum = StudyStatusEnum.OnlineLossy; } else if (fileSyntax.LosslessCompressed) { studyStorageUpdate.StudyStatusEnum = statusEnum = StudyStatusEnum.OnlineLossless; } studyStorageUpdate.LastAccessedTime = Platform.Time; if (!filesystemStudyStorageEntityBroker.Update(filesystemStorageCritiera, filesystemStorageUpdate)) { Platform.Log(LogLevel.Error, "Unable to update FilesystemQueue row: Study {0}, Server Entity {1}", _location.StudyInstanceUid, _location.ServerPartitionKey); } else if (!studyStorageBroker.Update(_location.GetKey(), studyStorageUpdate)) { Platform.Log(LogLevel.Error, "Unable to update StudyStorage row: Study {0}, Server Entity {1}", _location.StudyInstanceUid, _location.ServerPartitionKey); } else { // Update the location, so the next time we come in here, we don't try and update the database // for another sop in the study. _location.StudyStatusEnum = statusEnum; _location.TransferSyntaxUid = fileSyntax.UidString; _location.ServerTransferSyntaxKey = serverSyntax.Key; } }
private void ReinventoryFilesystem(Filesystem filesystem) { ServerPartition partition; DirectoryInfo filesystemDir = new DirectoryInfo(filesystem.FilesystemPath); foreach (DirectoryInfo partitionDir in filesystemDir.GetDirectories()) { if (GetServerPartition(partitionDir.Name, out partition) == false) { continue; } foreach (DirectoryInfo dateDir in partitionDir.GetDirectories()) { if (dateDir.FullName.EndsWith("Deleted", StringComparison.InvariantCultureIgnoreCase) || dateDir.FullName.EndsWith(ServerPlatform.ReconcileStorageFolder, StringComparison.InvariantCultureIgnoreCase)) { continue; } List <FileInfo> fileList; foreach (DirectoryInfo studyDir in dateDir.GetDirectories()) { if (studyDir.FullName.EndsWith("Deleted", StringComparison.InvariantCultureIgnoreCase)) { continue; } // Check for Cancel message if (CancelPending) { return; } String studyInstanceUid = studyDir.Name; StudyStorageLocation location; if (GetStudyStorageLocation(partition.Key, studyInstanceUid, out location)) { #region Study record exists in db int integrityQueueCount; int workQueueCount; Study theStudy = GetStudyAndQueues(location, out integrityQueueCount, out workQueueCount); if (theStudy != null) { continue; } if (integrityQueueCount != 0 && workQueueCount != 0) { continue; } fileList = LoadSopFiles(studyDir, false); if (fileList.Count == 0) { Platform.Log(LogLevel.Warn, "Found empty study folder with StorageLocation, deleteing StorageLocation: {0}\\{1}", dateDir.Name, studyDir.Name); studyDir.Delete(true); RemoveStudyStorage(location); continue; } // WriteLock the new study storage for study processing if (!location.QueueStudyStateEnum.Equals(QueueStudyStateEnum.ProcessingScheduled)) { string failureReason; if (!ServerHelper.LockStudy(location.Key, QueueStudyStateEnum.ProcessingScheduled, out failureReason)) { Platform.Log(LogLevel.Error, "Unable to lock study {0} for Study Processing", location.StudyInstanceUid); } } #endregion } else { #region Directory not in DB, fileList = LoadSopFiles(studyDir, true); if (fileList.Count == 0) { Platform.Log(LogLevel.Warn, "Found empty study folder: {0}\\{1}", dateDir.Name, studyDir.Name); continue; } DicomFile file = LoadFileFromList(fileList); if (file == null) { Platform.Log(LogLevel.Warn, "Found directory with no readable files: {0}\\{1}", dateDir.Name, studyDir.Name); continue; } // Do a second check, using the study instance uid from a file in the directory. // had an issue with trailing periods on uids causing us to not find the // study storage, and insert a new record into the database. 
studyInstanceUid = file.DataSet[DicomTags.StudyInstanceUid].ToString(); if (GetStudyStorageLocation(partition.Key, studyInstanceUid, out location)) { continue; } StudyStorage storage; if (GetStudyStorage(partition, studyInstanceUid, out storage)) { Platform.Log(LogLevel.Warn, "Study {0} on filesystem partition {1} is offline {2}", studyInstanceUid, partition.Description, studyDir.ToString()); continue; } Platform.Log(LogLevel.Info, "Reinventory inserting study storage location for {0} on partition {1}", studyInstanceUid, partition.Description); // Insert StudyStorage using (IUpdateContext update = _store.OpenUpdateContext(UpdateContextSyncMode.Flush)) { IInsertStudyStorage studyInsert = update.GetBroker <IInsertStudyStorage>(); InsertStudyStorageParameters insertParms = new InsertStudyStorageParameters { ServerPartitionKey = partition.GetKey(), StudyInstanceUid = studyInstanceUid, Folder = dateDir.Name, FilesystemKey = filesystem.GetKey(), QueueStudyStateEnum = QueueStudyStateEnum.Idle }; if (file.TransferSyntax.LosslessCompressed) { insertParms.TransferSyntaxUid = file.TransferSyntax.UidString; insertParms.StudyStatusEnum = StudyStatusEnum.OnlineLossless; } else if (file.TransferSyntax.LossyCompressed) { insertParms.TransferSyntaxUid = file.TransferSyntax.UidString; insertParms.StudyStatusEnum = StudyStatusEnum.OnlineLossy; } else { insertParms.TransferSyntaxUid = TransferSyntax.ExplicitVrLittleEndianUid; insertParms.StudyStatusEnum = StudyStatusEnum.Online; } location = studyInsert.FindOne(insertParms); // WriteLock the new study storage for study processing ILockStudy lockStudy = update.GetBroker <ILockStudy>(); LockStudyParameters lockParms = new LockStudyParameters { StudyStorageKey = location.Key, QueueStudyStateEnum = QueueStudyStateEnum.ProcessingScheduled }; if (!lockStudy.Execute(lockParms) || !lockParms.Successful) { Platform.Log(LogLevel.Error, "Unable to lock study {0} for Study Processing", location.StudyInstanceUid); } update.Commit(); } #endregion } string studyXml = location.GetStudyXmlPath(); if (File.Exists(studyXml)) { FileUtils.Delete(studyXml); } string studyGZipXml = location.GetCompressedStudyXmlPath(); if (File.Exists(studyGZipXml)) { FileUtils.Delete(studyGZipXml); } foreach (FileInfo sopFile in fileList) { String sopInstanceUid = sopFile.Name.Replace(sopFile.Extension, string.Empty); using (ServerExecutionContext context = new ServerExecutionContext()) { // Just use a read context here, in hopes of improving // performance. Every other place in the code should use // Update contexts when doing transactions. IInsertWorkQueue workQueueInsert = context.ReadContext.GetBroker <IInsertWorkQueue>(); InsertWorkQueueParameters queueInsertParms = new InsertWorkQueueParameters { WorkQueueTypeEnum = WorkQueueTypeEnum.StudyProcess, StudyStorageKey = location.GetKey(), ServerPartitionKey = partition.GetKey(), SeriesInstanceUid = sopFile.Directory.Name, SopInstanceUid = sopInstanceUid, ScheduledTime = Platform.Time }; if (workQueueInsert.FindOne(queueInsertParms) == null) { Platform.Log(LogLevel.Error, "Failure attempting to insert SOP Instance into WorkQueue during Reinventory."); } } } } // Cleanup the date directory, if its empty. DirectoryUtility.DeleteIfEmpty(dateDir.FullName); } } }