public async Task Purge(int taskID)
{
    var profile = _profileStorage.Current;
    var taskInfo = $"task: {taskID}";

    try
    {
        await Task.Run(() => { PurgeImpl(profile, taskInfo); });
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e);
    }
    finally
    {
        _taskManager.Stop("Purge");
    }
}

private async Task ScanImpl(Profile profile, DcmtkConnection Connection, int taskID)
{
    var taskInfo = $"task: {taskID} connection: {Connection.name}";

    try
    {
        var dir = profile.tempPath + Path.DirectorySeparatorChar + Connection.name + Path.DirectorySeparatorChar + "toScanner";
        Directory.CreateDirectory(dir);
        var fileEntries = _util.DirSearch(dir, "*");

        //var dd = new DuplicatesDetection();
        if (profile.duplicatesDetectionUpload)
        {
            _logger.Log(LogLevel.Information,
                $"{taskInfo} toCloud: {(fileEntries == null ? 0 : fileEntries.Count)} files to be sent to cloud (before duplicates elimination).");
        }
        else
        {
            _logger.Log(LogLevel.Information,
                $"{taskInfo} toCloud: {(fileEntries == null ? 0 : fileEntries.Count)} files to be sent to cloud.");
        }

        // DirSearch may return null (the log message above already allows for it)
        if (fileEntries == null)
        {
            return;
        }

        foreach (string file in fileEntries)
        {
            if (_taskManager.cts.IsCancellationRequested)
            {
                break;
            }

            _logger.Log(LogLevel.Debug, $"{taskInfo} Found {file}");

            RoutedItem routedItem = new RoutedItem(fromConnection: Connection.name, sourceFileName: file, taskID: taskID)
            {
                type = RoutedItem.Type.DICOM
            };

            routedItem = await _dcmDumpService.DcmDump(taskID, routedItem, Connection);

            try
            {
                _routedItemManager.Init(routedItem);
                _routedItemManager.Enqueue(Connection, Connection.toRules, nameof(Connection.toRules));
            }
            catch (Exception e)
            {
                _logger.LogFullException(e);
            }
        }
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e);
    }
    finally
    {
        _taskManager.Stop($"{Connection.name}.Scanner");
    }
}

public void Stop()
{
    _logger.Log(LogLevel.Warning, "Shutdown Requested");
    _logger.Log(LogLevel.Warning, "Stopping Tasks for Shutdown");

    // todo: could use true
    //_liteTaskManager.Stop(true);
    _taskManager.Stop();

    _logger.Log(LogLevel.Warning, "Stopping Connections");

    var profile = _profileStorage.Current;
    if (profile != null && profile.connections != null)
    {
        foreach (Connection conn in profile.connections)
        {
            var connectionManager = _connectionManagerFactory.GetManager(conn);
            if (connectionManager != null)
            {
                connectionManager.Stop();
            }
        }
    }

    _logger.Log(LogLevel.Warning, "Stopping Logger");

#if DEBUG
    //Console.WriteLine("Calling Environment Exit");
    //Environment.Exit(0);
#endif
}

public async Task Download(int taskID, LifeImageCloudConnection Connection, IHttpManager httpManager)
{
    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    _logger.Log(LogLevel.Debug, $"{taskInfo} Entering Download");

    try
    {
        _logger.Log(LogLevel.Debug, $"{taskInfo} Processing downloadStudy");

        //we can kick off some more
        //be careful that this is reentrant, meaning that kickoff launches this on an interval and we only want to start
        //new work if existing work to capacity is not already occurring.
        //to avoid "Collection was modified; enumeration operation may not execute", making a copy just to iterate.
        await DownloadImpl(Connection, httpManager, taskInfo);
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, $"{taskInfo} Exiting Download");
    }
    finally
    {
        _taskManager.Stop($"{Connection.name}.Download");
    }
}

public async Task Upload(int taskID, LITEConnection connection, SemaphoreSlim toEGSSignal, ISendToAllHubsService sendToAllHubsService)
{
    Connection = connection;
    ToEGSSignal = toEGSSignal;
    SendToAllHubsService = sendToAllHubsService;

    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    _logger.Log(LogLevel.Debug, $"{taskInfo} Entering Upload");

    try
    {
        await UploadImpl(taskID);
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (OperationCanceledException)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Wait Operation Canceled. Exiting Upload");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        _logger.Log(LogLevel.Critical, $"{taskInfo} Exiting Upload");
    }
    finally
    {
        _taskManager.Stop($"{Connection.name}.Upload");
    }
}

public async Task Accept(HL7Connection connection, BourneListens bourne, List<TcpClient> clients, Func<TcpClient, int, Task> func, int taskID)
{
    Connection = connection;
    _logger.Log(LogLevel.Debug, $"{Connection.name} Entering accept on {bourne.LocalEndpoint}");

    string local = string.Empty;

    try
    {
        while (!_taskManager.cts.IsCancellationRequested)
        {
            if (!bourne.Active())
            {
                break;
            }

            local = await ProcessItem(bourne, clients, func, taskID);
        }
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, "Task was canceled.");
    }
    catch (SocketException e)
    {
        _logger.Log(LogLevel.Warning, $"{local} SocketException: {e.ErrorCode} {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"Inner Exception: {e.InnerException}");
        }
    }
    catch (ObjectDisposedException e)
    {
        _logger.Log(LogLevel.Warning, $"{local} HL7 Connection is closed.");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"Inner Exception: {e.InnerException}");
        }
    }
    catch (System.InvalidOperationException e)
    {
        _logger.Log(LogLevel.Warning, $"{local} InvalidOperationException: {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"Inner Exception: {e.InnerException}");
        }
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, local);
    }
    finally
    {
        _taskManager.Stop($"{Connection.name}.accept: {bourne.LocalEndpoint}");
    }

    _logger.Log(LogLevel.Warning, "Cancellation received. Exiting accept.");
}

public async Task SendToDicom(int taskID, DICOMConnection connection, DicomClient dicomClient, SemaphoreSlim toDicomSignal)
{
    Connection = connection;
    var taskInfo = $"task: {taskID} connection: {Connection.name}";

    var stopWatch = new Stopwatch();
    stopWatch.Start();

    try
    {
        do
        {
            if (Connection.TestConnection)
            {
                await CEcho(taskID, dicomClient);
            }

            bool success = await toDicomSignal.WaitAsync(_profileStorage.Current.KickOffInterval, _taskManager.cts.Token).ConfigureAwait(false);

            while (Connection.toDicom.ToList().FindAll(e => e.lastAttempt < DateTime.Now.AddMinutes(-Connection.retryDelayMinutes)).Count > 0)
            {
                // batch up items along maxRequestsPerAssociation and if the attempt > 1 break out and send it.
                List<List<RoutedItem>> batches = GetBatches(taskInfo);

                _logger.Log(LogLevel.Debug, $"{taskInfo} batches to send: {batches.Count}");

                //queue up the requests for each association
                foreach (var association in batches)
                {
                    await ProcessBatches(association, dicomClient, taskID, taskInfo, stopWatch);
                }

                await Task.Delay(_profileStorage.Current.backlogInterval).ConfigureAwait(false);
                stopWatch.Restart();
            }

            // toDicomSignal.Dispose();
            // toDicomSignal = new SemaphoreSlim(0, 1);
        } while (Connection.responsive);
    }
    catch (DicomAssociationRejectedException e)
    {
        _logger.LogFullException(e, $"{taskInfo} Dicom Association Failed");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
    }
    finally
    {
        _taskManager.Stop($"{Connection.name}.SendToDicom");
    }
}

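// Batching sketch (an assumption: GetBatches() is defined elsewhere, so this only
// restates the rule from the comment in the loop above): items are grouped into
// associations of at most Connection.maxRequestsPerAssociation, and an item on its
// second or later attempt closes out its batch so retries go out without waiting
// for a full association. Illustrative grouping only:
//
//   var batch = new List<RoutedItem>();
//   foreach (var ri in Connection.toDicom.ToList())
//   {
//       batch.Add(ri);
//       if (batch.Count >= Connection.maxRequestsPerAssociation || ri.attempts > 1)
//       {
//           batches.Add(batch);
//           batch = new List<RoutedItem>();
//       }
//   }
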
public async Task ExpireCache(LifeImageCloudConnection Connection, IConnectionRoutedCacheManager manager, int taskID)
{
    Throw.IfNull(manager);

    var taskInfo = $"task: {taskID} connection: {Connection.name}";

    try
    {
        while (!_taskManager.cts.IsCancellationRequested)
        {
            await Task.Delay(_profileStorage.Current.KickOffInterval, _taskManager.cts.Token);

            //age the response cache before asking for more
            foreach (var cacheItem in LifeImageCloudConnection.cache.ToArray())
            {
                _logger.Log(LogLevel.Debug, $"{taskInfo} Cache entry id: {cacheItem.Key}");

                foreach (var item in cacheItem.Value.ToArray())
                {
                    ProcessRoutedItem(item, Connection, manager, taskInfo);
                }
            }
        }
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (System.InvalidOperationException) //for the "Collection was Modified", we can wait
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Waiting for requests to complete before getting new requests");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
    }
    finally
    {
        try
        {
            _taskManager.Stop($"{Connection.name}.ExpireCache");
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
        }
    }
}

/// <summary>
/// Outgoing file, send.
/// </summary>
/// <param name="taskID"></param>
/// <param name="fileConnection"></param>
/// <param name="connectionManager"></param>
/// <param name="ScanPathSignal"></param>
/// <returns></returns>
public async Task Scan(int taskID, FileConnection fileConnection, IConnectionRoutedCacheManager connectionManager, SemaphoreSlim ScanPathSignal)
{
    Connection = fileConnection;

    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    _logger.Log(LogLevel.Debug, $"{taskInfo} scanpaths: {Connection.scanpaths}");

    try
    {
        do
        {
            bool success = await ScanPathSignal.WaitAsync(_profileStorage.Current.KickOffInterval, _taskManager.cts.Token)
                .ConfigureAwait(false);

            // ScanPathSignal.Dispose();
            // ScanPathSignal = new SemaphoreSlim(0, 1);

            foreach (string fileEntry in Connection.scanpaths)
            {
                await ProcessScanPath(taskID, Connection, connectionManager, fileEntry);
                await Task.Yield();
            }

            //purge inpath
            PurgeInPath(Connection, taskInfo);

            //purge outpath
            PurgeOutPath(Connection, taskInfo);
        } while (Connection.responsive);
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e);
    }
    finally
    {
        _taskManager.Stop($"{Connection.name}.scan");
    }
}

public async Task Upload(int taskID, LifeImageCloudConnection Connection, ILifeImageCloudConnectionManager manager, IConnectionRoutedCacheManager cache, IHttpManager httpManager)
{
    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    _logger.Log(LogLevel.Debug, $"{taskInfo} Entering Upload");

    try
    {
        bool success = await manager.ToCloudSignal.WaitAsync(_profileStorage.Current.KickOffInterval, _taskManager.cts.Token)
            .ConfigureAwait(false);

        // ToCloudSignal.Dispose();
        // ToCloudSignal = new SemaphoreSlim(0, 1);

        await _sendToCloudService.SendToCloud(taskID, Connection, cache, httpManager);

        //if (_profileStorage.Current.rules.DoesRouteDestinationExistForSource(Connection.name))
        if (_rulesManager.DoesRouteDestinationExistForSource(Connection.name))
        {
            if (_taskManager.CanStart($"{Connection.name}.GetRequests"))
            {
                var newTaskID = _taskManager.NewTaskID();
                Task task = new Task(new Action(async () => await _cloudAgentTaskLoader.GetRequests(taskID, Connection, cache, httpManager)), _taskManager.cts.Token);
                await _taskManager.Start(newTaskID, task, $"{Connection.name}.GetRequests", isLongRunning: false);
            }
        }
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (OperationCanceledException)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Wait Operation Canceled. Exiting Upload");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        _logger.Log(LogLevel.Critical, $"{taskInfo} Exiting Upload");
    }
    finally
    {
        _taskManager.Stop($"{Connection.name}.Upload");
    }
}

/// <summary>
/// Store takes a batch of RoutedItem, all going to the same share destination, and uploads them as a single operation.
/// This is done to solve the many small files problem. Larger files can go individually.
/// </summary>
/// <param name="batch"></param>
/// <param name="taskID"></param>
/// <param name="connection"></param>
/// <returns></returns>
public async Task store(List<RoutedItem> batch, int taskID, LITEConnection connection)
{
    Connection = connection;
    var taskInfo = $"task: {taskID} connection: {Connection.name}";

    StreamContent streamContent = null;
    MultipartContent content = null;
    HttpResponseMessage response = null;
    string testFile = null;
    var firstRecord = batch.First();

    try
    {
        var stopWatch = new Stopwatch();
        stopWatch.Start();

        //set the URL
        //string resourceURL = Connection.URL + "/api/File";
        string resourceURL = Connection.URL + FileAgentConstants.BaseUrl;
        _logger.Log(LogLevel.Debug, $"{taskInfo} URL: {resourceURL}");

        // generate guid for boundary...boundaries cannot be accidentally found in the content
        var boundary = Guid.NewGuid();
        _logger.Log(LogLevel.Debug, $"{taskInfo} boundary: {boundary}");

        // create the content
        content = new MultipartContent("related", boundary.ToString());

        //add the sharing headers
        List<string> shareHeader = new List<string>();
        if (Connection.shareDestinations != null)
        {
            foreach (var connectionSet in firstRecord.toConnections.FindAll(e => e.connectionName.Equals(Connection.name)))
            {
                if (connectionSet.shareDestinations != null)
                {
                    foreach (var shareDestination in connectionSet.shareDestinations)
                    {
                        shareHeader.Add(shareDestination.boxUuid);
                        _logger.Log(LogLevel.Debug,
                            $"{taskInfo} sharing to: {shareDestination.boxId} {shareDestination.boxName} {shareDestination.groupId} {shareDestination.groupName} {shareDestination.organizationName} {shareDestination.publishableBoxType}");
                    }
                }
            }
        }

        content.Headers.Add("X-Li-Destination", shareHeader);

        long fileSize = 0;
        var profile = _profileStorage.Current;
        var dir = profile.tempPath + Path.DirectorySeparatorChar + Connection.name + Path.DirectorySeparatorChar + "toEGS";
        Directory.CreateDirectory(dir);
        testFile = dir + Path.DirectorySeparatorChar + Guid.NewGuid() + ".gz";

        using (FileStream compressedFileStream = File.Create(testFile))
        {
            using GZipStream compressionStream = new GZipStream(compressedFileStream, CompressionMode.Compress);

            foreach (var routedItem in batch)
            {
                if (File.Exists(routedItem.sourceFileName))
                {
                    routedItem.stream = File.OpenRead(routedItem.sourceFileName);

                    if (Connection.calcCompressionStats)
                    {
                        routedItem.stream.CopyTo(compressionStream);
                    }

                    fileSize += routedItem.length;
                    streamContent = new StreamContent(routedItem.stream);
                    streamContent.Headers.ContentDisposition = new ContentDispositionHeaderValue("attachment")
                    {
                        FileName = routedItem.sourceFileName
                    };
                    content.Add(streamContent);
                    streamContent.Headers.Add("content-type", "application/octet-stream");
                }
                else
                {
                    _logger.Log(LogLevel.Error,
                        $"{taskInfo} {routedItem.sourceFileName} no longer exists. Increase tempFileRetentionHours for heavy transfer backlogs that may take hours!!");
                    routedItem.Error = "File no longer exists";
                    _routedItemManager.Init(routedItem);
                    _routedItemManager.Dequeue(Connection, Connection.toEGS, nameof(Connection.toEGS), error: true);
                }
            }
        }

        if (Connection.calcCompressionStats)
        {
            FileInfo info = new FileInfo(testFile);
            _logger.Log(LogLevel.Information,
                $"{taskInfo} orgSize: {fileSize} compressedSize: {info.Length} reduction: {(fileSize == 0 ? 0 : (fileSize * 1.0 - info.Length) / (fileSize) * 100)}%");
        }

        // issue the POST
        Task<HttpResponseMessage> task;
        var httpClient = _liteHttpClient.GetClient(connection);

        if (firstRecord.Compress == true)
        {
            var compressedContent = new CompressedContent(content, "gzip");
            _logger.Log(LogLevel.Debug, $"{taskInfo} compressedContent.Headers {compressedContent.Headers} ");
            compressedContent.Headers.Remove("Content-Encoding");

            var cookies = _liteHttpClient.GetCookies(resourceURL);
            _logger.LogCookies(cookies, taskInfo);

            task = httpClient.PostAsync(resourceURL, compressedContent);
        }
        else
        {
            _logger.Log(LogLevel.Debug, $"{taskInfo} will send content.Headers {content.Headers}");

            var cookies = _liteHttpClient.GetCookies(resourceURL);
            _logger.LogCookies(cookies, taskInfo);

            task = httpClient.PostAsync(resourceURL, content);
        }

        response = await task;

        // output the result
        _logger.LogHttpResponseAndHeaders(response, taskInfo);
        _logger.Log(LogLevel.Debug, $"{taskInfo} response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");

        stopWatch.Stop();
        _logger.Log(LogLevel.Information,
            $"{taskInfo} elapsed: {stopWatch.Elapsed} size: {fileSize} rate: {(float)fileSize / stopWatch.Elapsed.TotalMilliseconds * 1000 / 1000000} MB/s");

        switch (response.StatusCode)
        {
            case HttpStatusCode.Created:
                //dequeue the work, we're done!
                if (streamContent != null) { streamContent.Dispose(); }
                if (response != null) { response.Dispose(); }
                if (content != null) { content.Dispose(); }

                foreach (var ri in batch)
                {
                    _routedItemManager.Init(ri);
                    _routedItemManager.Dequeue(Connection, Connection.toEGS, nameof(Connection.toEGS));
                }

                //let EGS know it's available, or when we convert udt to .net core then perhaps push so no open socket required on client.
                //await SendToAllHubs(LITEServicePoint, batch);
                break;

            case HttpStatusCode.UnprocessableEntity:
                //dequeue the work, we're done!
                _logger.Log(LogLevel.Warning, $"creation of {firstRecord.sourceFileName} and others in batch failed with {response.StatusCode}");
                if (streamContent != null) { streamContent.Dispose(); }
                if (response != null) { response.Dispose(); }
                if (content != null) { content.Dispose(); }

                foreach (var ri in batch)
                {
                    ri.Error = HttpStatusCode.UnprocessableEntity.ToString();
                    _routedItemManager.Init(ri);
                    _routedItemManager.Dequeue(Connection, Connection.toEGS, nameof(Connection.toEGS), error: true);
                }
                break;

            default:
                if (response.StatusCode == HttpStatusCode.Unauthorized)
                {
                    Connection.loginNeeded = true;
                }

                _logger.Log(LogLevel.Warning, $"creation of {firstRecord.sourceFileName} and others in batch failed with {response.StatusCode}");
                _liteHttpClient.DumpHttpClientDetails();
                break;
        }

        //delete the compression test file
        File.Delete(testFile);
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"Inner Exception: {e.InnerException}");
        }
        _liteHttpClient.DumpHttpClientDetails();
    }
    catch (FileNotFoundException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} {e.Message} {e.StackTrace}");
    }
    catch (IOException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} {e.Message} {e.StackTrace}");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        _liteHttpClient.DumpHttpClientDetails();
    }
    finally
    {
        try
        {
            if (streamContent != null) { streamContent.Dispose(); }
            if (response != null) { response.Dispose(); }
            if (content != null) { content.Dispose(); }
            File.Delete(testFile);
            _taskManager.Stop($"{Connection.name}.Store");
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
        }
    }
}

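// Illustrative usage sketch (not part of the original source): a caller might
// drain Connection.toEGS into size-bounded batches before invoking store().
// The maxBatch threshold below is a hypothetical value for illustration only.
//
//   var pending = Connection.toEGS.ToList();
//   const int maxBatch = 50; // hypothetical cap on small files per multipart POST
//   for (int i = 0; i < pending.Count; i += maxBatch)
//   {
//       await store(pending.Skip(i).Take(maxBatch).ToList(), taskID, Connection);
//   }
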
/// <summary>
/// Merges another profile into this one in an additive fashion unless version is higher.
/// This should preserve the arg > file > remote precedence with the exception of variables required to be defined
/// at program startup like kickoffInterval, logRetentionDays, name, etc.
/// Command line args boot with version = 0 unless otherwise specified.
/// To add settings without agent restart, add at Cloud or agent with same version.
/// To replace settings without agent restart, add at Cloud or agent with incremented version. Note: live queues will reset.
/// If you switch profiles at the agent, the name will (should) be different and merge is skipped.
/// </summary>
/// <param name="current"></param>
/// <param name="profile"></param>
public void MergeProfile(Profile current, Profile profile)
{
    Throw.IfNull(current);

    bool bootstrap = current.name.Equals("Bootstrap");

    if (profile == null)
    {
        return;
    }

    lock (current)
    {
        //game over!! Someone switched profiles on the client or server or it's not time
        if ((!current.name.Equals(profile.name) && !bootstrap) || !(DateTime.Now > profile.activationTime))
        {
            _logger.Log(LogLevel.Debug,
                $"inbound profile name: {profile.name} this name: {current.name} bootstrap: {bootstrap}, inbound activationTime: {profile.activationTime} using ignore strategy");
            return;
        }

        if (profile.version > current.version)
        {
            _logger.Log(LogLevel.Debug,
                $"inbound profile version {profile.version} greater than current version {current.version}, using replacement strategy");

            // foreach (var conn in this.connections)
            // {
            //     Logger.logger.Log(TraceEventType.Information, $"stopping {conn.name}");
            //     conn.stop();
            // }

            _taskManager.Stop();

            //replace all settings, realizing this dumps all live queues and connections
            //may want code that is more fine grained and doesn't drop work in progress.
            current.activationTime = profile.activationTime;
            current.availableCodeVersions = profile.availableCodeVersions;
            //this.backlog; shb nonserialized
            current.backlogDetection = profile.backlogDetection;
            current.backlogInterval = profile.backlogInterval;

            /* 2020-05-22 AMG added properties to profile */
            current.duplicatesDetectionUpload = profile.duplicatesDetectionUpload;
            current.duplicatesDetectionDownload = profile.duplicatesDetectionDownload;
            current.duplicatesDetectionInterval = profile.duplicatesDetectionInterval;
            current.modalityList = profile.modalityList;
            current.modalityDetectionArchivePeriod = profile.modalityDetectionArchivePeriod;

            current.connections = profile.connections;
            current.dcmtkLibPath = profile.dcmtkLibPath;
            //this.errors = profile.errors; shb not needed
            //this.highWait = profile.highWait; shb nonserialized
            current.highWaitDelay = profile.highWaitDelay;
            //this.jsonInError = profile.jsonInError; shb not needed
            //this.jsonSchemaPath = profile.jsonSchemaPath; shb not needed
            current.KickOffInterval = profile.KickOffInterval;
            //this.lastKickOff = profile.lastKickOff; shb not needed
            //this.lastStartup = profile.lastStartup; shb not needed
            current.Labels = profile.Labels;

            //2018-02-13 shb need to assign inbound profile.logger settings during replacement strategy
            var primary = _connectionFinder.GetPrimaryLifeImageConnection(current);
            //current.logger = new Logger("default");
            current.logger = new Logger();
            current.logger.ConsoleTraceLevel = profile.logger.ConsoleTraceLevel;
            current.logger.SplunkTraceLevel = profile.logger.SplunkTraceLevel;
            current.logger.FileTraceLevel = profile.logger.FileTraceLevel;
            current.logger.TracePattern = profile.logger.TracePattern;
            Logger.logger = current.logger;
            //Logger.logger.Init();
            _loggerManager.Init(current.logger);

            current.LogFileSize = profile.LogFileSize;
            current.logRetentionDays = profile.logRetentionDays;
            current.maxTaskDuration = profile.maxTaskDuration;
            //this.mediumWait = profile.mediumWait; shb nonserialized
            current.mediumWaitDelay = profile.mediumWaitDelay;
            current.minFreeDiskBytes = profile.minFreeDiskBytes;
            //this.modifiedDate = profile.modifiedDate; shb not needed
            current.name = profile.name;
            Profile._overrideVersionAndModifiedDate = Profile._overrideVersionAndModifiedDate;
            //profileConverter not needed
            current.recoveryInterval = profile.recoveryInterval;
            //this.rowVersion = profile.rowVersion; shb not merged because we get this from the api call
            current.rules = profile.rules;
            current.run = profile.run;
            //this.runningCodeVersion = profile.runningCodeVersion; shb not needed
            // Only allow startup params in startup profile
            //this.startupParams = profile.startupParams;
            //startupConfigFilePath shb not needed
            //this.startupParams = profile.startupParams; shb not needed
            current.taskDelay = profile.taskDelay;
            current.tempFileRetentionHours = profile.tempFileRetentionHours;
            current.tempPath = profile.tempPath;
            current.updateCodeVersion = profile.updateCodeVersion;
            current.updateUrl = profile.updateUrl;
            current.updatePassword = profile.updatePassword;
            current.updateUsername = profile.updateUsername;
            current.useSocketsHttpHandler = profile.useSocketsHttpHandler;
            current.version = profile.version;

            // shb will change the value of tempPath and write back to a profile if saved (including possibly the same profile).
            //convenience assignment to reduce number of calls to get Windows ProgramData folder.
            current.tempPath = _util.GetTempFolder(current.tempPath);

            _profileWriter.SaveProfile(current).Wait();

            if (!bootstrap)
            {
                throw new Exception("Replacement Strategy Needs Full LITE.init(), throwing this exception on purpose!");
            }
        }
        else if (profile.version == current.version)
        {
            _logger.Log(LogLevel.Debug,
                $"inbound profile version {profile.version} same as current version {current.version}, using merge strategy");

            //2018-03-03 shb need to assign inbound profile.logger settings during merge so we can have non-destructive loglevel changes
            // this.logger.logLevel = profile.logger.logLevel;
            var primary = _connectionFinder.GetPrimaryLifeImageConnection(current);
            //current.logger = new Logger("default");
            current.logger = new Logger();
            // this.logger.logLevel = profile.logger.logLevel;
            current.logger.ConsoleTraceLevel = profile.logger.ConsoleTraceLevel;
            current.logger.SplunkTraceLevel = profile.logger.SplunkTraceLevel;
            current.logger.FileTraceLevel = profile.logger.FileTraceLevel;
            current.logger.TracePattern = profile.logger.TracePattern;
            Logger.logger = current.logger;
            //Logger.logger.Init();
            _loggerManager.Init(current.logger);

            if (current.updateCodeVersion == null)
            {
                current.updateCodeVersion = profile.updateCodeVersion;
            }

            //merge settings in additive fashion except override ones that startup with predefined values
            foreach (var srcConn in profile.connections)
            {
                Connection destConn = current.connections.Find(e => e.name == srcConn.name);
                if (destConn == null)
                {
                    //srcConn.profile = this;
                    current.connections.Add(srcConn);
                }
            }

            //dev hack to load in script example.
            Script script = new Script
            {
                name = "Hello World",
                source = "using System.Diagnostics; if (logger.logLevel == \"Trace\") logger.Log(TraceEventType.Verbose, $\"Hello World\");"
            };

            if (current.rules.scripts.Find(e => e.name == script.name) == null)
            {
                _logger.Log(LogLevel.Debug, $"adding script {script.name}");
                current.rules.scripts.Add(script);
            }

            foreach (var rule in profile.rules.destRules)
            {
                if (!current.rules.destRules.Exists(e => e.name == rule.name))
                {
                    current.rules.destRules.Add(rule);
                }
            }

            var msg = "";
            foreach (var rule in current.rules.destRules)
            {
                msg += rule.name + " ";
            }
            _logger.Log(LogLevel.Debug, $"{current.rules.destRules.Count} rules after merge: {msg}");
        }
        else if (profile.version < current.version)
        {
            //ignore
            _logger.Log(LogLevel.Debug,
                $"inbound profile version {profile.version} less than current version {current.version}, using ignore strategy");
            return;
        }
        else
        {
            //ignore
            _logger.Log(LogLevel.Debug,
                $"Unexpected condition inbound version {profile.version} current version {current.version}, using ignore strategy");
            return;
        }
    }
}

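// Decision summary for MergeProfile (restating the branches above):
//   inbound.version >  current.version  -> replacement strategy: stop tasks, copy
//                                          settings, save, and (unless bootstrapping)
//                                          throw to force a full LITE.init()
//   inbound.version == current.version  -> merge strategy: additive for connections,
//                                          scripts, and destRules; logger levels replaced
//   inbound.version <  current.version  -> ignore strategy
// Example: an agent at version 3 receiving a cloud profile also at version 3 gains
// any new connections and destRules without a restart; a version 4 profile resets
// live queues and reinitializes.
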
public async Task PostCompletion(LifeImageCloudConnection Connection, RoutedItem routedItem, IConnectionRoutedCacheManager cacheManager, IHttpManager httpManager, long taskID)
{
    Throw.IfNull(Connection);
    Throw.IfNull(routedItem);
    Throw.IfNull(cacheManager);
    Throw.IfNull(httpManager);

    var stopWatch = new Stopwatch();
    stopWatch.Start();

    var taskInfo = $"task: {taskID} connection: {Connection.name} id: {routedItem.id} ";
    HttpResponseMessage response = null;
    var httpClient = _liteHttpClient.GetClient(Connection);

    try
    {
        if (routedItem.Study == null || routedItem.Study == "")
        {
            _logger.Log(LogLevel.Warning,
                $"{taskInfo} meta: {routedItem.RoutedItemMetaFile} cannot close routedItem.Study: {routedItem.Study} because null or blank.");
            cacheManager.RemoveCachedItem(routedItem);
            return;
        }

        //POST /api/agent/v1/study/{studyInstanceUid}/upload-close
        //string studyCloseURL = Connection.URL + $"/api/agent/v1/study/{routedItem.Study}/upload-close";
        string studyCloseURL = Connection.URL + CloudAgentConstants.GetUploadCloseUrl(routedItem.Study);
        _logger.Log(LogLevel.Debug, $"{taskInfo} studyCloseURL: {studyCloseURL}");

        var metadata = "";
        try
        {
            metadata = _cloudConnectionCacheAccessor.GetCachedItemMetaData(Connection, routedItem, taskID);
        }
        catch (Exception e)
        {
            _logger.Log(LogLevel.Warning,
                $"{taskInfo} Unable to produce metadata for {routedItem.id} {routedItem.RoutedItemMetaFile}: {e.Message} {e.StackTrace}");
        }

        using (HttpContent httpContent = new StringContent(metadata))
        {
            var cookies = _liteHttpClient.GetCookies(studyCloseURL);
            _logger.LogCookies(cookies, taskInfo);

            response = await httpClient.PostAsync(studyCloseURL, httpContent, _taskManager.cts.Token);

            // output the result
            _logger.LogHttpResponseAndHeaders(response, taskInfo);

            if (response.StatusCode == HttpStatusCode.Unauthorized)
            {
                httpManager.loginNeeded = true;
            }

            //BOUR-995 we don't want to dequeue unless completed or failed
            if (response.StatusCode == HttpStatusCode.OK)
            {
                _routedItemManager.Init(routedItem);
                _routedItemManager.Dequeue(Connection, Connection.toCloud, nameof(Connection.toCloud), error: false);
                cacheManager.RemoveCachedItem(routedItem);
            }

            //BOUR-995 we don't want to dequeue unless completed or failed
            if (response.StatusCode == HttpStatusCode.InternalServerError || response.StatusCode == HttpStatusCode.BadRequest)
            {
                _logger.Log(LogLevel.Warning, $"{taskInfo} {response.StatusCode} {response.ReasonPhrase}. Dequeuing to error folder");
                _liteHttpClient.DumpHttpClientDetails();

                _routedItemManager.Init(routedItem);
                _routedItemManager.Dequeue(Connection, Connection.toCloud, nameof(Connection.toCloud), error: true);
                cacheManager.RemoveCachedItem(routedItem);
            }
        }

        stopWatch.Stop();
        _logger.Log(LogLevel.Information, $"{taskInfo} elapsed: {stopWatch.Elapsed}");
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} Inner Exception: {e.InnerException}");
        }
        _liteHttpClient.DumpHttpClientDetails();
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        _liteHttpClient.DumpHttpClientDetails();
        cacheManager.RemoveCachedItem(routedItem);
    }
    finally
    {
        try
        {
            _taskManager.Stop($"{Connection.name}.PostCompletion");
            if (response != null)
            {
                response.Dispose();
            }
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
        }
    }
}

public async Task stowAsMultiPart(List<RoutedItem> batch, int taskID, LifeImageCloudConnection Connection, IHttpManager httpManager)
{
    Throw.IfNull(Connection);
    Throw.IfNull(httpManager);

    var taskInfo = $"task: {taskID} connection: {Connection.name}";

    StreamContent streamContent = null;
    MultipartContent content = null;
    HttpResponseMessage response = null;
    string testFile = null;
    var firstRecord = batch.First();
    var httpClient = _liteHttpClient.GetClient(Connection);

    try
    {
        var stopWatch = new Stopwatch();
        stopWatch.Start();

        //set the URL
        //string stowURL = Connection.URL + "/api/agent/v1/stow/studies";
        string stowURL = Connection.URL + CloudAgentConstants.StowStudies;
        _logger.Log(LogLevel.Debug, $"{taskInfo} stowURL: {stowURL}");

        // generate guid for boundary...boundaries cannot be accidentally found in the content
        var boundary = Guid.NewGuid();
        _logger.Log(LogLevel.Debug, $"{taskInfo} boundary: {boundary}");

        // create the content
        content = new MultipartContent("related", boundary.ToString());

        //add the sharing headers
        List<string> shareHeader = new List<string>();
        if (Connection.shareDestinations != null)
        {
            foreach (var connectionSet in firstRecord.toConnections.FindAll(e => e.connectionName.Equals(Connection.name)))
            {
                if (connectionSet.shareDestinations != null)
                {
                    foreach (var shareDestination in connectionSet.shareDestinations)
                    {
                        shareHeader.Add(shareDestination.boxUuid);
                        _logger.Log(LogLevel.Debug,
                            $"{taskInfo} sharing to: {shareDestination.boxId} {shareDestination.boxName} {shareDestination.groupId} {shareDestination.groupName} {shareDestination.organizationName} {shareDestination.publishableBoxType}");
                    }
                }
            }
        }

        content.Headers.Add("X-Li-Destination", shareHeader);

        long fileSize = 0;
        var dir = _profileStorage.Current.tempPath + Path.DirectorySeparatorChar + Connection.name + Path.DirectorySeparatorChar + Constants.Dirs.ToCloud;
        Directory.CreateDirectory(dir);
        testFile = dir + Path.DirectorySeparatorChar + Guid.NewGuid() + ".gz";

        using (FileStream compressedFileStream = File.Create(testFile))
        {
            using GZipStream compressionStream = new GZipStream(compressedFileStream, CompressionMode.Compress);

            foreach (var routedItem in batch)
            {
                if (File.Exists(routedItem.sourceFileName))
                {
                    routedItem.stream = File.OpenRead(routedItem.sourceFileName);

                    if (Connection.CalcCompressionStats)
                    {
                        routedItem.stream.CopyTo(compressionStream);
                    }

                    fileSize += routedItem.length;
                    streamContent = new StreamContent(routedItem.stream);
                    streamContent.Headers.ContentDisposition = new ContentDispositionHeaderValue("attachment")
                    {
                        FileName = routedItem.sourceFileName
                    };
                    content.Add(streamContent);

                    //shb 2017-11-01 streamcontent (uncompressed) is inside content (can be compressed below) server is complaining so will comment out
                    //streamContent.Headers.Add("Content-Transfer-Encoding", "gzip");

                    //shb 2017-11-10 shb added content-type header to solve
                    //controller.DicomRSControllerBase: Content-Encoding header has value null !!!
                    //controller.StowRSController: Unable to process part 1 no content-type parameter received
                    streamContent.Headers.Add("content-type", "application/dicom");
                }
                else
                {
                    _logger.Log(LogLevel.Error,
                        $"{taskInfo} {routedItem.sourceFileName} no longer exists. Increase tempFileRetentionHours for heavy transfer backlogs that may take hours!!");
                }
            }
        }

        if (Connection.CalcCompressionStats)
        {
            FileInfo info = new FileInfo(testFile);
            _logger.Log(LogLevel.Information,
                $"{taskInfo} orgSize: {fileSize} compressedSize: {info.Length} reduction: {(fileSize == 0 ? 0 : (fileSize * 1.0 - info.Length) / (fileSize) * 100)}%");
        }

        // issue the POST
        Task<HttpResponseMessage> task;

        if (firstRecord.Compress == true)
        {
            var compressedContent = new CompressedContent(content, "gzip");
            _logger.Log(LogLevel.Debug, $"{taskInfo} compressedContent.Headers {compressedContent.Headers} ");

            var cookies = _liteHttpClient.GetCookies(stowURL);
            _logger.LogCookies(cookies, taskInfo);

            task = httpClient.PostAsync(stowURL, compressedContent, _taskManager.cts.Token);
        }
        else
        {
            _logger.Log(LogLevel.Debug, $"{taskInfo} will send content.Headers {content.Headers}");

            var cookies = _liteHttpClient.GetCookies(stowURL);
            _logger.LogCookies(cookies, taskInfo);

            task = httpClient.PostAsync(stowURL, content, _taskManager.cts.Token);
        }

        response = await task;

        // output the result
        _logger.LogHttpResponseAndHeaders(response, taskInfo);
        _logger.Log(LogLevel.Debug, $"{taskInfo} response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");

        stopWatch.Stop();
        _logger.Log(LogLevel.Information,
            $"{taskInfo} elapsed: {stopWatch.Elapsed} size: {fileSize} rate: {(float)fileSize / stopWatch.Elapsed.TotalMilliseconds * 1000 / 1000000} MB/s");

        if (!(response.StatusCode == HttpStatusCode.OK || response.StatusCode == HttpStatusCode.Accepted))
        {
            if (response.StatusCode == HttpStatusCode.Unauthorized)
            {
                httpManager.loginNeeded = true;
            }

            _logger.Log(LogLevel.Warning, $"stow of {firstRecord.sourceFileName} and others in batch failed with {response.StatusCode}");
            _liteHttpClient.DumpHttpClientDetails();
        }
        else
        {
            //dequeue the work, we're done!
            if (streamContent != null) { streamContent.Dispose(); }
            if (response != null) { response.Dispose(); }
            if (content != null) { content.Dispose(); }

            foreach (var ri in batch)
            {
                _routedItemManager.Init(ri);
                _routedItemManager.Dequeue(Connection, Connection.toCloud, nameof(Connection.toCloud));
            }
        }

        //delete the compression test file
        File.Delete(testFile);
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} Inner Exception: {e.InnerException}");
        }
        _liteHttpClient.DumpHttpClientDetails();
    }
    catch (FileNotFoundException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} {e.Message} {e.StackTrace}");
    }
    catch (IOException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} {e.Message} {e.StackTrace}");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        _liteHttpClient.DumpHttpClientDetails();
    }
    finally
    {
        try
        {
            if (streamContent != null) { streamContent.Dispose(); }
            if (response != null) { response.Dispose(); }
            if (content != null) { content.Dispose(); }
            File.Delete(testFile);
            _taskManager.Stop($"{Connection.name}.Stow");
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
        }
    }
}

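// Request sketch for the stow POST assembled above (illustrative; header and part
// layout inferred from the code, placeholder values in braces):
//
//   POST {Connection.URL}/api/agent/v1/stow/studies
//   Content-Type: multipart/related; boundary={guid}
//   X-Li-Destination: {boxUuid}[, {boxUuid}...]
//
//   --{guid}
//   content-type: application/dicom
//   Content-Disposition: attachment; filename="{sourceFileName}"
//
//   <DICOM bytes>
//   --{guid}--
//
// When firstRecord.Compress is true, CompressedContent additionally gzip-wraps the
// entire multipart body.
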
public async Task PostResponse(LifeImageCloudConnection Connection, RoutedItem routedItem, IConnectionRoutedCacheManager cacheManager, IHttpManager httpManager, long taskID)
{
    var stopWatch = new Stopwatch();
    stopWatch.Start();

    var taskInfo = $"task: {taskID} connection: {Connection.name} id: {routedItem.id} ";

    _logger.Log(LogLevel.Debug, $"{taskInfo} request: {routedItem.request}");
    foreach (var results in routedItem.response)
    {
        _logger.Log(LogLevel.Debug, $"{taskInfo} response: {results}");
    }

    HttpResponseMessage response = null;

    try
    {
        string json = JsonSerializer.Serialize(routedItem.cloudTaskResults);
        _logger.Log(LogLevel.Debug, $"{taskInfo} posting {json}");

        string base64Results = Convert.ToBase64String(Encoding.ASCII.GetBytes(json));

        //string agentTasksURL = Connection.URL + $"/api/agent/v1/agent-task-results/{routedItem.id}";
        string agentTasksURL = Connection.URL + CloudAgentConstants.GetAgentTaskResultUrl(routedItem.id);

        //optional status="NEW", "PENDING", "COMPLETED", "FAILED"
        agentTasksURL += $"?status={routedItem.status}";
        _logger.Log(LogLevel.Debug, $"{taskInfo} agentTasksURL: {agentTasksURL}");

        var httpClient = _liteHttpClient.GetClient(Connection);

        using (HttpContent httpContent = new StringContent(base64Results))
        {
            var cookies = _liteHttpClient.GetCookies(agentTasksURL);
            _logger.LogCookies(cookies, taskInfo);

            response = await httpClient.PostAsync(agentTasksURL, httpContent, _taskManager.cts.Token);

            // output the result
            _logger.LogHttpResponseAndHeaders(response, taskInfo);

            if (response.StatusCode == HttpStatusCode.Unauthorized)
            {
                httpManager.loginNeeded = true;
            }

            //BOUR-995 we don't want to dequeue unless completed or failed
            if (response.StatusCode == HttpStatusCode.OK &&
                (routedItem.status == RoutedItem.Status.COMPLETED || routedItem.status == RoutedItem.Status.FAILED))
            {
                _routedItemManager.Init(routedItem);
                _routedItemManager.Dequeue(Connection, Connection.toCloud, nameof(Connection.toCloud), error: false);
                cacheManager.RemoveCachedItem(routedItem);
            }

            //BOUR-995 we don't want to dequeue unless completed or failed
            if ((response.StatusCode == HttpStatusCode.InternalServerError || response.StatusCode == HttpStatusCode.BadRequest) &&
                (routedItem.status == RoutedItem.Status.COMPLETED || routedItem.status == RoutedItem.Status.FAILED))
            {
                _logger.Log(LogLevel.Warning, $"{taskInfo} {response.StatusCode} {response.ReasonPhrase}. Dequeuing to error folder");
                _liteHttpClient.DumpHttpClientDetails();

                _routedItemManager.Init(routedItem);
                _routedItemManager.Dequeue(Connection, Connection.toCloud, nameof(Connection.toCloud), error: true);
                cacheManager.RemoveCachedItem(routedItem);
            }
        }

        stopWatch.Stop();
        _logger.Log(LogLevel.Information, $"{taskInfo} elapsed: {stopWatch.Elapsed}");
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} Inner Exception: {e.InnerException}");
        }
        _liteHttpClient.DumpHttpClientDetails();
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        _liteHttpClient.DumpHttpClientDetails();
    }
    finally
    {
        try
        {
            _taskManager.Stop($"{Connection.name}.PostResponse");
            if (response != null)
            {
                response.Dispose();
            }
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
        }
    }
}

// markDownloadComplete is used to remove an item that was in the /studies call
public async Task markDownloadComplete(int taskID, LifeImageCloudConnection connection, IHttpManager httpManager)
{
    Connection = connection;
    var httpClient = _liteHttpClient.GetClient(connection);
    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    HttpResponseMessage response = null;

    try
    {
        //process the series that have completed
        _logger.Log(LogLevel.Debug, $"{taskInfo} Processing Series Completion");

        try
        {
            //loop through the studies and if a study has all of its series downloaded, then we can remove it and tell cloud we downloaded it
            if (Connection.studies != null && Connection.studies.ImagingStudy != null)
            {
                foreach (var study in Connection.studies.ImagingStudy.ToList())
                {
                    var remaining = new List<Series>();
                    bool seriesFail = false;

                    if (study.series != null)
                    {
                        remaining = study.series?.FindAll(e => e.downloadCompleted == DateTime.MinValue);
                        //var studyFail = study.attempts > maxAttempts; we aren't doing study LogLevel.
                        seriesFail = study.series?.FindAll(e => e.attempts > Connection.maxAttempts).Count > 0;
                        //var instanceFail = ins
                        //the study object contains a list of series but the series object
                        //does not contain a list of instances. So no marking and clearing at instance level yet.
                    }

                    if (remaining.Count == 0)
                    {
                        foreach (var series in study.series)
                        {
                            _logger.Log(LogLevel.Debug,
                                $"{taskInfo} study: {study.uid} series: {series.uid} started: {series.downloadStarted.ToString("yyyy-MM-dd HH:mm:ss.ffff")} completed: {series.downloadCompleted.ToString("yyyy-MM-dd HH:mm:ss.ffff")} duration: {series.downloadCompleted - series.downloadStarted} attempts: {series.attempts}");
                        }

                        study.downloadCompleted = study.series.Max(e => e.downloadCompleted);
                        // DateTime is non-nullable; an unset downloadStarted is DateTime.MinValue
                        study.downloadStarted = study.series.FindAll(e => e.downloadStarted != DateTime.MinValue)
                            .Min(e => e.downloadStarted);

                        _logger.Log(LogLevel.Information,
                            $"{taskInfo} study download (complete): {study.uid} started: {study.downloadStarted.ToString("yyyy-MM-dd HH:mm:ss.ffff")} completed: {study.downloadCompleted.ToString("yyyy-MM-dd HH:mm:ss.ffff")} duration: {study.downloadCompleted - study.downloadStarted} attempts: {study.attempts}");

                        Connection.markDownloadsComplete.Add(new string[] { study.url, "download-complete" });
                    }

                    if (seriesFail)
                    {
                        foreach (var series in study.series)
                        {
                            _logger.Log(LogLevel.Debug,
                                $"{taskInfo} study: {study.uid} series: {series.uid} started: {series.downloadStarted.ToString("yyyy-MM-dd HH:mm:ss.ffff")} completed: {series.downloadCompleted.ToString("yyyy-MM-dd HH:mm:ss.ffff")} duration: {series.downloadCompleted - series.downloadStarted} attempts: {series.attempts}");
                        }

                        study.downloadCompleted = study.series.Max(e => e.downloadCompleted);
                        study.downloadStarted = study.series.FindAll(e => e.downloadStarted != DateTime.MinValue)
                            .Min(e => e.downloadStarted);

                        _logger.Log(LogLevel.Information,
                            $"{taskInfo} study download (failed): {study.uid} started: {study.downloadStarted.ToString("yyyy-MM-dd HH:mm:ss.ffff")} completed: {study.downloadCompleted.ToString("yyyy-MM-dd HH:mm:ss.ffff")} duration: {study.downloadCompleted - study.downloadStarted} attempts: {study.attempts}");

                        _logger.Log(LogLevel.Information, $"{taskInfo} Failing study: {study.url}");
                        Connection.markDownloadsComplete.Add(new string[] { study.url, "download-fail" });
                    }
                }
            }

            foreach (var seriesObj in Connection.markSeriesComplete.ToArray())
            {
                _logger.Log(LogLevel.Debug, $"{taskInfo} new Series Complete: {seriesObj.uid}");

                if (Connection.studies != null && Connection.studies.ImagingStudy != null)
                {
                    foreach (var study in Connection.studies?.ImagingStudy)
                    {
                        foreach (var series in study.series)
                        {
                            if (series.uid == seriesObj.uid)
                            {
                                // DateTime is non-nullable; an unset downloadCompleted is DateTime.MinValue
                                if (series.downloadCompleted == DateTime.MinValue)
                                {
                                    _logger.Log(LogLevel.Debug, $"{taskInfo} writing timestamps markSeriesComplete: {series.uid}");
                                    series.downloadCompleted = seriesObj.downloadCompleted;
                                    series.downloadStarted = seriesObj.downloadStarted;
                                    series.attempts = seriesObj.attempts;
                                }
                                else
                                {
                                    _logger.Log(LogLevel.Debug, $"{taskInfo} series already marked as complete: {series.uid}");
                                    series.downloadCompleted = DateTime.Now;
                                    series.attempts = seriesObj.attempts;
                                }
                            }
                        }
                    }
                }
            }

            Connection.markSeriesComplete.Clear();
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
        }

        foreach (var markinfo in Connection.markDownloadsComplete.ToList())
        {
            try
            {
                _logger.Log(LogLevel.Debug, $"{taskInfo} marking: {markinfo[0]} {markinfo[1]}");

                var stopWatch = new Stopwatch();
                stopWatch.Start();

                string markDownloadCompleteURL = markinfo[0] + "/" + markinfo[1];
                _logger.Log(LogLevel.Debug, $"{taskInfo} markDownloadCompleteURL: {markDownloadCompleteURL}");

                //set the form parameters
                var nothingParams = new FormUrlEncodedContent(new[]
                {
                    new KeyValuePair<string, string>("nothing", "nothing"),
                });

                var cookies = _liteHttpClient.GetCookies(markDownloadCompleteURL);
                _logger.LogCookies(cookies, taskInfo);

                // issue the POST
                var task = httpClient.PostAsync(markDownloadCompleteURL, nothingParams, _taskManager.cts.Token);
                response = await task;

                // output the result
                _logger.LogHttpResponseAndHeaders(response, taskInfo);

                if (response.StatusCode != HttpStatusCode.OK)
                {
                    _logger.Log(LogLevel.Warning, $"{taskInfo} {response.StatusCode} {markDownloadCompleteURL}");
                    _liteHttpClient.DumpHttpClientDetails();
                }

                if (response.StatusCode == HttpStatusCode.Unauthorized)
                {
                    httpManager.loginNeeded = true;
                }

                if (response.StatusCode == HttpStatusCode.OK || response.StatusCode == HttpStatusCode.NotFound)
                {
                    _logger.Log(LogLevel.Information, $"{taskInfo} {response.StatusCode} {markDownloadCompleteURL}");

                    lock (Connection.studies)
                    {
                        Connection.studies.ImagingStudy.RemoveAll(e => e.url == markinfo[0]);
                    }

                    lock (Connection.markDownloadsComplete)
                    {
                        Connection.markDownloadsComplete.Remove(markinfo);
                    }
                }

                _logger.Log(LogLevel.Debug, $"{taskInfo} response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");

                stopWatch.Stop();
                _logger.Log(LogLevel.Information, $"{taskInfo} elapsed: {stopWatch.Elapsed}");
            }
            catch (TaskCanceledException)
            {
                _logger.Log(LogLevel.Information, $"{taskInfo} Task Canceled");
            }
            catch (HttpRequestException e)
            {
                _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
                if (e.InnerException != null)
                {
                    _logger.Log(LogLevel.Warning, $"{taskInfo} Inner Exception: {e.InnerException}");
                }
                _liteHttpClient.DumpHttpClientDetails();
            }
            catch (Exception e)
            {
                _logger.LogFullException(e, $"{taskInfo} markDownloadComplete failed");
                _liteHttpClient.DumpHttpClientDetails();
            }
        }

        _logger.Log(LogLevel.Debug, $"{taskInfo} Processing getStudies");
        await _studyManager.getStudies(taskID, connection, httpManager);
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    finally
    {
        try
        {
            _taskManager.Stop($"{Connection.name}.markDownloadComplete");
            if (response != null)
            {
                response.Dispose();
            }
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
        }
    }
}

public async Task wadoAsFileStream(
    LifeImageCloudConnection connection,
    int taskID,
    ImagingStudy study,
    IHttpManager httpManager,
    Series series = null,
    Instance instance = null,
    bool compress = true)
{
    Connection = connection;
    var taskInfo = $"task: {taskID} connection: {Connection.name}";

    var stopWatch = new Stopwatch();
    stopWatch.Start();

    string url = $"{study.url}";
    string dir = _profileStorage.Current.tempPath + Path.DirectorySeparatorChar + Connection.name + Path.DirectorySeparatorChar + Constants.Dirs.ToRules + Path.DirectorySeparatorChar + Guid.NewGuid();

    long fileSize = 0;
    HttpResponseMessage response = null;
    MultipartFileStreamProvider streamProvider = null;
    MultipartFileStreamProvider contents = null;
    var httpClient = _liteHttpClient.GetClient(connection);

    try
    {
        _logger.Log(LogLevel.Debug, $"{taskInfo} study url: {url} attempt {study.attempts}");

        if (series != null)
        {
            url += $"/series/{series.uid.Substring(8)}";
            _logger.Log(LogLevel.Debug, $"{taskInfo} seriesURL: {url} attempt {series.attempts}");
        }

        if (instance != null)
        {
            instance.downloadStarted = DateTime.Now;
            instance.attempts++;
            url += $"/instances/{instance.uid}";
            _logger.Log(LogLevel.Debug, $"{taskInfo} instanceURL: {url} attempt {instance.attempts}");
        }

        var cookies = _liteHttpClient.GetCookies(url);
        _logger.LogCookies(cookies, taskInfo);

        // issue the GET
        var task = httpClient.GetAsync(url, HttpCompletionOption.ResponseHeadersRead, _taskManager.cts.Token);

        try
        {
            response = await task;
        }
        catch (System.Threading.Tasks.TaskCanceledException)
        {
            _logger.Log(LogLevel.Information, $"{taskInfo} Task Canceled");
        }

        // output the result
        _logger.LogHttpResponseAndHeaders(response, taskInfo);
        //if (Logger.logger.FileTraceLevel == "Verbose") _logger.Log(LogLevel.Debug, $"{taskInfo} await response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");

        if (response.StatusCode != HttpStatusCode.OK)
        {
            if (response.StatusCode == HttpStatusCode.Unauthorized)
            {
                httpManager.loginNeeded = true;
            }
            _liteHttpClient.DumpHttpClientDetails();
            return;
        }

        // 2018-05-09 shb need to get header from Cloud to tell us how big it is
        if (!_util.IsDiskAvailable(dir, _profileStorage.Current, 16000000000)) //just using 16GB as a catch all
        {
            _logger.Log(LogLevel.Debug, $"{taskInfo} Insufficient disk to write {url} to {dir} guessing it could be 16GB");
            return;
            //throw new Exception($"Insufficient disk to write {url} to {dir} guessing it could be 16GB");
        }

        _logger.Log(LogLevel.Debug, $"{taskInfo} download dir will be {dir}");
        Directory.CreateDirectory(dir);

        streamProvider = new MultipartFileStreamProvider(dir);
        contents = await response.Content.ReadAsMultipartAsync(streamProvider, _taskManager.cts.Token);

        int index = 0;
        _logger.Log(LogLevel.Debug, $"{taskInfo} Splitting {contents.FileData.Count} files into RoutedItems.");

        foreach (var part in contents.FileData)
        {
            try
            {
                index++;
                fileSize += new System.IO.FileInfo(part.LocalFileName).Length;
                _logger.Log(LogLevel.Debug, $"{taskInfo} downloaded file: {part.LocalFileName}");

                RoutedItem routedItem = new RoutedItem(fromConnection: Connection.name, sourceFileName: part.LocalFileName, taskID: taskID, fileIndex: index, fileCount: contents.FileData.Count)
                {
                    type = RoutedItem.Type.DICOM,
                    Study = study.uid,
                    AccessionNumber = study.accession?.value,
                    //study.availability;
                    //routedItem.Description = study.description;
                    //study.extension;
                    //study.modalityList;
                    PatientID = study.patient?.display,
                    //study.referrer;
                    //study.resourceType;
                    Series = series?.uid
                };
                //study.started;
                //study.url;

                _logger.Log(LogLevel.Debug, $"{taskInfo} Enqueuing RoutedItem {routedItem.sourceFileName}");

                _routedItemManager.Init(routedItem);
                _routedItemManager.Enqueue(Connection, Connection.toRules, nameof(Connection.toRules));
            }
            catch (Exception e)
            {
                _logger.LogFullException(e, taskInfo);
            }
        }

        //2018-04-27 shb moved completion marking outside of part loop to avoid duplicate entries in markSeriesComplete
        //also added duplicate check.
        if (series != null)
        {
            series.downloadCompleted = DateTime.Now;
            lock (Connection.markSeriesComplete)
            {
                if (!Connection.markSeriesComplete.Contains(series))
                {
                    Connection.markSeriesComplete.Add(series);
                }
            }
        }
        else if (instance != null)
        {
            //means this came from studies calls so we need to mark this download as complete
            instance.downloadCompleted = DateTime.Now;
            lock (Connection.markDownloadsComplete)
            {
                Connection.markDownloadsComplete.Add(new string[] { url, "download-complete" });
            }
        }
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (NullReferenceException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        //throw e;
    }
    catch (HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} Inner Exception: {e.InnerException}");
        }
        _liteHttpClient.DumpHttpClientDetails();
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        _liteHttpClient.DumpHttpClientDetails();
    }
    finally
    {
        if (response != null)
        {
            response.Dispose();
        }
        _taskManager.Stop($"{Connection.name}.Wado");
    }

    stopWatch.Stop();
    _logger.Log(LogLevel.Information,
        $"{taskInfo} elapsed: {stopWatch.Elapsed} size: {fileSize} rate: {(float)fileSize / stopWatch.Elapsed.TotalMilliseconds * 1000 / 1000000} MB/s");
}

/// <summary>
/// DownloadViaHttp downloads a resource from the EGS file API as a multipart stream.
/// RAM utilization remains low regardless of download size.
/// </summary>
/// <param name="taskID"></param>
/// <param name="routedItem"></param>
/// <param name="connection"></param>
/// <param name="httpManager"></param>
/// <param name="compress"></param>
/// <returns></returns>
public async Task DownloadViaHttp(int taskID, RoutedItem routedItem, LITEConnection connection, IHttpManager httpManager, bool compress = true)
{
    Connection = connection;
    var taskInfo = $"task: {taskID} connection: {Connection.name} resource: {routedItem.resource}";
    var profile = _profileStorage.Current;

    var stopWatch = new Stopwatch();
    stopWatch.Start();

    //string url = Connection.URL + $"/api/File/{routedItem.box}/{routedItem.resource}";
    string url = Connection.URL + FileAgentConstants.GetDownloadUrl(routedItem);
    string dir = profile.tempPath + Path.DirectorySeparatorChar + Connection.name + Path.DirectorySeparatorChar + Constants.Dirs.ToRules + Path.DirectorySeparatorChar + Guid.NewGuid();
    Directory.CreateDirectory(dir);

    long fileSize = 0;
    HttpResponseMessage response = null;
    MultipartFileStreamProvider streamProvider = null;
    MultipartFileStreamProvider contents = null;
    var httpClient = _liteHttpClient.GetClient(connection);

    try
    {
        _logger.Log(LogLevel.Debug, $"{taskInfo} download dir will be {dir}");
        _logger.Log(LogLevel.Debug, $"{taskInfo} url: {url} attempt: {routedItem.attempts}");

        var cookies = _liteHttpClient.GetCookies(url);
        _logger.LogCookies(cookies, taskInfo);

        // issue the GET
        var task = httpClient.GetAsync(url, HttpCompletionOption.ResponseHeadersRead, _taskManager.cts.Token);

        try
        {
            response = await task.ConfigureAwait(false);
        }
        catch (TaskCanceledException)
        {
            _logger.Log(LogLevel.Information, $"{taskInfo} Task Canceled");
        }

        // output the result
        _logger.LogHttpResponseAndHeaders(response, taskInfo);
        //if (Logger.logger.FileTraceLevel == "Verbose") _logger.Log(LogLevel.Debug, $"{taskInfo} await response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");

        switch (response.StatusCode)
        {
            case HttpStatusCode.OK:
                break;
            case HttpStatusCode.NotFound:
                routedItem.Error = HttpStatusCode.NotFound.ToString();
                _routedItemManager.Init(routedItem);
                _routedItemManager.Dequeue(Connection, Connection.fromEGS, nameof(Connection.fromEGS), error: true);
                return;
            case HttpStatusCode.Unauthorized:
                httpManager.loginNeeded = true;
                _liteHttpClient.DumpHttpClientDetails();
                return;
            default:
                _liteHttpClient.DumpHttpClientDetails();
                return;
        }

        if (!_util.IsDiskAvailable(dir, profile, routedItem.length))
        {
            _logger.Log(LogLevel.Debug, $"{taskInfo} Insufficient disk to write {url} to {dir}");
            return;
        }

        streamProvider = new MultipartFileStreamProvider(dir, 1024000);

        try
        {
            contents = await response.Content.ReadAsMultipartAsync(streamProvider, _taskManager.cts.Token).ConfigureAwait(false);
        }
        catch (Exception e)
        {
            //MIME is corrupt such as "Unexpected end of MIME multipart stream. MIME multipart message is not complete."
            //This usually happens if the upload does not complete. Catch as "normal" and remove resource as if success
            //since retrying will not help this condition.
            _logger.LogFullException(e, taskInfo);
            _liteHttpClient.DumpHttpClientDetails();

            if (await _deleteEGSResourceService.DeleteEGSResource(taskID, routedItem, connection, httpManager).ConfigureAwait(false))
            {
                _routedItemManager.Init(routedItem);
                _routedItemManager.Dequeue(Connection, Connection.fromEGS, nameof(Connection.fromEGS), error: false);
            }
            else
            {
                routedItem.Error = "Unable to delete EGS resource";
                _routedItemManager.Init(routedItem);
                _routedItemManager.Dequeue(Connection, Connection.fromEGS, nameof(Connection.fromEGS), error: true);
            }
            return;
        }

        int index = 0;
        _logger.Log(LogLevel.Debug, $"{taskInfo} Splitting {contents.FileData.Count} files into RoutedItems.");

        foreach (var part in contents.FileData)
        {
            try
            {
                index++;
                fileSize += new FileInfo(part.LocalFileName).Length;
                _logger.Log(LogLevel.Debug, $"{taskInfo} downloaded file: {part.LocalFileName}");

                RoutedItem ri = new RoutedItem(fromConnection: Connection.name, sourceFileName: part.LocalFileName, taskID: taskID, fileIndex: index, fileCount: contents.FileData.Count)
                {
                    type = RoutedItem.Type.FILE
                };

                _logger.Log(LogLevel.Debug, $"{taskInfo} Enqueuing RoutedItem {ri.sourceFileName}");

                _routedItemManager.Init(ri);
                _routedItemManager.Enqueue(Connection, Connection.toRules, nameof(Connection.toRules));
            }
            catch (Exception e)
            {
                _logger.LogFullException(e, taskInfo);
            }
        }

        if (await _deleteEGSResourceService.DeleteEGSResource(taskID, routedItem, connection, httpManager).ConfigureAwait(false))
        {
            _routedItemManager.Init(routedItem);
            _routedItemManager.Dequeue(Connection, Connection.fromEGS, nameof(Connection.fromEGS), error: false);
        }
        else
        {
            routedItem.Error = "Unable to delete EGS resource";
            _routedItemManager.Init(routedItem);
            _routedItemManager.Dequeue(Connection, Connection.fromEGS, nameof(Connection.fromEGS), error: true);
        }
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (NullReferenceException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        //throw e;
    }
    catch (HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} Inner Exception: {e.InnerException}");
        }
        _liteHttpClient.DumpHttpClientDetails();
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        _liteHttpClient.DumpHttpClientDetails();
    }
    finally
    {
        if (response != null)
        {
            response.Dispose();
        }
        _taskManager.Stop($"{Connection.name}.DownloadViaHttp");
    }

    stopWatch.Stop();
    _logger.Log(LogLevel.Information,
        $"{taskInfo} elapsed: {stopWatch.Elapsed} size: {fileSize} rate: {(float)fileSize / stopWatch.Elapsed.TotalMilliseconds * 1000 / 1000000} MB/s");
}

public async Task SendToRules(int taskID, ConnectionToRulesManagerAdapterArgs args, bool responsive = true)
{
    var Connection = args.Connection;
    var toRules = args.toRules;
    var cache = args.cache;
    IConnectionRoutedCacheManager connectionRoutedCacheManager = args.connectionRoutedCacheManager;

    Throw.IfNull(Connection);

    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    _logger.Log(LogLevel.Debug, $"{taskInfo} Entering SendToRules.");

    try
    {
        do
        {
            var routedItem = toRules.Take(_taskManager.cts.Token); // item is removed from the collection prior to processing
            var count = toRules.Count + 1; // +1 accounts for the item just taken
            _logger.Log(LogLevel.Information, $"{taskInfo} toRules: {count} items to transfer.");

            routedItem.attempts++;
            routedItem.lastAttempt = DateTime.Now;

            try
            {
                if (routedItem.attempts <= Connection.maxAttempts)
                {
                    // shb 2019-03-15 BOUR-85 to support intelligent routing of items based on pre-determined routing info.
                    // EX: hl7 has the routing info while subsequent dicom items do not.
                    // Order of precedence:
                    //  0.5) No caching for request/response, done elsewhere
                    //  1) No routing info - use connection-based rules
                    //  2) Prior routing info for patientID and accession # - use provider routing rules
                    //  3) Address-oriented routing in item - overrides all other rules
                    if (routedItem.id != null && routedItem.type != RoutedItem.Type.RPC)
                    {
                        routedItem = _connectionCache.CacheResponse(Connection, routedItem, cache); // for a dicom/file item to look up prior toConnections of hl7
                    }

                    var currentProfile = _profileStorage.Current;
                    _rulesManager.Init(currentProfile.rules);
                    routedItem = await _rulesManager.SendToRules(routedItem, _routedItemManager, connectionRoutedCacheManager);

                    if (routedItem != null && routedItem.id != null && routedItem.type != RoutedItem.Type.RPC)
                    {
                        routedItem = _connectionCache.CacheResponse(Connection, routedItem, cache); // for an hl7 to record the toConnections
                    }

                    _routedItemManager.Init(routedItem);
                    _routedItemManager.Dequeue(Connection, toRules, nameof(toRules), error: false);
                }
                else
                {
                    _logger.Log(LogLevel.Warning, $"{taskInfo} {routedItem.RoutedItemMetaFile} exceeded maxAttempts");
                    _routedItemManager.Init(routedItem);
                    _routedItemManager.Dequeue(Connection, toRules, nameof(toRules), error: true);
                }
            }
            catch (Exception e)
            {
                _logger.LogFullException(e, $"{taskInfo} returning item to queue: {routedItem.RoutedItemMetaFile}");
                if (routedItem != null)
                {
                    toRules.Add(routedItem, _taskManager.cts.Token);
                }
            }
        } while (responsive);
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
    }
    finally
    {
        _taskManager.Stop($"{Connection.name}.SendToRules");
    }

    _logger.Log(LogLevel.Debug, $"{taskInfo} Exiting SendToRules.");
}
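// NOTE (sketch): toRules is consumed with Take(token) and refilled with Add(item, token), which is the
// standard BlockingCollection<T> producer/consumer pattern. The hypothetical method below is a minimal,
// self-contained illustration of that pattern; the names are illustrative and it is not called anywhere.
private static void BlockingCollectionPatternDemo(CancellationToken token)
{
    using (var queue = new System.Collections.Concurrent.BlockingCollection<string>())
    {
        queue.Add("work-item-1", token);
        queue.CompleteAdding(); // no more producers; the consuming loop below drains and exits

        // GetConsumingEnumerable blocks on an empty (but not completed) collection,
        // mirroring how toRules.Take(_taskManager.cts.Token) blocks in SendToRules.
        foreach (var item in queue.GetConsumingEnumerable(token))
        {
            Console.WriteLine($"processing {item}");
        }
    }
}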
public async Task GetShareDestinations(int taskID, LifeImageCloudConnection Connection, IHttpManager httpManager)
{
    Throw.IfNull(Connection);
    Throw.IfNull(httpManager);

    var taskInfo = $"task: {taskID} connection: {Connection.name}";

    try
    {
        _logger.Log(LogLevel.Debug, $"{taskInfo} Processing getShareDestinations");

        var httpClient = _liteHttpClient.GetClient(Connection);

        try
        {
            // set the URL
            string shareURL = Connection.URL + CloudAgentConstants.GetShareDestinationUrl;
            _logger.Log(LogLevel.Debug, $"{taskInfo} shareURL: {shareURL}");

            var cookies = _liteHttpClient.GetCookies(shareURL);
            _logger.LogCookies(cookies, taskInfo);

            // issue the GET
            var task = httpClient.GetAsync(shareURL);
            var response = await task;

            // output the result
            _logger.LogHttpResponseAndHeaders(response, taskInfo);
            _logger.Log(LogLevel.Debug, $"{taskInfo} await response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");

            if (response.StatusCode != HttpStatusCode.OK)
            {
                if (response.StatusCode == HttpStatusCode.Unauthorized)
                {
                    httpManager.loginNeeded = true;
                }

                _logger.Log(LogLevel.Warning, $"{taskInfo} Problem getting share destinations. {response.StatusCode}");
                _liteHttpClient.DumpHttpClientDetails();
                return; // don't attempt to deserialize an error body
            }

            // convert from stream to JSON
            var serializer = new DataContractJsonSerializer(typeof(List<ShareDestinations>));
            Connection.shareDestinations = serializer.ReadObject(await response.Content.ReadAsStreamAsync()) as List<ShareDestinations>;
        }
        catch (TaskCanceledException)
        {
            _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
        }
        catch (HttpRequestException e)
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
            if (e.InnerException != null)
            {
                _logger.Log(LogLevel.Warning, $"{taskInfo} Inner Exception: {e.InnerException}");
            }
            _liteHttpClient.DumpHttpClientDetails();
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
            _liteHttpClient.DumpHttpClientDetails();
        }
    }
    finally
    {
        _taskManager.Stop($"{Connection.name}.getShareDestinations");
    }
}
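// NOTE (sketch): the deserialization above relies on DataContractJsonSerializer mapping a JSON array
// directly onto List<ShareDestinations>. A minimal, self-contained example of the same technique;
// the Dest type below is illustrative, not the real ShareDestinations contract (only boxUuid is
// known from this file, boxName is an assumption).
private static List<Dest> ParseDestinations(Stream json)
{
    var serializer = new System.Runtime.Serialization.Json.DataContractJsonSerializer(typeof(List<Dest>));
    return serializer.ReadObject(json) as List<Dest>;
}

[System.Runtime.Serialization.DataContract]
private class Dest
{
    [System.Runtime.Serialization.DataMember] public string boxUuid { get; set; }
    [System.Runtime.Serialization.DataMember] public string boxName { get; set; }
}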
public async Task Download(int taskID, LITEConnection connection, IHttpManager httpManager, SemaphoreSlim FromEGSSignal)
{
    Connection = connection;
    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    var profile = _profileStorage.Current;

    DateTime lastGetResources = DateTime.MinValue;
    const int GetResourcesInterval = 120000; // ms between GetResources polls
    int lastResourceCount = 0;

    try
    {
        do
        {
            var temp = Connection.fromEGS.ToList();
            if (temp.Count == 0 || !temp.Any(e => e.attempts == 0))
            {
                // poll for new resources if the interval has elapsed or the last poll returned items
                var getResourcesTime = DateTime.Now.AddMilliseconds(-GetResourcesInterval);
                if (lastGetResources.CompareTo(getResourcesTime) < 0 || lastResourceCount > 0)
                {
                    lastResourceCount = await _getLiteReresourcesService.GetResources(taskID, connection, httpManager);
                    lastGetResources = DateTime.Now;
                }

                bool success = await FromEGSSignal.WaitAsync(profile.KickOffInterval, _taskManager.cts.Token).ConfigureAwait(false);
            }
            else
            {
                await Task.Delay(profile.taskDelay).ConfigureAwait(false);
            }

            foreach (var routedItem in Connection.fromEGS.ToArray())
            {
                await ProcessItem(taskID, routedItem, connection, httpManager);
            }

            // wait for the per-item downloads to drain without blocking the thread
            await Task.WhenAll(_taskManager.FindByType($"{Connection.name}.DownloadViaHttp"));
        } while (Connection.responsive);
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"Task was canceled.");
    }
    catch (OperationCanceledException)
    {
        _logger.Log(LogLevel.Warning, $"Wait Operation Canceled. Exiting Download");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        _logger.Log(LogLevel.Critical, $"{taskInfo} Exiting Download");
    }
    finally
    {
        _taskManager.Stop($"{Connection.name}.Download");
    }
}
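// NOTE (sketch): the gating logic above re-polls when the interval has elapsed OR the previous poll
// still returned resources. Extracted as a hypothetical pure helper for clarity (illustrative only):
private static bool ShouldPoll(DateTime lastPoll, int intervalMs, int lastCount, DateTime now)
{
    // true when the last poll is older than the interval, or when the server still had items last time
    return lastPoll < now.AddMilliseconds(-intervalMs) || lastCount > 0;
}
// Usage: ShouldPoll(lastGetResources, GetResourcesInterval, lastResourceCount, DateTime.Now)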
public async Task putHL7(RoutedItem routedItem, int taskID, LifeImageCloudConnection connection, IHttpManager httpManager)
{
    var Connection = connection;
    var httpClient = _liteHttpClient.GetClient(connection);
    var taskInfo = $"task: {taskID} connection: {Connection.name}";

    MultipartContent content = null;
    StreamContent streamContent = null;
    HttpResponseMessage response = null;

    try
    {
        if (!File.Exists(routedItem.sourceFileName))
        {
            routedItem.Error = "File Not Found";
            _routedItemManager.Init(routedItem);
            _routedItemManager.Dequeue(Connection, Connection.toCloud, nameof(Connection.toCloud), error: true);
            return;
        }

        var stopWatch = new Stopwatch();
        stopWatch.Start();

        // set the URL, e.g. http://localhost:8080/universal-inbox/api/agent/v1/hl7-upload
        string putHL7URL = Connection.URL + CloudAgentConstants.GetPutHl7Url(routedItem.fromConnection);
        _logger.Log(LogLevel.Debug, $"{taskInfo} putHL7URL: {putHL7URL}");

        // generate a guid for the boundary... boundaries cannot be accidentally found in the content
        var boundary = Guid.NewGuid();
        _logger.Log(LogLevel.Debug, $"{taskInfo} boundary: {boundary}");

        // create the content
        content = new MultipartContent("related", boundary.ToString());

        // add the sharing headers
        List<string> shareHeader = new List<string>();
        if (Connection.shareDestinations != null)
        {
            foreach (var connectionSet in routedItem.toConnections.FindAll(e => e.connectionName.Equals(Connection.name)))
            {
                if (connectionSet.shareDestinations != null)
                {
                    foreach (var shareDestination in connectionSet.shareDestinations)
                    {
                        shareHeader.Add(shareDestination.boxUuid);
                    }
                }
            }
        }
        content.Headers.Add("X-Li-Destination", shareHeader);

        var fileSize = new FileInfo(routedItem.sourceFileName).Length;
        streamContent = new StreamContent(File.OpenRead(routedItem.sourceFileName));
        streamContent.Headers.ContentDisposition = new ContentDispositionHeaderValue("attachment")
        {
            FileName = routedItem.sourceFileName
        };
        streamContent.Headers.ContentType = new MediaTypeHeaderValue("application/octet-stream");
        content.Add(streamContent);

        // issue the POST
        Task<HttpResponseMessage> task;

        var cookies = _liteHttpClient.GetCookies(putHL7URL);
        _logger.LogCookies(cookies, taskInfo);

        if (routedItem.Compress == true)
        {
            task = httpClient.PostAsync(putHL7URL, new CompressedContent(content, "gzip"), _taskManager.cts.Token);
        }
        else
        {
            task = httpClient.PostAsync(putHL7URL, content, _taskManager.cts.Token);
        }

        response = await task;

        // output the result
        _logger.LogHttpResponseAndHeaders(response, taskInfo);
        if (response.StatusCode == HttpStatusCode.Unauthorized)
        {
            httpManager.loginNeeded = true;
        }

        _logger.Log(LogLevel.Debug, $"{taskInfo} response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");

        stopWatch.Stop();
        _logger.Log(LogLevel.Information, $"{taskInfo} elapsed: {stopWatch.Elapsed} size: {fileSize} rate: {(float)fileSize / stopWatch.Elapsed.TotalMilliseconds * 1000 / 1000000} MB/s");

        // dispose before dequeuing so the file handle on sourceFileName is released; then dequeue the work, we're done!
        streamContent.Dispose();
        response.Dispose();
        content.Dispose();

        _routedItemManager.Init(routedItem);
        _routedItemManager.Dequeue(Connection, Connection.toCloud, nameof(Connection.toCloud));
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} Inner Exception: {e.InnerException}");
        }
        _liteHttpClient.DumpHttpClientDetails();
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        _liteHttpClient.DumpHttpClientDetails();
    }
    finally
    {
        try
        {
            _taskManager.Stop($"{Connection.name}.putHL7");

            // safety net for the exception paths; Dispose is safe to call twice on the success path
            streamContent?.Dispose();
            response?.Dispose();
            content?.Dispose();
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
        }
    }
}
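// NOTE (sketch): a minimal, self-contained illustration of the multipart/related upload shape used
// above: a GUID boundary, a custom destination header, and a single octet-stream body part. The
// header name comes from this file; the method name and signature are illustrative only.
private static HttpRequestMessage BuildHl7Upload(string url, string filePath, IEnumerable<string> destinations)
{
    var content = new MultipartContent("related", Guid.NewGuid().ToString());
    content.Headers.Add("X-Li-Destination", destinations);

    var part = new StreamContent(File.OpenRead(filePath));
    part.Headers.ContentDisposition = new ContentDispositionHeaderValue("attachment") { FileName = filePath };
    part.Headers.ContentType = new MediaTypeHeaderValue("application/octet-stream");
    content.Add(part);

    return new HttpRequestMessage(HttpMethod.Post, url) { Content = content };
}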
public async Task<bool> StoreScp(int taskID, DcmtkConnection connection)
{
    Connection = connection;

    var stopWatch = new Stopwatch();
    stopWatch.Start();

    var proc = new Process();
    var procinfo = new ProcessStartInfo();
    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    var profile = _profileStorage.Current;

    try
    {
        var dir = profile.tempPath + Path.DirectorySeparatorChar + Connection.name + Path.DirectorySeparatorChar + "toScanner";
        Directory.CreateDirectory(dir);

        string args = $"-xf \"{Connection.storescpCfgFile}\" Default -od \"{dir}\" -aet {Connection.localAETitle} {Connection.localPort}";

        procinfo.UseShellExecute = false;
        procinfo.RedirectStandardError = true;
        procinfo.RedirectStandardOutput = true;
        procinfo.CreateNoWindow = true;

        if (profile.dcmtkLibPath != null)
        {
            procinfo.WorkingDirectory = profile.dcmtkLibPath;
            procinfo.FileName = profile.dcmtkLibPath + Path.DirectorySeparatorChar + "bin" + Path.DirectorySeparatorChar + "storescp";

            // point DCMDICTPATH at the four dcmtk data dictionaries
            var dictDir = profile.dcmtkLibPath + Path.DirectorySeparatorChar + Constants.Dirs.share + Path.DirectorySeparatorChar + Constants.Dirs.dcmtk + Path.DirectorySeparatorChar;
            var DCMDICTPATH = dictDir + "dicom.dic";
            DCMDICTPATH += _util.EnvSeparatorChar() + dictDir + "acrnema.dic";
            DCMDICTPATH += _util.EnvSeparatorChar() + dictDir + "diconde.dic";
            DCMDICTPATH += _util.EnvSeparatorChar() + dictDir + "private.dic";
            procinfo.Environment.Add("DCMDICTPATH", DCMDICTPATH);
        }
        else
        {
            procinfo.FileName = "storescp";
        }

        procinfo.Arguments = args;
        proc.StartInfo = procinfo;
        proc.OutputDataReceived += OutputHandler;
        proc.ErrorDataReceived += ErrorHandler;
        proc.EnableRaisingEvents = true;
        proc.Exited += OnProcExit;

        _logger.Log(LogLevel.Information, $"{taskInfo} starting {procinfo.FileName} {procinfo.Arguments}");

        if (proc.Start())
        {
            proc.BeginOutputReadLine();
            proc.BeginErrorReadLine();

            _logger.Log(LogLevel.Information, $"{taskInfo} {procinfo.FileName} is listening on {procinfo.Arguments}...");

            while (!proc.HasExited)
            {
                await Task.Delay(10000, _taskManager.cts.Token).ConfigureAwait(false);
                if (_taskManager.cts.IsCancellationRequested)
                {
                    _logger.Log(LogLevel.Debug, $"{taskInfo} {procinfo.FileName} is not killed due to potential crash. Trace AMG1");
                }
            }

            if (proc.ExitCode != 0)
            {
                _logger.Log(LogLevel.Warning, $"{taskInfo} {procinfo.FileName} ExitCode: {proc.ExitCode}");
                _logger.Log(LogLevel.Debug, $"{taskInfo} {procinfo.FileName} trace AMG2");
                return false;
            }

            _logger.Log(LogLevel.Information, $"{taskInfo} {procinfo.FileName} status: {proc.ExitCode} elapsed: {stopWatch.Elapsed}");
            return true;
        }

        return false;
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"Task was canceled.");
        return false;
    }
    catch (Exception e)
    {
        _logger.LogFullException(e);
        return false;
    }
    finally
    {
        try
        {
            if (!proc.HasExited)
            {
                proc.Kill();
            }
        }
        catch (Exception)
        {
            // eat it
        }

        _taskManager.Stop($"{Connection.name}.storescp");
    }
}
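// NOTE (sketch): a hypothetical helper that builds the DCMDICTPATH value used above for any set of
// dcmtk dictionaries; Path.PathSeparator plays the role of _util.EnvSeparatorChar() here (assumption).
private static string BuildDcmDictPath(string dcmtkLibPath, params string[] dictionaries)
{
    var dictDir = Path.Combine(dcmtkLibPath, "share", "dcmtk");
    // e.g. "/opt/dcmtk/share/dcmtk/dicom.dic:/opt/dcmtk/share/dcmtk/acrnema.dic" on Unix
    return string.Join(Path.PathSeparator.ToString(), dictionaries.Select(d => Path.Combine(dictDir, d)));
}
// Usage: BuildDcmDictPath(profile.dcmtkLibPath, "dicom.dic", "acrnema.dic", "diconde.dic", "private.dic")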
public async Task<bool> EchoSCU(int taskID, DcmtkConnection connection)
{
    Connection = connection;

    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    var stopWatch = new Stopwatch();
    stopWatch.Start();

    var proc = new Process();
    var procinfo = new ProcessStartInfo();
    var profile = _profileStorage.Current;

    try
    {
        string args = $"{Connection.remoteHostname} {Connection.remotePort} -aec {Connection.remoteAETitle} -aet {Connection.localAETitle}";

        procinfo.UseShellExecute = false;
        procinfo.RedirectStandardError = true;
        procinfo.RedirectStandardOutput = true;
        procinfo.CreateNoWindow = true;

        if (profile.dcmtkLibPath != null)
        {
            procinfo.FileName = profile.dcmtkLibPath + Path.DirectorySeparatorChar + "bin" + Path.DirectorySeparatorChar + "echoscu";

            // point DCMDICTPATH at the four dcmtk data dictionaries
            var dictDir = profile.dcmtkLibPath + Path.DirectorySeparatorChar + Constants.Dirs.share + Path.DirectorySeparatorChar + Constants.Dirs.dcmtk + Path.DirectorySeparatorChar;
            var DCMDICTPATH = dictDir + "dicom.dic";
            DCMDICTPATH += _util.EnvSeparatorChar() + dictDir + "acrnema.dic";
            DCMDICTPATH += _util.EnvSeparatorChar() + dictDir + "diconde.dic";
            DCMDICTPATH += _util.EnvSeparatorChar() + dictDir + "private.dic";
            procinfo.Environment.Add("DCMDICTPATH", DCMDICTPATH);
        }
        else
        {
            procinfo.FileName = "echoscu";
        }

        procinfo.Arguments = args;
        proc.StartInfo = procinfo;
        proc.OutputDataReceived += OutputHandler;
        proc.ErrorDataReceived += ErrorHandler;
        proc.EnableRaisingEvents = true;
        proc.Exited += OnProcExit;

        _logger.Log(LogLevel.Information, $"{taskInfo} starting {procinfo.FileName} {procinfo.Arguments}");

        proc.Start();
        proc.BeginOutputReadLine();
        proc.BeginErrorReadLine();

        while (!proc.HasExited)
        {
            _logger.Log(LogLevel.Information, $"{taskInfo} echoscu is running...");
            await Task.Delay(1000, _taskManager.cts.Token).ConfigureAwait(false);
        }

        if (proc.ExitCode != 0)
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} {procinfo.FileName} ExitCode: {proc.ExitCode}");
            return false;
        }
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"Task was canceled.");
        return false; // the process may still be running, so don't fall through to read ExitCode
    }
    catch (Exception e)
    {
        _logger.Log(LogLevel.Critical, $"{e.Message} {e.StackTrace}");
        return false;
    }
    finally
    {
        _taskManager.Stop($"{Connection.name}.echoscu");
    }

    _logger.Log(LogLevel.Information, $"{taskInfo} {procinfo.FileName} status: {proc.ExitCode} elapsed: {stopWatch.Elapsed}");
    return true;
}
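// NOTE (sketch): on .NET 5+ the polling loop above could be replaced with WaitForExitAsync, which
// honors the cancellation token directly. Illustrative only; the original loop also emits a
// heartbeat log line, which this version omits.
private static async Task<int> RunAndWaitAsync(ProcessStartInfo startInfo, CancellationToken token)
{
    using (var proc = Process.Start(startInfo)) // throws if the executable cannot be started
    {
        await proc.WaitForExitAsync(token);     // completes when the process exits or the token fires
        return proc.ExitCode;
    }
}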
public async Task DownloadStudy(int taskID, ImagingStudy imagingStudy, LifeImageCloudConnection connection, IHttpManager httpManager)
{
    var Connection = connection;
    var stopWatch = new Stopwatch();
    var taskInfo = $"task: {taskID} connection: {Connection.name}";

    try
    {
        stopWatch.Start();

        _logger.Log(LogLevel.Debug, $"{taskInfo} downloading study: {imagingStudy.uid} downloadStarted: {imagingStudy.downloadStarted:yyyy-MM-dd HH:mm:ss.ffff} downloadCompleted: {imagingStudy.downloadCompleted:yyyy-MM-dd HH:mm:ss.ffff} attempts: {imagingStudy.attempts}");

        if (imagingStudy?.series == null)
        {
            return; // nothing to download
        }

        foreach (var series in imagingStudy.series)
        {
            _logger.Log(LogLevel.Debug, $"{taskInfo} checking series: {series.uid} downloadStarted: {series.downloadStarted:yyyy-MM-dd HH:mm:ss.ffff} downloadCompleted: {series.downloadCompleted:yyyy-MM-dd HH:mm:ss.ffff} attempts: {series.attempts}");

            if (series.downloadCompleted == DateTime.MinValue) // not completed
            {
                _logger.Log(LogLevel.Debug, $"{taskInfo} series: {series.uid} not completed.");

                if (series.downloadStarted < DateTime.Now.AddMinutes(-Connection.retryDelayMinutes)) // not attempted lately
                {
                    _logger.Log(LogLevel.Debug, $"{taskInfo} series: {series.uid} not attempted lately.");

                    if (imagingStudy.series?.FindAll(e => e.attempts > Connection.maxAttempts).Count == 0) // no series in this study has exceeded max attempts
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} series: {series.uid} not exceeded max attempts.");

                        var url = $"{imagingStudy.url}/series/{series.uid.Substring(8)}";

                        // if the task list already contains this series don't add it again;
                        // equality is determined by the reference field only, in this case the imagingStudy.url
                        if (await _taskManager.CountByReference(url) == 0)
                        {
                            _logger.Log(LogLevel.Debug, $"{taskInfo} series: {series.uid} not in task list.");
                            _logger.Log(LogLevel.Debug, $"{taskInfo} series: {series.uid} selected for download downloadStarted: {series.downloadStarted:yyyy-MM-dd HH:mm:ss.ffff} downloadCompleted: {series.downloadCompleted:yyyy-MM-dd HH:mm:ss.ffff} attempts: {series.attempts}");

                            series.downloadStarted = DateTime.Now;
                            series.attempts++;

                            var newTaskID = _taskManager.NewTaskID();
                            Task task = new Task(new Action(async () => await _studiesDownloadManager.wadoAsFileStream(connection: connection, newTaskID, httpManager: httpManager, study: imagingStudy, series: series)), _taskManager.cts.Token);
                            await _taskManager.Start(newTaskID, task, $"{Connection.name}.Wado", url, isLongRunning: false);
                        }
                        else
                        {
                            _logger.Log(LogLevel.Debug, $"{taskInfo} series: {series.uid} in task list. Skipping.");
                        }
                    }
                    else
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} series: {series.uid} exceeded max attempts. Skipping.");
                    }
                }
                else
                {
                    _logger.Log(LogLevel.Debug, $"{taskInfo} series: {series.uid} attempted lately. Skipping.");
                }
            }
            else
            {
                _logger.Log(LogLevel.Debug, $"{taskInfo} series: {series.uid} completed. Skipping.");
            }
        }

        stopWatch.Stop();
        _logger.Log(LogLevel.Information, $"{taskInfo} method level elapsed: {stopWatch.Elapsed} study: {imagingStudy.uid}");
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
    }
    finally
    {
        _taskManager.Stop($"{Connection.name}.downloadStudy");
    }
}
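// NOTE (sketch): the three nested checks above gate each series download. A hypothetical predicate
// capturing the same conditions; Series/ImagingStudy stand for the real model types (illustrative only):
private static bool IsSeriesEligible(Series series, ImagingStudy study, int retryDelayMinutes, int maxAttempts)
{
    return series.downloadCompleted == DateTime.MinValue                        // not completed
        && series.downloadStarted < DateTime.Now.AddMinutes(-retryDelayMinutes) // not attempted lately
        && study.series.TrueForAll(s => s.attempts <= maxAttempts);             // no series exceeded max attempts
}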
/// <summary>
/// Gets the cloud requests.
/// </summary>
/// <param name="taskID"></param>
/// <param name="Connection"></param>
/// <param name="cache"></param>
/// <param name="httpManager"></param>
/// <returns></returns>
public async Task GetRequests(int taskID, LifeImageCloudConnection Connection, IConnectionRoutedCacheManager cache, IHttpManager httpManager)
{
    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    HttpResponseMessage response = null;
    var httpClient = _liteHttpClient.GetClient(Connection);

    try
    {
        while (!_taskManager.cts.IsCancellationRequested)
        {
            await Task.Delay(_profileStorage.Current.KickOffInterval, _taskManager.cts.Token);

            // BOUR-1022 shb The dictionary check before enqueuing (below) is enough to prevent duplicate behavior,
            // so we only count and report outstanding requests here rather than skipping the poll.
            if (LifeImageCloudConnectionManager.cache.Count > 0)
            {
                int requestCount = 0;

                // check to see if there are any requests
                foreach (var cacheItem in LifeImageCloudConnectionManager.cache.ToArray())
                {
                    _logger.Log(LogLevel.Debug, $"{taskInfo} Cache entry id: {cacheItem.Key}");
                    foreach (var item in cacheItem.Value.ToArray())
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} fromConnection: {item.fromConnection} id: {item.id} started: {item.startTime} complete: {item.resultsTime} status: {item.status}");
                        if (item.type == RoutedItem.Type.RPC)
                        {
                            requestCount++;
                        }
                    }
                }

                if (requestCount > 0)
                {
                    // BOUR-1060 relax the condition and rely on the dictionary logic below instead of
                    // skipping new requests until the cache is clear
                    _logger.Log(LogLevel.Warning, $"{taskInfo} response cache has {requestCount} request items.");
                }
            }

            // set the URL
            string agentTasksURL = Connection.URL + CloudAgentConstants.GetAgentTasksUrl;
            _logger.Log(LogLevel.Debug, $"{taskInfo} agentTasksURL: {agentTasksURL}");

            var cookies = _liteHttpClient.GetCookies(agentTasksURL);
            _logger.LogCookies(cookies, taskInfo);

            // issue the GET
            var task = httpClient.GetAsync(agentTasksURL);
            response = await task;

            // output the result
            _logger.LogHttpResponseAndHeaders(response, taskInfo);

            if (response.StatusCode != HttpStatusCode.OK)
            {
                if (response.StatusCode == HttpStatusCode.Unauthorized)
                {
                    httpManager.loginNeeded = true;
                }

                _logger.Log(LogLevel.Warning, $"{taskInfo} Problem getting agent tasks. {agentTasksURL} {response.StatusCode}");
                _liteHttpClient.DumpHttpClientDetails();
                continue; // don't try to deserialize an error body
            }

            // convert from stream to JSON
            string results = await response.Content.ReadAsStringAsync();
            var objResults = JsonSerializer.Deserialize<Dictionary<string, List<Dictionary<string, string>>>>(results);

            foreach (var entry in objResults)
            {
                _logger.Log(LogLevel.Debug, $"{taskInfo} key: {entry.Key}");
                var list = entry.Value;
                foreach (var item in list)
                {
                    foreach (var subkey in item)
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} subkey.Key: {subkey.Key} subKey.Value: {subkey.Value} ");
                    }
                }
            }

            if (objResults.Count > 0)
            {
                // 2018-09-19 shb unwrap encoded task data and then send to rules
                var agentRequestList = objResults["modelMapList"];
                foreach (var agentRequest in agentRequestList)
                {
                    byte[] data = Convert.FromBase64String(agentRequest["task"]);
                    string agentRequestAsString = Encoding.UTF8.GetString(data);
                    agentRequest["task"] = agentRequestAsString;

                    string id = agentRequest["id"];
                    string request = agentRequest["task"];
                    string requestType = agentRequest["task_type"];
                    agentRequest.TryGetValue("connection", out string connection);

                    RoutedItem ri = new RoutedItem(fromConnection: Connection.name, id: id, request: request, requestType: requestType)
                    {
                        type = RoutedItem.Type.RPC,
                        status = RoutedItem.Status.PENDING,
                        startTime = DateTime.Now,
                        TaskID = taskID
                    };

                    if (connection != null && connection != "*")
                    {
                        ConnectionSet connSet = new ConnectionSet
                        {
                            connectionName = connection
                        };
                        ri.toConnections.Add(connSet);
                    }

                    LifeImageCloudConnectionManager.cache.TryGetValue(ri.id, out List<RoutedItem> cacheItem);
                    if (cacheItem == null)
                    {
                        // determine which connections will need to reply and prime the response cache
                        _rulesManager.Init(_profileStorage.Current.rules);
                        var connsets = _rulesManager.Eval(ri);

                        foreach (var connset in connsets)
                        {
                            _routedItemManager.Init(ri);
                            var prime = (RoutedItem)_routedItemManager.Clone();
                            prime.startTime = DateTime.Now; // clock starts ticking now
                            prime.status = RoutedItem.Status.PENDING;
                            prime.fromConnection = _profileStorage.Current.connections.Find(e => e.name == connset.connectionName).name;

                            _logger.Log(LogLevel.Debug, $"{taskInfo} Priming Response cache id: {id} conn: {prime.fromConnection} ");
                            cache.Route(prime);
                        }

                        // enqueue the request
                        _logger.Log(LogLevel.Debug, $"{taskInfo} Enqueuing id: {id} requestType: {requestType} subKey.Value: {request} ");
                        _routedItemManager.Init(ri);
                        _routedItemManager.Enqueue(Connection, Connection.toRules, nameof(Connection.toRules));

                        // BOUR-995 let cloud know we got the request with a status of PENDING
                        await _postResponseCloudService.PostResponse(Connection, ri, cache, httpManager, taskID);
                    }
                    else
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} Exists id: {id} requestType: {requestType} subKey.Value: {request} ");
                        foreach (var item in cacheItem)
                        {
                            _logger.Log(LogLevel.Debug, $"{taskInfo} fromConnection: {item.fromConnection} started: {item.startTime} complete: {item.resultsTime} status: {item.status}");
                            foreach (var ctr in item.cloudTaskResults)
                            {
                                foreach (var result in ctr.results)
                                {
                                    _logger.Log(LogLevel.Debug, $"{taskInfo} Exists id: {item.id} results: {result}");
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (System.InvalidOperationException) // for the "Collection was modified" case, we can wait
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Waiting for requests to complete before getting new requests");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
    }
    finally
    {
        try
        {
            _taskManager.Stop($"{Connection.name}.GetRequests");
            response?.Dispose();
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
        }
    }
}
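// NOTE (sketch): agent-task payloads arrive base64-encoded in the "task" field and are unwrapped in
// GetRequests with Convert.FromBase64String + Encoding.UTF8.GetString. Round-trip illustration:
private static string DecodeTaskPayload(string base64)
{
    // "eyJvcCI6InBpbmcifQ==" -> "{\"op\":\"ping\"}"
    return System.Text.Encoding.UTF8.GetString(Convert.FromBase64String(base64));
}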