/// <summary>
/// Registers this LITE instance's presence (and share-destination info used for
/// address-oriented routing) with the EGS by POSTing the current profile as JSON,
/// once per address resolved from the connection's remote hostname.
/// </summary>
/// <param name="taskID">Identifier of the calling task, used for log correlation.</param>
/// <param name="connection">The LITE connection describing the EGS endpoint.</param>
/// <param name="httpManager">Receives the loginNeeded flag based on the response.</param>
public async Task RegisterWithEGS(int taskID, LITEConnection connection, IHttpManager httpManager)
{
    Connection = connection;
    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    var httpClient = _liteHttpClient.GetClient(connection);

    try
    {
        //contact each EGS and register presence with LITEServicePoint and share dest info which will be used for address oriented routing.
        //look up on known dns the other LITE EGS instances and register presence
        IPHostEntry hostEntry = Dns.GetHostEntry(Connection.remoteHostname);

        // NOTE(review): the POST below always targets Connection.URL, not the iterated
        // iPAddress, so the same registration is repeated once per resolved address —
        // confirm this repetition is intended.
        foreach (var iPAddress in hostEntry.AddressList)
        {
            //set the URL
            string url = Connection.URL + LiteAgentConstants.BaseUrl;
            _logger.Log(LogLevel.Debug, $"{taskInfo} url: {url}");

            var cookies = _liteHttpClient.GetCookies(url);
            _logger.LogCookies(cookies, taskInfo);

            // Serialize the current profile as the registration payload.
            var profile = _profileStorage.Current;
            string profilejson = JsonSerializer.Serialize(profile);

            // fix: the MemoryStream/StreamWriter/StreamContent/HttpResponseMessage used
            // here were never disposed; using declarations scope them to each iteration.
            using var content = new StreamContent(new MemoryStream(Encoding.UTF8.GetBytes(profilejson)));

            // issue the POST
            using HttpResponseMessage response = await httpClient.PostAsync(url, content, _taskManager.cts.Token);

            // output the result
            _logger.LogHttpResponseAndHeaders(response, taskInfo);
            _logger.Log(LogLevel.Debug, $"{taskInfo} await response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");

            if (response.StatusCode == HttpStatusCode.Created)
            {
                connection.loginAttempts = 0;
                _logger.Log(LogLevel.Debug, $"{taskInfo} LITE Successfully Registered with EGS!");
                httpManager.loginNeeded = false;
            }
            else
            {
                _liteHttpClient.DumpHttpClientDetails();
                httpManager.loginNeeded = true;

                // Give up the whole process once the configured attempt budget is spent.
                if (response.StatusCode == HttpStatusCode.Unauthorized && connection.loginAttempts == Connection.maxAttempts)
                {
                    LiteEngine.shutdown(null, null);
                    Environment.Exit(0);
                }
            }
        }
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task Canceled");
    }
    catch (HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} HttpRequestException: Unable to login: {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} HttpRequestException: (Inner Exception) {e.InnerException.Message} {e.InnerException.StackTrace}");
        }
        _liteHttpClient.DumpHttpClientDetails();
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, $"{taskInfo} Unable to login");
        _liteHttpClient.DumpHttpClientDetails();
    }
}
/// <summary>
/// Polls each EGS box on the connection for its file list and enqueues any resource
/// not already present in Connection.fromEGS.
/// </summary>
/// <param name="taskID">Identifier of the calling task, used for log correlation.</param>
/// <param name="connection">EGS connection whose boxes are polled.</param>
/// <param name="manager">Receives loginNeeded = true on a 401 response.</param>
/// <returns>The file count reported by the first box that returns any files; 0 otherwise.</returns>
public async Task<int> GetResources(int taskID, LITEConnection connection, IHttpManager manager)
{
    Connection = connection;

    //get the resource list from EGS
    try
    {
        var httpClient = _liteHttpClient.GetClient(connection);
        var taskInfo = $"task: {taskID} connection: {Connection.name}";

        foreach (var shareDestination in Connection.boxes)
        {
            //set the URL
            string url = Connection.URL + "/api/File/" + shareDestination.boxUuid;
            _logger.Log(LogLevel.Debug, $"{taskInfo} URL: {url}");

            var cookies = _liteHttpClient.GetCookies(url);
            _logger.LogCookies(cookies, taskInfo);

            // issue the GET (fix: response is now disposed)
            using var response = await httpClient.GetAsync(url);

            // output the result
            _logger.LogHttpResponseAndHeaders(response, taskInfo);
            _logger.Log(LogLevel.Debug, $"{taskInfo} response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");

            if (response.StatusCode != HttpStatusCode.OK)
            {
                if (response.StatusCode == HttpStatusCode.Unauthorized)
                {
                    manager.loginNeeded = true;
                }
                _logger.Log(LogLevel.Warning, $"{taskInfo} {response.StatusCode} {response.ReasonPhrase}");
                _liteHttpClient.DumpHttpClientDetails();

                // fix: previously fell through and attempted to deserialize the error
                // body as a FilesModel; skip this box and try the next one instead.
                continue;
            }

            var newResources = JsonSerializer.Deserialize<FilesModel>(await response.Content.ReadAsStringAsync());
            if (newResources != null && newResources.files.Count > 0)
            {
                lock (Connection.fromEGS)
                {
                    //take the new studies from cloud and merge with existing
                    foreach (var ri in newResources.files)
                    {
                        if (!Connection.fromEGS.Any(e => e.resource == ri.resource))
                        {
                            _logger.Log(LogLevel.Information, $"Adding {ri.resource}");
                            _routedItemManager.Init(ri);
                            _routedItemManager.Enqueue(connection, connection.fromEGS, nameof(connection.fromEGS), copy: false);
                        }
                        else
                        {
                            _logger.Log(LogLevel.Error, $"Resource already exists: {ri.resource}");
                        }
                    }
                }

                // NOTE(review): returns after the first box that reports files, so the
                // remaining boxes are not polled until the next pass — confirm intended.
                return newResources.files.Count;
            }
        }
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"Task was canceled.");
    }
    catch (System.Runtime.Serialization.SerializationException e)
    {
        //eat it for now
        _logger.Log(LogLevel.Warning, $"{e.Message} {e.StackTrace}");
    }
    catch (System.Net.Http.HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"Inner Exception: {e.InnerException}");
        }
    }
    catch (Exception e)
    {
        _logger.LogFullException(e);
        //throw e;
    }

    return 0;
}
/// <summary>
/// Downloads the agent configuration profile from the cloud. Sends the known row
/// version unless it is null or the override flag is set; a 304 Not-Modified (or any
/// failure to obtain usable JSON) yields null.
/// </summary>
/// <param name="conn">Cloud connection to query.</param>
/// <param name="rowVersion">Last known profile version; null forces an unconditional fetch.</param>
/// <param name="_overrideVersionAndModifiedDate">When true, ignores rowVersion and fetches unconditionally.</param>
/// <returns>The deserialized profile, or null when unchanged/unavailable.</returns>
public async Task<Profile> GetAgentConfigurationFromCloud(LifeImageCloudConnection conn, string rowVersion, bool _overrideVersionAndModifiedDate)
{
    var taskInfo = $"{conn.name}:";
    string json = "";
    var httpClient = _liteHttpClient.GetClient(conn);

    try
    {
        //set the URL
        string profileURL = conn.URL;
        // fix: was `rowVersion == null | _overrideVersionAndModifiedDate == true` —
        // bitwise | evaluated both operands; logical || is the intended semantics.
        if (rowVersion == null || _overrideVersionAndModifiedDate)
        {
            profileURL += CloudAgentConstants.AgentConfigurationUrl;
        }
        else
        {
            profileURL += $"{CloudAgentConstants.AgentConfigurationUrl}?version={rowVersion}";
        }
        _logger.Log(LogLevel.Debug, $"{taskInfo} getProfileURL: {profileURL}");

        var cookies = _liteHttpClient.GetCookies(profileURL);
        _logger.LogCookies(cookies, taskInfo);

        // issue the GET (fix: response is now disposed)
        using HttpResponseMessage httpResponse = await httpClient.GetAsync(profileURL);
        if (httpResponse.StatusCode == HttpStatusCode.NotModified)
        {
            return null;
        }

        string response = await httpResponse.Content.ReadAsStringAsync();
        _logger.Log(LogLevel.Debug, $"{taskInfo} response size: {response.Length}");

        if (httpResponse.StatusCode == HttpStatusCode.OK && response != null && response.Length > 0)
        {
            // Cloud returns results in a map "configFile" -> value
            Dictionary<string, string> map = JsonHelper.DeserializeFromMap(response);

            // fix: guard the lookup — previously a missing/null "configFile" entry made
            // Convert.FromBase64String throw into the generic catch below.
            if (map.TryGetValue("configFile", out string json64) && json64 != null)
            {
                // Convert back from base 64 (needed because json was getting munged)
                byte[] jsonBytes = Convert.FromBase64String(json64);
                // NOTE(review): Encoding.Default is platform-dependent; presumably UTF-8
                // was intended — confirm before changing.
                json = System.Text.Encoding.Default.GetString(jsonBytes);
                _logger.Log(LogLevel.Debug, $"{taskInfo} Profile successfully downloaded from cloud.");
                _logger.Log(LogLevel.Debug, $"{taskInfo} Raw JSON: \n {json}");
                // NOTE(review): rowVersion is a by-value parameter, so this updated
                // version is not visible to the caller — confirm whether that matters.
                map.TryGetValue("version", out rowVersion);
            }
            else
            {
                _logger.Log(LogLevel.Warning, $"{taskInfo} Cloud response did not contain a configFile entry.");
            }
        }
        else
        {
            _logger.Log(LogLevel.Debug, $"{taskInfo} No profile update available from cloud.");
        }
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task Canceled");
    }
    catch (HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"Inner Exception: {e.InnerException}");
        }
        _liteHttpClient.DumpHttpClientDetails();
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, $"{taskInfo} {e.Message}");
        _liteHttpClient.DumpHttpClientDetails();
        //throw e; //solves a perpetual state of unauthorized, now solved by inspection of response code in other liCloud calls
    }

    if (json == "")
    {
        return null;
    }
    return _jsonHelper.DeserializeObject(json);
}
/// <summary>
/// Uploads a batch of files to the cloud STOW endpoint as a multipart/related POST
/// (one application/dicom part per file), optionally gzip-compressing the whole body
/// and optionally measuring the batch's compressibility into a temp .gz file.
/// On OK/Accepted the whole batch is dequeued from Connection.toCloud.
/// </summary>
/// <param name="batch">Files to send; must be non-empty (First() is taken).</param>
/// <param name="taskID">Identifier of the calling task, used for log correlation.</param>
/// <param name="Connection">Cloud connection providing URL and share destinations.</param>
/// <param name="httpManager">Receives loginNeeded = true on a 401 response.</param>
public async Task stowAsMultiPart(List<RoutedItem> batch, int taskID, LifeImageCloudConnection Connection, IHttpManager httpManager)
{
    Throw.IfNull(Connection);
    Throw.IfNull(httpManager);

    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    StreamContent streamContent = null;
    MultipartContent content = null;
    HttpResponseMessage response = null;
    string testFile = null;
    var firstRecord = batch.First();
    var httpClient = _liteHttpClient.GetClient(Connection);

    try
    {
        var stopWatch = new Stopwatch();
        stopWatch.Start();

        //set the URL
        string stowURL = Connection.URL + CloudAgentConstants.StowStudies;
        _logger.Log(LogLevel.Debug, $"{taskInfo} stowURL: {stowURL}");

        // generate guid for boundary...boundaries cannot be accidentally found in the content
        var boundary = Guid.NewGuid();
        _logger.Log(LogLevel.Debug, $"{taskInfo} boundary: {boundary}");

        // create the content
        content = new MultipartContent("related", boundary.ToString());

        //add the sharing headers
        List<string> shareHeader = new List<string>();
        if (Connection.shareDestinations != null)
        {
            foreach (var connectionSet in firstRecord.toConnections.FindAll(e => e.connectionName.Equals(Connection.name)))
            {
                if (connectionSet.shareDestinations != null)
                {
                    foreach (var shareDestination in connectionSet.shareDestinations)
                    {
                        shareHeader.Add(shareDestination.boxUuid);
                        _logger.Log(LogLevel.Debug, $"{taskInfo} sharing to: {shareDestination.boxId} {shareDestination.boxName} {shareDestination.groupId} {shareDestination.groupName} {shareDestination.organizationName} {shareDestination.publishableBoxType}");
                    }
                }
            }
        }
        content.Headers.Add("X-Li-Destination", shareHeader);

        long fileSize = 0;
        var dir = _profileStorage.Current.tempPath + Path.DirectorySeparatorChar + Connection.name + Path.DirectorySeparatorChar + Constants.Dirs.ToCloud;
        Directory.CreateDirectory(dir);
        testFile = dir + Path.DirectorySeparatorChar + Guid.NewGuid() + ".gz";

        using (FileStream compressedFileStream = File.Create(testFile))
        {
            using GZipStream compressionStream = new GZipStream(compressedFileStream, CompressionMode.Compress);
            foreach (var routedItem in batch)
            {
                if (File.Exists(routedItem.sourceFileName))
                {
                    routedItem.stream = File.OpenRead(routedItem.sourceFileName);
                    if (Connection.CalcCompressionStats)
                    {
                        routedItem.stream.CopyTo(compressionStream);
                        // fix: CopyTo leaves the stream positioned at EOF, so the
                        // StreamContent below would post an empty part; rewind first.
                        routedItem.stream.Position = 0;
                    }
                    fileSize += routedItem.length;
                    streamContent = new StreamContent(routedItem.stream);
                    streamContent.Headers.ContentDisposition = new ContentDispositionHeaderValue("attachment")
                    {
                        FileName = routedItem.sourceFileName
                    };
                    content.Add(streamContent);
                    //shb 2017-11-01 streamcontent (uncompressed) is inside content (can be compressed below) server is complaining so will comment out
                    //streamContent.Headers.Add("Content-Transfer-Encoding", "gzip");
                    //shb 2017-11-10 shb added content-type header to solve
                    //controller.DicomRSControllerBase: Content-Encoding header has value null !!!
                    //controller.StowRSController: Unable to process part 1 no content-type parameter received
                    streamContent.Headers.Add("content-type", "application/dicom");
                }
                else
                {
                    _logger.Log(LogLevel.Error, $"{taskInfo} {routedItem.sourceFileName} no longer exists. Increase tempFileRetentionHours for heavy transfer backlogs that may take hours!!");
                }
            }
        }

        if (Connection.CalcCompressionStats)
        {
            FileInfo info = new FileInfo(testFile);
            _logger.Log(LogLevel.Information, $"{taskInfo} orgSize: {fileSize} compressedSize: {info.Length} reduction: {(fileSize == 0 ? 0 : (fileSize * 1.0 - info.Length) / (fileSize) * 100)}%");
        }

        // issue the POST, gzip-wrapping the multipart body when requested
        Task<HttpResponseMessage> task;
        if (firstRecord.Compress == true)
        {
            var compressedContent = new CompressedContent(content, "gzip");
            _logger.Log(LogLevel.Debug, $"{taskInfo} compressedContent.Headers {compressedContent.Headers} ");
            var cookies = _liteHttpClient.GetCookies(stowURL);
            _logger.LogCookies(cookies, taskInfo);
            task = httpClient.PostAsync(stowURL, compressedContent, _taskManager.cts.Token);
        }
        else
        {
            _logger.Log(LogLevel.Debug, $"{taskInfo} will send content.Headers {content.Headers}");
            var cookies = _liteHttpClient.GetCookies(stowURL);
            _logger.LogCookies(cookies, taskInfo);
            task = httpClient.PostAsync(stowURL, content, _taskManager.cts.Token);
        }
        response = await task;

        // output the result
        _logger.LogHttpResponseAndHeaders(response, taskInfo);
        _logger.Log(LogLevel.Debug, $"{taskInfo} response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");
        stopWatch.Stop();
        _logger.Log(LogLevel.Information, $"{taskInfo} elapsed: {stopWatch.Elapsed} size: {fileSize} rate: {(float)fileSize / stopWatch.Elapsed.TotalMilliseconds * 1000 / 1000000} MB/s");

        if (!(response.StatusCode == HttpStatusCode.OK || response.StatusCode == HttpStatusCode.Accepted))
        {
            if (response.StatusCode == HttpStatusCode.Unauthorized)
            {
                httpManager.loginNeeded = true;
            }
            _logger.Log(LogLevel.Warning, $"stow of {firstRecord.sourceFileName} and others in batch failed with {response.StatusCode}");
            _liteHttpClient.DumpHttpClientDetails();
        }
        else
        {
            //dequeue the work, we're done!
            if (streamContent != null) { streamContent.Dispose(); }
            if (response != null) { response.Dispose(); }
            if (content != null) { content.Dispose(); }
            foreach (var ri in batch)
            {
                _routedItemManager.Init(ri);
                _routedItemManager.Dequeue(Connection, Connection.toCloud, nameof(Connection.toCloud));
            }
        }

        //delete the compression test file
        File.Delete(testFile);
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} Inner Exception: {e.InnerException}");
        }
        _liteHttpClient.DumpHttpClientDetails();
    }
    catch (FileNotFoundException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} {e.Message} {e.StackTrace}");
    }
    catch (IOException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} {e.Message} {e.StackTrace}");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        _liteHttpClient.DumpHttpClientDetails();
    }
    finally
    {
        // Disposal here is idempotent with the success path above.
        try
        {
            if (streamContent != null) { streamContent.Dispose(); }
            if (response != null) { response.Dispose(); }
            if (content != null) { content.Dispose(); }
            // fix: testFile is null if we faulted before it was assigned;
            // File.Delete(null) would throw inside the cleanup handler.
            if (testFile != null) { File.Delete(testFile); }
            _taskManager.Stop($"{Connection.name}.Stow");
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
        }
    }
}
/// <summary>
/// Tells the cloud a study upload is finished by POSTing the cached item metadata to
/// the study's upload-close endpoint, then dequeues the routed item from
/// Connection.toCloud on a definitive outcome (OK → success, 400/500 → error folder).
/// Items with no study UID are dropped from the cache without a POST.
/// </summary>
/// <param name="Connection">Cloud connection providing the base URL.</param>
/// <param name="routedItem">Item whose study is being closed.</param>
/// <param name="cacheManager">Cache the item is removed from once resolved.</param>
/// <param name="httpManager">Receives loginNeeded = true on a 401 response.</param>
/// <param name="taskID">Identifier of the calling task, used for log correlation.</param>
public async Task PostCompletion(LifeImageCloudConnection Connection, RoutedItem routedItem, IConnectionRoutedCacheManager cacheManager, IHttpManager httpManager, long taskID)
{
    Throw.IfNull(Connection);
    Throw.IfNull(routedItem);
    Throw.IfNull(cacheManager);
    Throw.IfNull(httpManager);

    var timer = Stopwatch.StartNew();
    var taskInfo = $"task: {taskID} connection: {Connection.name} id: {routedItem.id} ";
    HttpResponseMessage closeResponse = null;
    var client = _liteHttpClient.GetClient(Connection);

    try
    {
        // Without a study UID there is nothing to close; drop the cached entry and bail.
        if (string.IsNullOrEmpty(routedItem.Study))
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} meta: {routedItem.RoutedItemMetaFile} cannot close routedItem.Study: {routedItem.Study} because null or blank.");
            cacheManager.RemoveCachedItem(routedItem);
            return;
        }

        //POST /api/agent/v1/study/{studyInstanceUid}/upload-close
        string studyCloseURL = Connection.URL + CloudAgentConstants.GetUploadCloseUrl(routedItem.Study);
        _logger.Log(LogLevel.Debug, $"{taskInfo} studyCloseURL: {studyCloseURL}");

        // Metadata production is best-effort: on failure an empty body is still posted.
        var metadata = "";
        try
        {
            metadata = _cloudConnectionCacheAccessor.GetCachedItemMetaData(Connection, routedItem, taskID);
        }
        catch (Exception e)
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} Unable to produce metadata for {routedItem.id} {routedItem.RoutedItemMetaFile}: {e.Message} {e.StackTrace}");
        }

        using (HttpContent body = new StringContent(metadata))
        {
            var cookies = _liteHttpClient.GetCookies(studyCloseURL);
            _logger.LogCookies(cookies, taskInfo);

            closeResponse = await client.PostAsync(studyCloseURL, body, _taskManager.cts.Token);
            _logger.LogHttpResponseAndHeaders(closeResponse, taskInfo);

            if (closeResponse.StatusCode == HttpStatusCode.Unauthorized)
            {
                httpManager.loginNeeded = true;
            }

            //BOUR-995 we don't want to dequeue unless completed or failed
            switch (closeResponse.StatusCode)
            {
                case HttpStatusCode.OK:
                    _routedItemManager.Init(routedItem);
                    _routedItemManager.Dequeue(Connection, Connection.toCloud, nameof(Connection.toCloud), error: false);
                    cacheManager.RemoveCachedItem(routedItem);
                    break;

                case HttpStatusCode.InternalServerError:
                case HttpStatusCode.BadRequest:
                    _logger.Log(LogLevel.Warning, $"{taskInfo} {closeResponse.StatusCode} {closeResponse.ReasonPhrase}. Dequeuing to error folder");
                    _liteHttpClient.DumpHttpClientDetails();
                    _routedItemManager.Init(routedItem);
                    _routedItemManager.Dequeue(Connection, Connection.toCloud, nameof(Connection.toCloud), error: true);
                    cacheManager.RemoveCachedItem(routedItem);
                    break;
            }
        }

        timer.Stop();
        _logger.Log(LogLevel.Information, $"{taskInfo} elapsed: {timer.Elapsed}");
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} Inner Exception: {e.InnerException}");
        }
        _liteHttpClient.DumpHttpClientDetails();
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        _liteHttpClient.DumpHttpClientDetails();
        cacheManager.RemoveCachedItem(routedItem);
    }
    finally
    {
        try
        {
            _taskManager.Stop($"{Connection.name}.PostCompletion");
            closeResponse?.Dispose();
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
        }
    }
}
/// <summary>
/// Fetches the list of publishable share destinations from the cloud and stores it in
/// Connection.shareDestinations. On a non-OK response the existing list is left intact.
/// </summary>
/// <param name="taskID">Identifier of the calling task, used for log correlation.</param>
/// <param name="Connection">Cloud connection to query.</param>
/// <param name="httpManager">Receives loginNeeded = true on a 401 response.</param>
public async Task GetShareDestinations(int taskID, LifeImageCloudConnection Connection, IHttpManager httpManager)
{
    Throw.IfNull(Connection);
    Throw.IfNull(httpManager);

    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    try
    {
        _logger.Log(LogLevel.Debug, $"{taskInfo} Processing getShareDestinations");
        var httpClient = _liteHttpClient.GetClient(Connection);
        try
        {
            //set the URL
            string shareURL = Connection.URL + CloudAgentConstants.GetShareDestinationUrl;
            _logger.Log(LogLevel.Debug, $"{taskInfo} shareURL: {shareURL}");

            var cookies = _liteHttpClient.GetCookies(shareURL);
            _logger.LogCookies(cookies, taskInfo);

            // issue the GET (fix: response is now disposed)
            using var response = await httpClient.GetAsync(shareURL);

            // output the result
            _logger.LogHttpResponseAndHeaders(response, taskInfo);
            _logger.Log(LogLevel.Debug, $"{taskInfo} await response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");

            if (response.StatusCode != HttpStatusCode.OK)
            {
                if (response.StatusCode == HttpStatusCode.Unauthorized)
                {
                    httpManager.loginNeeded = true;
                }
                _logger.Log(LogLevel.Warning, $"{taskInfo} Problem getting share destinations. {response.StatusCode}");
                _liteHttpClient.DumpHttpClientDetails();

                // fix: previously fell through and deserialized the error body over
                // Connection.shareDestinations; keep the current list instead.
                return;
            }

            // convert from stream to JSON
            var serializer = new DataContractJsonSerializer(typeof(List<ShareDestinations>));
            Connection.shareDestinations = serializer.ReadObject(await response.Content.ReadAsStreamAsync()) as List<ShareDestinations>;
        }
        catch (TaskCanceledException)
        {
            _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
        }
        catch (HttpRequestException e)
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
            if (e.InnerException != null)
            {
                _logger.Log(LogLevel.Warning, $"{taskInfo} Inner Exception: {e.InnerException}");
            }
            _liteHttpClient.DumpHttpClientDetails();
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
            _liteHttpClient.DumpHttpClientDetails();
        }
    }
    finally
    {
        _taskManager.Stop($"{Connection.name}.getShareDestinations");
    }
}
/// <summary>
/// Posts the base64-encoded cloudTaskResults of a routed item back to the cloud's
/// agent-task-results endpoint (tagged with the item's status), and dequeues the item
/// from Connection.toCloud once it is COMPLETED or FAILED with a definitive response
/// (OK → success, 400/500 → error folder).
/// </summary>
/// <param name="Connection">Cloud connection providing the base URL.</param>
/// <param name="routedItem">Item whose task results are being reported.</param>
/// <param name="cacheManager">Cache the item is removed from once resolved.</param>
/// <param name="httpManager">Receives loginNeeded = true on a 401 response.</param>
/// <param name="taskID">Identifier of the calling task, used for log correlation.</param>
public async Task PostResponse(LifeImageCloudConnection Connection, RoutedItem routedItem, IConnectionRoutedCacheManager cacheManager, IHttpManager httpManager, long taskID)
{
    // fix: validate arguments like the sibling PostCompletion does (consistency)
    Throw.IfNull(Connection);
    Throw.IfNull(routedItem);
    Throw.IfNull(cacheManager);
    Throw.IfNull(httpManager);

    var stopWatch = new Stopwatch();
    stopWatch.Start();
    var taskInfo = $"task: {taskID} connection: {Connection.name} id: {routedItem.id} ";

    _logger.Log(LogLevel.Debug, $"{taskInfo} request: {routedItem.request}");
    foreach (var results in routedItem.response)
    {
        _logger.Log(LogLevel.Debug, $"{taskInfo} response: {results}");
    }

    HttpResponseMessage response = null;
    try
    {
        string json = JsonSerializer.Serialize(routedItem.cloudTaskResults);
        _logger.Log(LogLevel.Debug, $"{taskInfo} posting {json}");
        // NOTE(review): ASCII encoding silently mangles any non-ASCII characters in the
        // JSON before base64 encoding — confirm UTF-8 isn't required by the server.
        string base64Results = Convert.ToBase64String(Encoding.ASCII.GetBytes(json));

        string agentTasksURL = Connection.URL + CloudAgentConstants.GetAgentTaskResultUrl(routedItem.id);
        //optional status="NEW", "PENDING", "COMPLETED", "FAILED"
        agentTasksURL += $"?status={routedItem.status}";
        _logger.Log(LogLevel.Debug, $"{taskInfo} agentTasksURL: {agentTasksURL}");

        var httpClient = _liteHttpClient.GetClient(Connection);
        using (HttpContent httpContent = new StringContent(base64Results))
        {
            var cookies = _liteHttpClient.GetCookies(agentTasksURL);
            _logger.LogCookies(cookies, taskInfo);

            response = await httpClient.PostAsync(agentTasksURL, httpContent, _taskManager.cts.Token);

            // output the result
            _logger.LogHttpResponseAndHeaders(response, taskInfo);

            if (response.StatusCode == HttpStatusCode.Unauthorized)
            {
                httpManager.loginNeeded = true;
            }

            //BOUR-995 we don't want to dequeue unless completed or failed
            if (response.StatusCode == HttpStatusCode.OK &&
                (routedItem.status == RoutedItem.Status.COMPLETED || routedItem.status == RoutedItem.Status.FAILED))
            {
                _routedItemManager.Init(routedItem);
                _routedItemManager.Dequeue(Connection, Connection.toCloud, nameof(Connection.toCloud), error: false);
                cacheManager.RemoveCachedItem(routedItem);
            }

            //BOUR-995 we don't want to dequeue unless completed or failed
            if ((response.StatusCode == HttpStatusCode.InternalServerError || response.StatusCode == HttpStatusCode.BadRequest) &&
                (routedItem.status == RoutedItem.Status.COMPLETED || routedItem.status == RoutedItem.Status.FAILED))
            {
                _logger.Log(LogLevel.Warning, $"{taskInfo} {response.StatusCode} {response.ReasonPhrase}. Dequeuing to error folder");
                _liteHttpClient.DumpHttpClientDetails();
                _routedItemManager.Init(routedItem);
                _routedItemManager.Dequeue(Connection, Connection.toCloud, nameof(Connection.toCloud), error: true);
                cacheManager.RemoveCachedItem(routedItem);
            }
        }

        stopWatch.Stop();
        _logger.Log(LogLevel.Information, $"{taskInfo} elapsed: {stopWatch.Elapsed}");
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} Inner Exception: {e.InnerException}");
        }
        _liteHttpClient.DumpHttpClientDetails();
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        _liteHttpClient.DumpHttpClientDetails();
    }
    finally
    {
        try
        {
            _taskManager.Stop($"{Connection.name}.PostResponse");
            if (response != null) { response.Dispose(); }
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
        }
    }
}
/// <summary>
/// Authenticates against the cloud "/login/authenticate" endpoint with form-encoded
/// credentials, captures the X-Li-Synctoken header onto the client's default headers
/// and the JSESSIONID cookie onto the manager, and returns the response status code
/// as a string (null on cancellation or unexpected failure). After maxAttempts
/// consecutive 401s the whole process is shut down via LiteEngine.shutdown/Exit.
/// </summary>
/// <param name="taskID">Identifier of the calling task, used for log correlation.</param>
/// <param name="connection">Cloud connection supplying URL and credentials.</param>
/// <param name="_manager">Receives loginNeeded/jSessionID based on the outcome.</param>
/// <returns>Response status code string, or null.</returns>
public async Task <string> login(int taskID, LifeImageCloudConnection connection, IHttpManager _manager)
{
    Connection = connection;
    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    var httpClient = _liteHttpClient.GetClient(connection);
    try
    {
        Connection.loginAttempts++;
        //set the URL
        string loginURL = Connection.URL + "/login/authenticate";
        _logger.Log(LogLevel.Debug, $"{taskInfo} loginURL: {loginURL}");
        //set the form parameters
        var loginParams = new FormUrlEncodedContent(new[]
        {
            new KeyValuePair <string, string>("j_username", Connection.username),
            new KeyValuePair <string, string>("j_password", GetUnprotectedPassword(connection)),
            new KeyValuePair <string, string>("OrganizationCode", Connection.organizationCode),
            new KeyValuePair <string, string>("ServiceName", Connection.serviceName),
            new KeyValuePair <string, string>("applTenantId", Connection.tenantID)
        });
        // issue the POST
        HttpResponseMessage response = null;
        try
        {
            var cookies = _liteHttpClient.GetCookies(loginURL);
            _logger.LogCookies(cookies, taskInfo);
            var task = httpClient.PostAsync(loginURL, loginParams, _taskManager.cts.Token);
            //ServicePointManager.FindServicePoint(new Uri(loginURL)).ConnectionLeaseTimeout = 0; //(int)TimeSpan.FromMinutes(1).TotalMilliseconds;
            response = await task;
        }
        catch (System.Threading.Tasks.TaskCanceledException)
        {
            _logger.Log(LogLevel.Information, $"{taskInfo} Task Canceled");
            return(null);
        }
        catch (HttpRequestException e)
        {
            // NOTE(review): the text between the ****** markers below is redaction
            // residue left by a secret scrubber — this span does not compile as-is
            // and must be restored from version control before this method is built.
            _logger.LogFullException(e, $"{taskInfo} HttpRequestException: Unable to login:"******"{taskInfo} HttpRequestException: Unable to login:"******"{taskInfo} await response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");
            if (response.StatusCode == HttpStatusCode.OK)
            {
                // successful login resets the attempt counter
                Connection.loginAttempts = 0;
                _logger.Log(LogLevel.Debug, $"{taskInfo} User Successfully logged in!");
                _manager.loginNeeded = false;
            }
            else
            {
                _liteHttpClient.DumpHttpClientDetails();
                _manager.loginNeeded = true;
                // hard-stop the process once the configured attempt budget is exhausted
                if (response.StatusCode == HttpStatusCode.Unauthorized && Connection.loginAttempts == Connection.maxAttempts)
                {
                    Console.WriteLine("Exceeded max login attempts. Shutting down");
                    LiteEngine.shutdown(this, null);
                    Environment.Exit(0);
                }
            }
            // grab the X-Li-Synctoken
            if (response.Headers.TryGetValues("X-Li-Synctoken", out IEnumerable <string> syncTokens))
            {
                foreach (var token in syncTokens)
                {
                    Connection.syncToken = token;
                    httpClient.DefaultRequestHeaders.Remove("X-Li-Synctoken"); //in case we have to login again without recreating httpClient
                    httpClient.DefaultRequestHeaders.Add("X-Li-Synctoken", Connection.syncToken);
                    break;
                }
            }
            // get the session cookie
            var newcookies = _liteHttpClient.GetCookies(loginURL);
            foreach (var cookie in newcookies)
            {
                var cookiestr = cookie.ToString();
                if (cookiestr.Contains("JSESSIONID"))
                {
                    _logger.Log(LogLevel.Debug, $"{taskInfo} Cookie: {cookiestr}");
                    _manager.jSessionID = cookiestr;
                }
            }
            _logger.Log(LogLevel.Debug, $"{taskInfo} Login successful: {_manager.jSessionID}");
            // convert from stream to JSON
            //BUG JSON is invalid as of 4/11/2016
            //var serializer = new DataContractJsonSerializer(typeof(LoginJSON));
            //var loginJSON = serializer.ReadObject(await response.Content.ReadAsStreamAsync()) as LoginJSON;
            return(response.StatusCode.ToString());
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
        return(null);
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        return(null);
    }
}
/// <summary>
/// Uploads the (validated) profile to the cloud agent-configuration endpoint as a
/// base64-encoded PUT body, and on success reads back the server-assigned row version
/// into Profile.rowVersion. A null connection is a silent no-op (early-startup case).
/// </summary>
/// <param name="profile">Profile to validate and upload.</param>
/// <param name="conn">Cloud connection; may be null during startup replacement.</param>
public async Task PutConfigurationToCloud(Profile profile, LifeImageCloudConnection conn)
{
    var taskInfo = $"PutProfile";
    if (conn == null)
    {
        //this can be called early in startup during replacement strategy
        await Task.CompletedTask;
        return;
    }

    var httpClient = _liteHttpClient.GetClient(conn);
    try
    {
        //set the URL
        // fix: the '?' separator was dropped when the literal
        // "/api/agent/v1/agent-configuration?version=" was refactored to use
        // CloudAgentConstants.AgentConfigurationUrl (the GET sibling shows the
        // constant does not end in '?'), producing a malformed path.
        string profileURL = conn.URL + CloudAgentConstants.AgentConfigurationUrl + $"?version={Profile.rowVersion}";
        _logger.Log(LogLevel.Debug, $"{taskInfo} putProfileURL: {profileURL}");

        // validate and put any errors in the profile so it can be returned to the server
        profile.errors = _profileValidator.FullValidate(profile, profile.ToString());

        string json = profile.ToString();
        // NOTE(review): ASCII encoding mangles non-ASCII characters in the profile
        // before base64 encoding — confirm UTF-8 isn't required.
        byte[] toBytes = Encoding.ASCII.GetBytes(json);
        string json64 = Convert.ToBase64String(toBytes);

        using HttpContent httpContent = new StringContent(json64);
        var cookies = _liteHttpClient.GetCookies(profileURL);
        _logger.LogCookies(cookies, taskInfo);

        // fix: was httpClient.PutAsync(...).Result — blocking on the task inside an
        // async method risks deadlock and ties up a thread-pool thread; await it.
        using HttpResponseMessage response = await httpClient.PutAsync(profileURL, httpContent);

        // output the result
        _logger.LogHttpResponseAndHeaders(response, taskInfo);

        if (response.StatusCode == HttpStatusCode.OK)
        {
            string resp = await response.Content.ReadAsStringAsync();
            _logger.Log(LogLevel.Debug, $"{taskInfo} Profile successfully uploaded to cloud.");

            // Read out server version in case null was passed in, essentially allow the agent to write once
            // without a version (just in case its needed say to load a local version to the server) then the
            // version needs to be respected. In practice profile should be read first but that's not currently required.
            Dictionary<string, string> map = JsonHelper.DeserializeFromMap(resp);
            map.TryGetValue("version", out Profile.rowVersion);
            _logger.Log(LogLevel.Debug, $"{taskInfo} Profile version from server: {Profile.rowVersion}.");
        }
        else
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} {response.StatusCode}");
            _liteHttpClient.DumpHttpClientDetails();
        }
    }
    catch (System.Runtime.Serialization.SerializationException e)
    {
        _logger.LogFullException(e, taskInfo);
    }
    catch (InvalidOperationException e)
    {
        // Thrown when the profile collection is mutated while being serialized.
        if (e.InnerException != null && e.InnerException.Message != null)
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} Unable to upload profile to cloud because profile is being iterated. Will try again: {e.Message} {e.StackTrace} Inner Exception: {e.InnerException.Message}");
            if (e.InnerException.StackTrace != null)
            {
                _logger.Log(LogLevel.Warning, $"{taskInfo} e.InnerException.StackTrace: {e.InnerException.StackTrace}");
            }
        }
        else
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} Unable to upload profile to cloud because profile is being iterated. Will try again: {e.Message} {e.StackTrace}");
        }
    }
    catch (System.Threading.Tasks.TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task Canceled");
    }
    catch (HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"Inner Exception: {e.InnerException}");
        }
        _liteHttpClient.DumpHttpClientDetails();
    }
    catch (Exception e)
    {
        _logger.LogFullException(e);
        _liteHttpClient.DumpHttpClientDetails();
    }
}
/// <summary>
/// Polls the cloud for agent tasks (RPC requests), primes the response cache for
/// each connection a rule routes the request to, and enqueues the request to rules.
/// Loops until the shared cancellation token fires.
/// </summary>
/// <param name="taskID">Task identifier used in log prefixes and on created RoutedItems.</param>
/// <param name="Connection">Cloud connection to poll.</param>
/// <param name="cache">Routed cache manager used to prime expected responses.</param>
/// <param name="httpManager">Receives loginNeeded=true on 401 responses.</param>
/// <returns></returns>
public async Task GetRequests(int taskID, LifeImageCloudConnection Connection, IConnectionRoutedCacheManager cache, IHttpManager httpManager)
{
    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    HttpResponseMessage response = null;
    var httpClient = _liteHttpClient.GetClient(Connection);

    try
    {
        while (!_taskManager.cts.IsCancellationRequested)
        {
            await Task.Delay(_profileStorage.Current.KickOffInterval, _taskManager.cts.Token);

            //BOUR-1022 shb I think the code that checks for dictionary entry before enqueuing is enough to prevent duplicate behavior
            //skip if response cache is not empty
            if (LifeImageCloudConnectionManager.cache.Count > 0)
            {
                int requestCount = 0;

                //check to see if there are any requests
                foreach (var cacheItem in LifeImageCloudConnectionManager.cache.ToArray())
                {
                    _logger.Log(LogLevel.Debug, $"{taskInfo} Cache entry id: {cacheItem.Key}");
                    foreach (var item in cacheItem.Value.ToArray())
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} fromConnection: {item.fromConnection} id: {item.id} started: {item.startTime} complete: {item.resultsTime} status: {item.status}");
                        if (item.type == RoutedItem.Type.RPC)
                        {
                            requestCount++;
                        }
                    }
                }

                if (requestCount > 0)
                {
                    //BOUR-1060 relax condition and rely on dictionary logic below
                    _logger.Log(LogLevel.Warning, $"{taskInfo} response cache has {requestCount} request items.");
                    // _logger.Log(LogLevel.Warning, $"{taskInfo} response cache has {requestCount} request items, skipping getting new requests until clear");
                    // return;
                }
            }

            //set the URL
            //string agentTasksURL = Connection.URL + "/api/agent/v1/agent-tasks";
            string agentTasksURL = Connection.URL + CloudAgentConstants.GetAgentTasksUrl;
            _logger.Log(LogLevel.Debug, $"{taskInfo} agentTasksURL: {agentTasksURL}");

            var cookies = _liteHttpClient.GetCookies(agentTasksURL);
            _logger.LogCookies(cookies, taskInfo);

            // issue the GET
            var task = httpClient.GetAsync(agentTasksURL);
            response = await task;

            // output the result
            _logger.LogHttpResponseAndHeaders(response, taskInfo);

            // BUG FIX: this condition was inverted (== OK), which logged "Problem getting
            // agent tasks" on success, made the Unauthorized check unreachable, and let
            // error bodies fall through to JSON parsing. Treat non-OK as the error path
            // and poll again rather than parsing a body that is not the task list.
            if (response.StatusCode != HttpStatusCode.OK)
            {
                if (response.StatusCode == HttpStatusCode.Unauthorized)
                {
                    httpManager.loginNeeded = true;
                }

                _logger.Log(LogLevel.Warning, $"{taskInfo} Problem getting agent tasks. {agentTasksURL} {response.StatusCode}");
                _liteHttpClient.DumpHttpClientDetails();
                continue;
            }

            // convert from stream to JSON
            string results = await response.Content.ReadAsStringAsync();
            var objResults = JsonSerializer.Deserialize<Dictionary<string, List<Dictionary<string, string>>>>(results);

            foreach (var key in objResults)
            {
                _logger.Log(LogLevel.Debug, $"{taskInfo} key: {key.Key}");
                var list = key.Value;
                foreach (var item in list)
                {
                    foreach (var subkey in item)
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} subkey.Key: {subkey.Key} subKey.Value: {subkey.Value} ");
                    }
                }
            }

            if (objResults.Count > 0)
            {
                // 2018-09-19 shb unwrap encoded task data and then send to rules
                var agentRequestList = objResults["modelMapList"];
                foreach (var agentRequest in agentRequestList)
                {
                    // task payload arrives base64-encoded; decode in place
                    byte[] data = Convert.FromBase64String(agentRequest["task"]);
                    string agentRequestAsString = Encoding.UTF8.GetString(data);
                    agentRequest["task"] = agentRequestAsString;

                    string id = agentRequest["id"];
                    string request = agentRequest["task"];
                    string requestType = agentRequest["task_type"];
                    string connection = null;
                    agentRequest.TryGetValue("connection", out connection);

                    RoutedItem ri = new RoutedItem(fromConnection: Connection.name, id: id, request: request, requestType: requestType)
                    {
                        type = RoutedItem.Type.RPC,
                        status = RoutedItem.Status.PENDING,
                        startTime = DateTime.Now,
                        TaskID = taskID
                    };

                    // "*" means unaddressed; any named connection pins the destination
                    if (connection != null && connection != "*")
                    {
                        ConnectionSet connSet = new ConnectionSet { connectionName = connection };
                        ri.toConnections.Add(connSet);
                    }

                    LifeImageCloudConnectionManager.cache.TryGetValue(ri.id, out List<RoutedItem> cacheItem);
                    if (cacheItem == null)
                    {
                        //determine which connections will need to reply and prime the response cache
                        _rulesManager.Init(_profileStorage.Current.rules);
                        //var connsets = _profileStorage.Current.rules.Eval(ri);
                        var connsets = _rulesManager.Eval(ri);

                        foreach (var connset in connsets)
                        {
                            _routedItemManager.Init(ri);
                            var prime = (RoutedItem)_routedItemManager.Clone();
                            prime.startTime = DateTime.Now; //clock starts ticking now
                            prime.status = RoutedItem.Status.PENDING;
                            prime.fromConnection = _profileStorage.Current.connections.Find(e => e.name == connset.connectionName).name;
                            _logger.Log(LogLevel.Debug, $"{taskInfo} Priming Response cache id: {id} conn: {prime.fromConnection} ");
                            cache.Route(prime);
                        }

                        //enqueue the request
                        _logger.Log(LogLevel.Debug, $"{taskInfo} Enqueuing id: {id} requestType: {requestType} subKey.Value: {request} ");
                        _routedItemManager.Init(ri);
                        _routedItemManager.Enqueue(Connection, Connection.toRules, nameof(Connection.toRules));

                        //BOUR-995 let cloud know we got the request with a status of PENDING
                        await _postResponseCloudService.PostResponse(Connection, ri, cache, httpManager, taskID);
                    }
                    else
                    {
                        // already known request: just dump its current cache state
                        _logger.Log(LogLevel.Debug, $"{taskInfo} Exists id: {id} requestType: {requestType} subKey.Value: {request} ");
                        foreach (var item in cacheItem)
                        {
                            _logger.Log(LogLevel.Debug, $"{taskInfo} fromConnection: {item.fromConnection} started: {item.startTime} complete: {item.resultsTime} status: {item.status}");
                            foreach (var ctr in item.cloudTaskResults)
                            {
                                foreach (var result in ctr.results)
                                {
                                    _logger.Log(LogLevel.Debug, $"{taskInfo} Exists id: {item.id} results: {result}");
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (System.InvalidOperationException) //for the Collection was Modified, we can wait
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Waiting for requests to complete before getting new requests");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
    }
    finally
    {
        try
        {
            _taskManager.Stop($"{Connection.name}.GetRequests");
            if (response != null)
            {
                response.Dispose();
            }
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
        }
    }
}
// markDownloadComplete is used to remove an item that was in the /studies call
/// <summary>
/// Reconciles per-series download state into per-study state, POSTs completion/failure
/// markers back to the cloud for fully-downloaded or failed studies, removes acknowledged
/// studies from the local list, then kicks off the next getStudies pass.
/// </summary>
/// <param name="taskID">Task identifier used in log prefixes.</param>
/// <param name="connection">Cloud connection whose studies/series state is reconciled.</param>
/// <param name="httpManager">Receives loginNeeded=true on 401 responses.</param>
public async Task markDownloadComplete(int taskID, LifeImageCloudConnection connection, IHttpManager httpManager)
{
    Connection = connection;
    var httpClient = _liteHttpClient.GetClient(connection);
    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    HttpResponseMessage response = null;

    try
    {
        //process the series that have completed
        _logger.Log(LogLevel.Debug, $"{taskInfo} Processing Series Completion");

        try
        {
            //loop through the studies and if a study has all of its series downloaded, then we can remove it and tell cloud we downloaded it
            if (Connection.studies != null && Connection.studies.ImagingStudy != null)
            {
                // ToList() snapshots the study list so entries can be removed elsewhere while we iterate
                foreach (var study in Connection.studies.ImagingStudy.ToList())
                {
                    var remaining = new List<Series>();
                    bool seriesFail = false;

                    if (study.series != null)
                    {
                        // a series with downloadCompleted == DateTime.MinValue has not finished yet
                        remaining = study.series?.FindAll(e => e.downloadCompleted == DateTime.MinValue);
                        //var studyFail = study.attempts > maxAttempts; we aren't doing study LogLevel.
                        seriesFail = study.series?.FindAll(e => e.attempts > Connection.maxAttempts).Count > 0;
                        //var instanceFail = ins
                        //the study object contains a list of series but the series object
                        //does not contain a list of instances. So no marking and clearing at instance level yet.
                    }

                    // all series finished: roll series timestamps up to the study and queue a
                    // "download-complete" notification for the cloud
                    if (remaining.Count == 0)
                    {
                        foreach (var series in study.series)
                        {
                            _logger.Log(LogLevel.Debug, $"{taskInfo} study: {study.uid} series: {series.uid} started: {series.downloadStarted.ToString("yyyy-MM-dd HH:mm:ss.ffff")} completed: {series.downloadCompleted.ToString("yyyy-MM-dd HH:mm:ss.ffff")} duration: {series.downloadCompleted - series.downloadStarted} attempts: {series.attempts}");
                        }

                        study.downloadCompleted = study.series.Max(e => e.downloadCompleted);
                        study.downloadStarted = study.series.FindAll(e => e.downloadStarted != null).Min(e => e.downloadStarted);
                        _logger.Log(LogLevel.Information, $"{taskInfo} study download (complete): {study.uid} started: {study.downloadStarted.ToString("yyyy-MM-dd HH:mm:ss.ffff")} completed: {study.downloadCompleted.ToString("yyyy-MM-dd HH:mm:ss.ffff")} duration: {study.downloadCompleted - study.downloadStarted} attempts: {study.attempts}");
                        Connection.markDownloadsComplete.Add(new string[] { study.url, "download-complete" });
                    }

                    // any series over the attempt limit: queue a "download-fail" notification instead
                    if (seriesFail)
                    {
                        foreach (var series in study.series)
                        {
                            _logger.Log(LogLevel.Debug, $"{taskInfo} study: {study.uid} series: {series.uid} started: {series.downloadStarted.ToString("yyyy-MM-dd HH:mm:ss.ffff")} completed: {series.downloadCompleted.ToString("yyyy-MM-dd HH:mm:ss.ffff")} duration: {series.downloadCompleted - series.downloadStarted} attempts: {series.attempts}");
                        }

                        study.downloadCompleted = study.series.Max(e => e.downloadCompleted);
                        study.downloadStarted = study.series.FindAll(e => e.downloadStarted != null).Min(e => e.downloadStarted);
                        _logger.Log(LogLevel.Information, $"{taskInfo} study download (failed): {study.uid} started: {study.downloadStarted.ToString("yyyy-MM-dd HH:mm:ss.ffff")} completed: {study.downloadCompleted.ToString("yyyy-MM-dd HH:mm:ss.ffff")} duration: {study.downloadCompleted - study.downloadStarted} attempts: {study.attempts}");
                        _logger.Log(LogLevel.Information, $"{taskInfo} Failing study: {study.url}");
                        Connection.markDownloadsComplete.Add(new string[] { study.url, "download-fail" });
                    }
                }
            }

            // copy timestamps from the completed-series worklist back onto the study's series
            foreach (var seriesObj in Connection.markSeriesComplete.ToArray())
            {
                _logger.Log(LogLevel.Debug, $"{taskInfo} new Series Complete: {seriesObj.uid}");
                if (Connection.studies != null && Connection.studies.ImagingStudy != null)
                {
                    foreach (var study in Connection.studies?.ImagingStudy)
                    {
                        foreach (var series in study.series)
                        {
                            if (series.uid == seriesObj.uid)
                            {
                                // NOTE(review): if downloadCompleted is a non-nullable DateTime this
                                // "!= null" is always true and the else branch is unreachable —
                                // confirm the field type; the intended test may be == DateTime.MinValue.
                                if (series.downloadCompleted != null)
                                {
                                    _logger.Log(LogLevel.Debug, $"{taskInfo} writing timestamps markSeriesComplete: {series.uid}");
                                    series.downloadCompleted = seriesObj.downloadCompleted;
                                    series.downloadStarted = seriesObj.downloadStarted;
                                    series.attempts = seriesObj.attempts;
                                }
                                else
                                {
                                    _logger.Log(LogLevel.Debug, $"{taskInfo} series already marked as complete: {series.uid}");
                                    series.downloadCompleted = DateTime.Now;
                                    series.attempts = seriesObj.attempts;
                                }
                            }
                        }
                    }
                }
            }

            Connection.markSeriesComplete.Clear();
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
        }

        // notify the cloud for each queued marker; markinfo[0] is the study url,
        // markinfo[1] is "download-complete" or "download-fail"
        foreach (var markinfo in Connection.markDownloadsComplete.ToList())
        {
            try
            {
                _logger.Log(LogLevel.Debug, $"{taskInfo} marking: {markinfo[0]} {markinfo[1]}");
                var stopWatch = new Stopwatch();
                stopWatch.Start();

                string markDownloadCompleteURL = markinfo[0] + "/" + markinfo[1];
                _logger.Log(LogLevel.Debug, $"{taskInfo} markDownloadCompleteURL: {markDownloadCompleteURL}");

                //set the form parameters (endpoint expects a POST body; content is a placeholder)
                var nothingParams = new FormUrlEncodedContent(new[]
                {
                    new KeyValuePair<string, string>("nothing", "nothing"),
                });

                var cookies = _liteHttpClient.GetCookies(markDownloadCompleteURL);
                _logger.LogCookies(cookies, taskInfo);

                // issue the POST
                var task = httpClient.PostAsync(markDownloadCompleteURL, nothingParams, _taskManager.cts.Token);
                response = await task;

                // output the result
                _logger.LogHttpResponseAndHeaders(response, taskInfo);

                if (response.StatusCode != HttpStatusCode.OK)
                {
                    _logger.Log(LogLevel.Warning, $"{taskInfo} {response.StatusCode} {markDownloadCompleteURL}");
                    _liteHttpClient.DumpHttpClientDetails();
                }

                if (response.StatusCode == HttpStatusCode.Unauthorized)
                {
                    httpManager.loginNeeded = true;
                }

                // OK or NotFound both mean the cloud no longer needs this study locally:
                // drop it from the study list and retire the marker
                if (response.StatusCode == HttpStatusCode.OK || response.StatusCode == HttpStatusCode.NotFound)
                {
                    _logger.Log(LogLevel.Information, $"{taskInfo} {response.StatusCode} {markDownloadCompleteURL}");
                    lock (Connection.studies)
                    {
                        Connection.studies.ImagingStudy.RemoveAll(e => e.url == markinfo[0]);
                    }
                    lock (Connection.markDownloadsComplete)
                    {
                        Connection.markDownloadsComplete.Remove(markinfo);
                    }
                }

                _logger.Log(LogLevel.Debug, $"{taskInfo} response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");
                stopWatch.Stop();
                _logger.Log(LogLevel.Information, $"{taskInfo} elapsed: {stopWatch.Elapsed}");
            }
            catch (TaskCanceledException)
            {
                _logger.Log(LogLevel.Information, $"{taskInfo} Task Canceled");
            }
            catch (HttpRequestException e)
            {
                _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
                if (e.InnerException != null)
                {
                    _logger.Log(LogLevel.Warning, $"{taskInfo} Inner Exception: {e.InnerException}");
                }
                _liteHttpClient.DumpHttpClientDetails();
            }
            catch (Exception e)
            {
                _logger.LogFullException(e, $"{taskInfo} markDownloadComplete failed");
                _liteHttpClient.DumpHttpClientDetails();
            }
        }

        _logger.Log(LogLevel.Debug, $"{taskInfo} Processing getStudies");
        await _studyManager.getStudies(taskID, connection, httpManager);
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    finally
    {
        try
        {
            _taskManager.Stop($"{Connection.name}.markDownloadComplete");
            if (response != null)
            {
                response.Dispose();
            }
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
        }
    }
}
/// <summary>
/// Uploads a single HL7 file to the cloud hl7-upload endpoint as a multipart/related
/// POST (optionally gzip-compressed), then dequeues the routed item from toCloud.
/// A missing source file is dequeued immediately with an error.
/// </summary>
/// <param name="routedItem">Item whose sourceFileName is uploaded; Compress selects gzip transfer.</param>
/// <param name="taskID">Task identifier used in log prefixes.</param>
/// <param name="connection">Cloud connection to upload through.</param>
/// <param name="httpManager">Receives loginNeeded=true on 401 responses.</param>
public async Task putHL7(RoutedItem routedItem, int taskID, LifeImageCloudConnection connection, IHttpManager httpManager)
{
    var Connection = connection;
    var httpClient = _liteHttpClient.GetClient(connection);
    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    MultipartContent content = null;
    StreamContent streamContent = null;
    HttpResponseMessage response = null;

    try
    {
        // source file gone (e.g. temp retention expired): dequeue as an error and stop
        if (!File.Exists(routedItem.sourceFileName))
        {
            routedItem.Error = "File Not Found";
            _routedItemManager.Init(routedItem);
            _routedItemManager.Dequeue(Connection, Connection.toCloud, nameof(Connection.toCloud), error: true);
            return;
        }

        var stopWatch = new Stopwatch();
        stopWatch.Start();

        //set theConnection.URL http://localhost:8080/universal-inbox/api/agent/v1/hl7-upload
        //string putHL7URL = Connection.URL + "/api/agent/v1/hl7-upload?connectionName=" + routedItem.fromConnection;
        string putHL7URL = Connection.URL + CloudAgentConstants.GetPutHl7Url(routedItem.fromConnection);
        _logger.Log(LogLevel.Debug, $"{taskInfo} putHL7URL: {putHL7URL}");

        //generate guid for boundary...boundaries cannot be accidentally found in the content
        var boundary = Guid.NewGuid();
        _logger.Log(LogLevel.Debug, $"{taskInfo} boundary: {boundary}");

        // create the content
        content = new MultipartContent("related", boundary.ToString());

        //add the sharing headers: one X-Li-Destination entry per share destination boxUuid
        //for connection sets addressed to this connection
        List<string> shareHeader = new List<string>();
        if (Connection.shareDestinations != null)
        {
            foreach (var connectionSet in routedItem.toConnections.FindAll(e => e.connectionName.Equals(Connection.name)))
            {
                if (connectionSet.shareDestinations != null)
                {
                    foreach (var shareDestination in connectionSet.shareDestinations)
                    {
                        shareHeader.Add(shareDestination.boxUuid);
                    }
                }
            }
        }
        content.Headers.Add("X-Li-Destination", shareHeader);

        //var fileSize = routedItem.stream.Length;
        var fileSize = new FileInfo(routedItem.sourceFileName).Length;

        //var streamContent = new StreamContent(routedItem.stream);
        streamContent = new StreamContent(File.OpenRead(routedItem.sourceFileName));
        streamContent.Headers.ContentDisposition = new ContentDispositionHeaderValue("attachment")
        {
            // FileName = filename
            FileName = routedItem.sourceFileName
        };
        streamContent.Headers.ContentType = new MediaTypeHeaderValue("application/octet-stream");
        //streamContent.Headers.Add("Content-Transfer-Encoding", "gzip");
        content.Add(streamContent);

        // issue the POST
        Task<HttpResponseMessage> task;

        var cookies = _liteHttpClient.GetCookies(putHL7URL);
        _logger.LogCookies(cookies, taskInfo);

        // per-item gzip wrapping when the routed item requests compression
        if (routedItem.Compress == true)
        {
            task = httpClient.PostAsync(putHL7URL, new CompressedContent(content, "gzip"), _taskManager.cts.Token);
        }
        else
        {
            task = httpClient.PostAsync(putHL7URL, content, _taskManager.cts.Token);
        }

        response = await task;

        // output the result
        _logger.LogHttpResponseAndHeaders(response, taskInfo);

        if (response.StatusCode == HttpStatusCode.Unauthorized)
        {
            httpManager.loginNeeded = true;
        }

        _logger.Log(LogLevel.Debug, $"{taskInfo} response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");

        // convert from stream to JSON
        //var serializer = new DataContractJsonSerializer(typeof(LoginJSON));
        //var loginJSON = serializer.ReadObject(await response.Content.ReadAsStreamAsync()) as LoginJSON;

        stopWatch.Stop();
        _logger.Log(LogLevel.Information, $"{taskInfo} elapsed: {stopWatch.Elapsed} size: {fileSize} rate: {(float)fileSize / stopWatch.Elapsed.TotalMilliseconds * 1000 / 1000000} MB/s");

        //dequeue the work, we're done!
        // dispose of the HTTP objects before the dequeue; the finally block disposes
        // again, which is harmless (Dispose is idempotent) and covers exception paths
        if (streamContent != null)
        {
            streamContent.Dispose();
        }
        if (response != null)
        {
            response.Dispose();
        }
        if (content != null)
        {
            content.Dispose();
        }

        _routedItemManager.Init(routedItem);
        _routedItemManager.Dequeue(Connection, Connection.toCloud, nameof(Connection.toCloud));
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} Inner Exception: {e.InnerException}");
        }
        _liteHttpClient.DumpHttpClientDetails();
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        _liteHttpClient.DumpHttpClientDetails();
    }
    finally
    {
        try
        {
            _taskManager.Stop($"{Connection.name}.putHL7");
            if (streamContent != null)
            {
                streamContent.Dispose();
            }
            if (response != null)
            {
                response.Dispose();
            }
            if (content != null)
            {
                content.Dispose();
            }
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
        }
    }
}
/// <summary>
/// wado downloads studies from liCloud. ImagingStudy is required while Series and Instance are optional. RAM utilization remains low regardless of download size.
/// </summary>
/// <param name="taskID"></param>
/// <param name="routedItem">Item identifying the EGS resource to download; dequeued from fromEGS on completion or failure.</param>
/// <param name="connection"></param>
/// <param name="httpManager">Receives loginNeeded=true on 401 responses.</param>
/// <param name="compress"></param>
/// <returns></returns>
public async Task DownloadViaHttp(int taskID, RoutedItem routedItem, LITEConnection connection, IHttpManager httpManager, bool compress = true)
{
    Connection = connection;
    var taskInfo = $"task: {taskID} connection: {Connection.name} resource: {routedItem.resource}";
    var profile = _profileStorage.Current;
    var stopWatch = new Stopwatch();
    stopWatch.Start();

    //string url = Connection.URL + $"/api/File/{routedItem.box}/{routedItem.resource}";
    string url = Connection.URL + FileAgentConstants.GetDownloadUrl(routedItem);

    // each download gets its own guid-named temp directory under toRules
    string dir = profile.tempPath + Path.DirectorySeparatorChar + Connection.name + Path.DirectorySeparatorChar + Constants.Dirs.ToRules + Path.DirectorySeparatorChar + Guid.NewGuid();
    Directory.CreateDirectory(dir);

    long fileSize = 0;
    HttpResponseMessage response = null;
    MultipartFileStreamProvider streamProvider = null;
    MultipartFileStreamProvider contents = null;
    var httpClient = _liteHttpClient.GetClient(connection);

    try
    {
        _logger.Log(LogLevel.Debug, $"{taskInfo} download dir will be {dir}");
        _logger.Log(LogLevel.Debug, $"{taskInfo} url: {url} attempt: {routedItem.attempts}");

        var cookies = _liteHttpClient.GetCookies(url);
        _logger.LogCookies(cookies, taskInfo);

        // issue the GET (headers-only completion so large bodies stream to disk)
        var task = httpClient.GetAsync(url, HttpCompletionOption.ResponseHeadersRead, _taskManager.cts.Token);
        try
        {
            response = await task.ConfigureAwait(false);
        }
        catch (TaskCanceledException)
        {
            _logger.Log(LogLevel.Information, $"{taskInfo} Task Canceled");
        }

        // BUG FIX: previously execution fell through with a null response after
        // cancellation and dereferenced it (masked by the NullReferenceException
        // catch below). Nothing arrived, so there is nothing to process.
        if (response == null)
        {
            return;
        }

        // output the result
        _logger.LogHttpResponseAndHeaders(response, taskInfo);
        //if(Logger.logger.FileTraceLevel == "Verbose")
        _logger.Log(LogLevel.Debug, $"{taskInfo} await response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");

        switch (response.StatusCode)
        {
            case HttpStatusCode.OK:
                break;
            case HttpStatusCode.NotFound:
                // resource gone on the EGS side; dequeue as an error
                routedItem.Error = HttpStatusCode.NotFound.ToString();
                _routedItemManager.Init(routedItem);
                _routedItemManager.Dequeue(Connection, Connection.fromEGS, nameof(Connection.fromEGS), error: true);
                return;
            case HttpStatusCode.Unauthorized:
                httpManager.loginNeeded = true;
                _liteHttpClient.DumpHttpClientDetails();
                return;
            default:
                _liteHttpClient.DumpHttpClientDetails();
                return;
        }

        if (!_util.IsDiskAvailable(dir, profile, routedItem.length))
        {
            _logger.Log(LogLevel.Debug, $"{taskInfo} Insufficient disk to write {url} to {dir} guessing it could be 16GB");
            return;
        }

        streamProvider = new MultipartFileStreamProvider(dir, 1024000);
        try
        {
            contents = await response.Content.ReadAsMultipartAsync(streamProvider, _taskManager.cts.Token).ConfigureAwait(false);
        }
        catch (Exception e)
        {
            //MIME is corrupt such as Unexpected end of MIME multipart stream. MIME multipart message is not complete.
            //This usually happens if the upload does not complete. Catch as "normal" and remove resource as if success
            //since retrying will not help this condition.
            _logger.LogFullException(e, taskInfo);
            _liteHttpClient.DumpHttpClientDetails();

            if (await _deleteEGSResourceService.DeleteEGSResource(taskID, routedItem, connection, httpManager).ConfigureAwait(false))
            {
                _routedItemManager.Init(routedItem);
                _routedItemManager.Dequeue(Connection, Connection.fromEGS, nameof(Connection.fromEGS), error: false);
            }
            else
            {
                routedItem.Error = "Unable to delete EGS resource";
                _routedItemManager.Init(routedItem);
                _routedItemManager.Dequeue(Connection, Connection.fromEGS, nameof(Connection.fromEGS), error: true);
            }
            return;
        }

        // fan each downloaded multipart file out as its own RoutedItem to rules
        int index = 0;
        _logger.Log(LogLevel.Debug, $"{taskInfo} Splitting {contents?.FileData.Count} files into RoutedItems.");
        foreach (var part in contents.FileData)
        {
            try
            {
                index++;
                fileSize += new FileInfo(part.LocalFileName).Length;
                _logger.Log(LogLevel.Debug, $"{taskInfo} downloaded file: {part.LocalFileName}");

                RoutedItem ri = new RoutedItem(fromConnection: Connection.name, sourceFileName: part.LocalFileName, taskID: taskID, fileIndex: index, fileCount: contents.FileData.Count)
                {
                    type = RoutedItem.Type.FILE
                };

                _logger.Log(LogLevel.Debug, $"{taskInfo} Enqueuing RoutedItem {routedItem.sourceFileName}");
                _routedItemManager.Init(ri);
                _routedItemManager.Enqueue(Connection, Connection.toRules, nameof(Connection.toRules));
            }
            catch (Exception e)
            {
                _logger.LogFullException(e, taskInfo);
            }
        }

        // tell the EGS we have the resource; dequeue with/without error accordingly
        if (await _deleteEGSResourceService.DeleteEGSResource(taskID, routedItem, connection, httpManager).ConfigureAwait(false))
        {
            _routedItemManager.Init(routedItem);
            _routedItemManager.Dequeue(Connection, Connection.fromEGS, nameof(Connection.fromEGS), error: false);
        }
        else
        {
            routedItem.Error = "Unable to delete EGS resource";
            _routedItemManager.Init(routedItem);
            _routedItemManager.Dequeue(Connection, Connection.fromEGS, nameof(Connection.fromEGS), error: true);
        }
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"Task was canceled.");
    }
    catch (NullReferenceException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        //throw e;
    }
    catch (HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"Inner Exception: {e.InnerException}");
        }
        _liteHttpClient.DumpHttpClientDetails();
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        _liteHttpClient.DumpHttpClientDetails();
    }
    finally
    {
        if (response != null)
        {
            response.Dispose();
        }
        _taskManager.Stop($"{Connection.name}.DownloadViaHttp");
    }

    stopWatch.Stop();
    _logger.Log(LogLevel.Information, $"{taskInfo} elapsed: {stopWatch.Elapsed} size: {fileSize} rate: {(float)fileSize / stopWatch.Elapsed.TotalMilliseconds * 1000 / 1000000} MB/s");
}
/// <summary>
/// Downloads a study (optionally narrowed to one series or one instance) from the
/// cloud via WADO as a multipart stream written straight to temp files, then fans
/// each file out as a DICOM RoutedItem to rules and records completion markers.
/// </summary>
/// <param name="connection">Cloud connection to download through.</param>
/// <param name="taskID">Task identifier used in log prefixes and on created RoutedItems.</param>
/// <param name="study">Study whose url is the base WADO request.</param>
/// <param name="httpManager">Receives loginNeeded=true on 401 responses.</param>
/// <param name="series">Optional series to narrow the request to.</param>
/// <param name="instance">Optional instance to narrow the request to.</param>
/// <param name="compress"></param>
public async Task wadoAsFileStream(
    LifeImageCloudConnection connection,
    int taskID,
    ImagingStudy study,
    IHttpManager httpManager,
    Series series = null,
    Instance instance = null,
    bool compress = true)
{
    Connection = connection;
    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    var stopWatch = new Stopwatch();
    stopWatch.Start();

    string url = $"{study.url}";
    string dir = _profileStorage.Current.tempPath + Path.DirectorySeparatorChar + Connection.name + Path.DirectorySeparatorChar + Constants.Dirs.ToRules + Path.DirectorySeparatorChar + Guid.NewGuid();
    long fileSize = 0;
    HttpResponseMessage response = null;
    MultipartFileStreamProvider streamProvider = null;
    MultipartFileStreamProvider contents = null;
    var httpClient = _liteHttpClient.GetClient(connection);

    try
    {
        _logger.Log(LogLevel.Debug, $"{taskInfo} study url: {url} attempt {study.attempts}");

        if (series != null)
        {
            // BUG FIX: was "Connection.URL += ..." which permanently appended the
            // series path to the connection's base URL on every call; the suffix
            // belongs on the local request url (which is what is logged and fetched).
            url += $"/series/{series.uid.Substring(8)}";
            _logger.Log(LogLevel.Debug, $"{taskInfo} seriesURL: {url} attempt {series.attempts}");
        }

        if (instance != null)
        {
            instance.downloadStarted = DateTime.Now;
            instance.attempts++;
            // BUG FIX: same as above — extend the request url, not Connection.URL.
            url += $"/instances/{instance.uid}";
            _logger.Log(LogLevel.Debug, $"{taskInfo} instanceURL: {url} attempt {instance.attempts}");
        }

        var cookies = _liteHttpClient.GetCookies(url);
        _logger.LogCookies(cookies, taskInfo);

        // issue the GET (headers-only completion so large bodies stream to disk)
        var task = httpClient.GetAsync(url, HttpCompletionOption.ResponseHeadersRead, _taskManager.cts.Token);
        try
        {
            response = await task;
        }
        catch (System.Threading.Tasks.TaskCanceledException)
        {
            _logger.Log(LogLevel.Information, $"{taskInfo} Task Canceled");
        }

        // BUG FIX: cancellation leaves response null; bail out instead of
        // dereferencing it (previously masked by the NullReferenceException catch).
        if (response == null)
        {
            return;
        }

        // output the result
        _logger.LogHttpResponseAndHeaders(response, taskInfo);
        //if(Logger.logger.FileTraceLevel == "Verbose")
        _logger.Log(LogLevel.Debug, $"{taskInfo} await response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");

        if (response.StatusCode != HttpStatusCode.OK)
        {
            if (response.StatusCode == HttpStatusCode.Unauthorized)
            {
                httpManager.loginNeeded = true;
            }
            _liteHttpClient.DumpHttpClientDetails();
            return;
        }

        // 2018-05-09 shb need to get header from Cloud to tell us how big it is
        if (!_util.IsDiskAvailable(dir, _profileStorage.Current, 16000000000)) //just using 16GB as a catch all
        {
            _logger.Log(LogLevel.Debug, $"{taskInfo} Insufficient disk to write {url} to {dir} guessing it could be 16GB");
            return;
            //throw new Exception($"Insufficient disk to write {url} to {dir} guessing it could be 16GB");
        }

        _logger.Log(LogLevel.Debug, $"{taskInfo} download dir will be {dir}");
        Directory.CreateDirectory(dir);
        streamProvider = new MultipartFileStreamProvider(dir);
        contents = await response.Content.ReadAsMultipartAsync(streamProvider, _taskManager.cts.Token);

        // fan each downloaded multipart file out as its own DICOM RoutedItem
        int index = 0;
        _logger.Log(LogLevel.Debug, $"{taskInfo} Splitting {contents.FileData.Count} files into RoutedItems.");
        foreach (var part in contents.FileData)
        {
            try
            {
                index++;
                fileSize += new System.IO.FileInfo(part.LocalFileName).Length;
                _logger.Log(LogLevel.Debug, $"{taskInfo} downloaded file: {part.LocalFileName}");

                RoutedItem routedItem = new RoutedItem(fromConnection: Connection.name, sourceFileName: part.LocalFileName, taskID: taskID, fileIndex: index, fileCount: contents.FileData.Count)
                {
                    type = RoutedItem.Type.DICOM,
                    Study = study.uid,
                    AccessionNumber = study.accession?.value,
                    //study.availability;
                    //routedItem.Description = study.description;
                    //study.extension;
                    //study.modalityList;
                    PatientID = study.patient?.display,
                    //study.referrer;
                    //study.resourceType;
                    // BUG FIX: was series.uid, which threw NullReferenceException for
                    // instance-only downloads (series defaults to null) and prevented
                    // the downloaded files from ever being enqueued.
                    Series = series?.uid
                };
                //study.started;
                //study.url;

                _logger.Log(LogLevel.Debug, $"{taskInfo} Enqueuing RoutedItem {routedItem.sourceFileName}");
                _routedItemManager.Init(routedItem);
                _routedItemManager.Enqueue(Connection, Connection.toRules, nameof(Connection.toRules));
            }
            catch (Exception e)
            {
                _logger.LogFullException(e, taskInfo);
            }
        }

        //2018-04-27 shb moved completion marking outside of part loop to avoid duplicate entries in markSeriesComplete
        //also added duplicate check.
        if (series != null)
        {
            series.downloadCompleted = DateTime.Now;
            lock (Connection.markSeriesComplete)
            {
                if (!Connection.markSeriesComplete.Contains(series))
                {
                    Connection.markSeriesComplete.Add(series);
                }
            }
        }
        else if (instance != null)
        {
            //means this came from studies calls so we need to mark this download as complete
            instance.downloadCompleted = DateTime.Now;
            lock (Connection.markDownloadsComplete)
            {
                Connection.markDownloadsComplete.Add(new string[] { url, "download-complete" });
            }
        }
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (NullReferenceException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        //throw e;
    }
    catch (HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} Inner Exception: {e.InnerException}");
        }
        _liteHttpClient.DumpHttpClientDetails();
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        _liteHttpClient.DumpHttpClientDetails();
    }
    finally
    {
        if (response != null)
        {
            response.Dispose();
        }
        _taskManager.Stop($"{Connection.name}.Wado");
    }

    stopWatch.Stop();
    _logger.Log(LogLevel.Information, $"{taskInfo} elapsed: {stopWatch.Elapsed} size: {fileSize} rate: {(float)fileSize / stopWatch.Elapsed.TotalMilliseconds * 1000 / 1000000} MB/s");
}
/// <summary>
/// Store takes a batch of RoutedItem, all going to the same share destination, and uploads them
/// as a single multipart/related POST. Batching solves the many-small-files problem; larger files
/// can go individually.
/// </summary>
/// <param name="batch">Items to upload; the first record's toConnections determines the share destinations.</param>
/// <param name="taskID">Task identifier, used only for log correlation.</param>
/// <param name="connection">The LITE connection describing the EGS endpoint.</param>
/// <returns>Completes when the upload attempt (success or failure) has been fully processed.</returns>
public async Task store(List<RoutedItem> batch, int taskID, LITEConnection connection)
{
    Connection = connection;
    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    StreamContent streamContent = null;
    MultipartContent content = null;
    HttpResponseMessage response = null;
    string testFile = null;
    var firstRecord = batch.First();
    try
    {
        var stopWatch = new Stopwatch();
        stopWatch.Start();

        //set the URL
        //string resourceURL = Connection.URL + "/api/File";
        string resourceURL = Connection.URL + FileAgentConstants.BaseUrl;
        _logger.Log(LogLevel.Debug, $"{taskInfo} URL: {resourceURL}");

        // generate guid for boundary...boundaries cannot be accidentally found in the content
        var boundary = Guid.NewGuid();
        _logger.Log(LogLevel.Debug, $"{taskInfo} boundary: {boundary}");

        // create the multipart/related content; each file becomes one part
        content = new MultipartContent("related", boundary.ToString());

        // add the sharing headers: one X-Li-Destination value per share destination box
        List<string> shareHeader = new List<string>();
        if (Connection.shareDestinations != null)
        {
            foreach (var connectionSet in firstRecord.toConnections.FindAll(e => e.connectionName.Equals(Connection.name)))
            {
                if (connectionSet.shareDestinations != null)
                {
                    foreach (var shareDestination in connectionSet.shareDestinations)
                    {
                        shareHeader.Add(shareDestination.boxUuid);
                        _logger.Log(LogLevel.Debug, $"{taskInfo} sharing to: {shareDestination.boxId} {shareDestination.boxName} {shareDestination.groupId} {shareDestination.groupName} {shareDestination.organizationName} {shareDestination.publishableBoxType}");
                    }
                }
            }
        }
        content.Headers.Add("X-Li-Destination", shareHeader);

        long fileSize = 0;
        var profile = _profileStorage.Current;
        var dir = profile.tempPath + Path.DirectorySeparatorChar + Connection.name + Path.DirectorySeparatorChar + "toEGS";
        Directory.CreateDirectory(dir);
        // scratch .gz file used only to measure the achievable compression ratio
        testFile = dir + Path.DirectorySeparatorChar + Guid.NewGuid() + ".gz";
        using (FileStream compressedFileStream = File.Create(testFile))
        {
            using GZipStream compressionStream = new GZipStream(compressedFileStream, CompressionMode.Compress);
            foreach (var routedItem in batch)
            {
                if (File.Exists(routedItem.sourceFileName))
                {
                    routedItem.stream = File.OpenRead(routedItem.sourceFileName);
                    if (Connection.calcCompressionStats)
                    {
                        routedItem.stream.CopyTo(compressionStream);
                        // BUGFIX: CopyTo leaves the FileStream positioned at EOF, so the
                        // StreamContent below would upload zero bytes. Rewind before posting.
                        routedItem.stream.Position = 0;
                    }
                    fileSize += routedItem.length;
                    streamContent = new StreamContent(routedItem.stream);
                    streamContent.Headers.ContentDisposition = new ContentDispositionHeaderValue("attachment")
                    {
                        FileName = routedItem.sourceFileName
                    };
                    content.Add(streamContent);
                    streamContent.Headers.Add("content-type", "application/octet-stream");
                }
                else
                {
                    // source file was cleaned up before we got to it; fail this item only
                    _logger.Log(LogLevel.Error, $"{taskInfo} {routedItem.sourceFileName} no longer exists. Increase tempFileRetentionHours for heavy transfer backlogs that may take hours!!");
                    routedItem.Error = "File no longer exists";
                    _routedItemManager.Init(routedItem);
                    _routedItemManager.Dequeue(Connection, Connection.toEGS, nameof(Connection.toEGS), error: true);
                }
            }
        }

        if (Connection.calcCompressionStats)
        {
            FileInfo info = new FileInfo(testFile);
            _logger.Log(LogLevel.Information, $"{taskInfo} orgSize: {fileSize} compressedSize: {info.Length} reduction: {(fileSize == 0 ? 0 : (fileSize * 1.0 - info.Length) / (fileSize) * 100)}%");
        }

        // issue the POST; pass the shared cancellation token so shutdown can abort the upload
        // (consistent with RegisterWithEGS)
        Task<HttpResponseMessage> task;
        var httpClient = _liteHttpClient.GetClient(connection);

        if (firstRecord.Compress == true)
        {
            var compressedContent = new CompressedContent(content, "gzip");
            _logger.Log(LogLevel.Debug, $"{taskInfo} compressedContent.Headers {compressedContent.Headers} ");
            // NOTE(review): header is stripped on purpose here — presumably the server rejects
            // Content-Encoding on this endpoint; confirm before changing.
            compressedContent.Headers.Remove("Content-Encoding");

            var cookies = _liteHttpClient.GetCookies(resourceURL);
            _logger.LogCookies(cookies, taskInfo);

            task = httpClient.PostAsync(resourceURL, compressedContent, _taskManager.cts.Token);
        }
        else
        {
            _logger.Log(LogLevel.Debug, $"{taskInfo} will send content.Headers {content.Headers}");

            var cookies = _liteHttpClient.GetCookies(resourceURL);
            _logger.LogCookies(cookies, taskInfo);

            task = httpClient.PostAsync(resourceURL, content, _taskManager.cts.Token);
        }

        response = await task;

        // output the result
        _logger.LogHttpResponseAndHeaders(response, taskInfo);
        _logger.Log(LogLevel.Debug, $"{taskInfo} response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");

        stopWatch.Stop();
        _logger.Log(LogLevel.Information, $"{taskInfo} elapsed: {stopWatch.Elapsed} size: {fileSize} rate: {(float)fileSize / stopWatch.Elapsed.TotalMilliseconds * 1000 / 1000000} MB/s");

        switch (response.StatusCode)
        {
            case HttpStatusCode.Created:
                //dequeue the work, we're done!
                // dispose first so the open FileStreams release their locks before
                // Dequeue cleans the source files up
                if (streamContent != null) { streamContent.Dispose(); }
                if (response != null) { response.Dispose(); }
                if (content != null) { content.Dispose(); }
                foreach (var ri in batch)
                {
                    _routedItemManager.Init(ri);
                    _routedItemManager.Dequeue(Connection, Connection.toEGS, nameof(Connection.toEGS));
                }
                //let EGGS know it's available, or when we convert udt to .net core then perhaps push so no open socket required on client.
                //await SendToAllHubs(LITEServicePoint, batch);
                break;

            case HttpStatusCode.UnprocessableEntity:
                //dequeue the work, we're done!
                _logger.Log(LogLevel.Warning, $"creation of {firstRecord.sourceFileName} and others in batch failed with {response.StatusCode}");
                if (streamContent != null) { streamContent.Dispose(); }
                if (response != null) { response.Dispose(); }
                if (content != null) { content.Dispose(); }
                foreach (var ri in batch)
                {
                    ri.Error = HttpStatusCode.UnprocessableEntity.ToString();
                    _routedItemManager.Init(ri);
                    _routedItemManager.Dequeue(Connection, Connection.toEGS, nameof(Connection.toEGS), error: true);
                }
                break;

            default:
                // leave the batch queued for retry; flag re-login on 401
                if (response.StatusCode == HttpStatusCode.Unauthorized)
                {
                    Connection.loginNeeded = true;
                }
                _logger.Log(LogLevel.Warning, $"creation of {firstRecord.sourceFileName} and others in batch failed with {response.StatusCode}");
                _liteHttpClient.DumpHttpClientDetails();
                break;
        }

        //delete the compression test file
        File.Delete(testFile);
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"Task was canceled.");
    }
    catch (HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"Inner Exception: {e.InnerException}");
        }
        _liteHttpClient.DumpHttpClientDetails();
    }
    catch (FileNotFoundException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} {e.Message} {e.StackTrace}");
    }
    catch (IOException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} {e.Message} {e.StackTrace}");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        _liteHttpClient.DumpHttpClientDetails();
    }
    finally
    {
        try
        {
            // double-Dispose on the success/422 paths is a safe no-op
            if (streamContent != null) { streamContent.Dispose(); }
            if (response != null) { response.Dispose(); }
            if (content != null) { content.Dispose(); }
            // BUGFIX: guard — testFile is null if we faulted before it was assigned,
            // and File.Delete(null) throws ArgumentNullException
            if (testFile != null) { File.Delete(testFile); }
            _taskManager.Stop($"{Connection.name}.Store");
        }
        catch (Exception e)
        {
            _logger.LogFullException(e, taskInfo);
        }
    }
}
/// <summary>
/// Retrieves the list of studies needing download from LifeImage Cloud, merges them into the
/// connection's known studies via MergeStudies, and logs a per-study summary.
/// </summary>
/// <param name="taskID">Task identifier, used only for log correlation.</param>
/// <param name="connection">Cloud connection whose studies endpoint is queried.</param>
/// <param name="httpManager">Receives loginNeeded = true when the server answers 401.</param>
public async Task getStudies(int taskID, LifeImageCloudConnection connection, IHttpManager httpManager)
{
    var Connection = connection;
    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    var httpClient = _liteHttpClient.GetClient(connection);

    try
    {
        //set the URL
        //string studiesURL = Connection.URL + "/api/agent/v1/studies?state=NEEDS_DOWNLOADING&lifeImageSummary=true"; //add summary
        string studiesURL = Connection.URL + CloudAgentConstants.GetStudies; //add summary
        _logger.Log(LogLevel.Debug, $"{taskInfo} studiesURL: {studiesURL}");

        var cookies = _liteHttpClient.GetCookies(studiesURL);
        _logger.LogCookies(cookies, taskInfo);

        // issue the GET, honoring the shared cancellation token like the other agent calls
        var response = await httpClient.GetAsync(studiesURL, _taskManager.cts.Token);

        // output the result
        _logger.LogHttpResponseAndHeaders(response, taskInfo);
        _logger.Log(LogLevel.Debug, $"{taskInfo} response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");

        if (response.StatusCode != HttpStatusCode.OK)
        {
            if (response.StatusCode == HttpStatusCode.Unauthorized)
            {
                httpManager.loginNeeded = true;
            }
            _logger.Log(LogLevel.Warning, $"{taskInfo} {response.StatusCode} {response.ReasonPhrase}");
            _liteHttpClient.DumpHttpClientDetails();
            // BUGFIX: bail out here. The original fell through and tried to deserialize the
            // error body as RootObject, producing a spurious (swallowed) SerializationException.
            return;
        }

        //2018-02-06 shb convert from stream to JSON and clean up any non UTF-8 that appears like it did
        // when receiving "contains invalid UTF8 bytes" exception
        var serializer = new DataContractJsonSerializer(typeof(RootObject));
        using var streamReader = new StreamReader(await response.Content.ReadAsStreamAsync(), Encoding.UTF8);
        byte[] byteArray = Encoding.UTF8.GetBytes(streamReader.ReadToEnd());
        using MemoryStream stream = new MemoryStream(byteArray);
        var newStudies = serializer.ReadObject(stream) as RootObject;
        MergeStudies(newStudies, Connection);

        if (Connection.studies != null && Connection.studies.ImagingStudy != null)
        {
            _logger.Log(LogLevel.Information, $"{taskInfo} studies.ImagingStudy.Count: {Connection.studies.ImagingStudy.Count}");
            foreach (var imagingStudy in Connection.studies.ImagingStudy)
            {
                _logger.Log(LogLevel.Information, $"{taskInfo} ImagingStudy.uid: {imagingStudy.uid} series:{imagingStudy.numberOfSeries} instances:{imagingStudy.numberOfInstances}");
            }
        }
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (System.Runtime.Serialization.SerializationException e)
    {
        //eat it for now
        _logger.Log(LogLevel.Warning, $"{taskInfo} {e.Message} {e.StackTrace}");
    }
    catch (System.Net.Http.HttpRequestException e)
    {
        _logger.Log(LogLevel.Warning, $"{taskInfo} {e.Message} {e.StackTrace}");
        if (e.InnerException != null)
        {
            _logger.Log(LogLevel.Warning, $"{taskInfo} Inner Exception: {e.InnerException}");
        }
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        //throw e;
    }
}