Example #1
        public async Task<RoutedItem> SendToRules(RoutedItem ri, IRoutedItemManager routedItemManager, IConnectionRoutedCacheManager connectionRoutedCacheManager)
        {
            var      taskInfo = $"task: {ri.TaskID} {ri.fromConnection}";
            Priority priority = Priority.Low;

            //first things first, deep copy the routedItem so the sender can dequeue safely
            //RoutedItemEx routedItem = (RoutedItemEx)ri.Clone();

            routedItemManager.Init(ri);
            RoutedItemEx routedItem = (RoutedItemEx)routedItemManager.Clone();

            try
            {
                //Check rules and if simple, don't open or save dicom
                var  destRulesToProcess = Item.destRules.FindAll(e => e.fromConnectionName == routedItem.fromConnection);
                bool simple             = true;
                foreach (var destRule in destRulesToProcess)
                {
                    if (!destRule.IsSimple())
                    {
                        simple = false;
                    }
                }

                if (!simple)
                {
                    //This allows the sender to just provide a filename.
                    //Open the file and stream if file and stream are null and fileName is specified.
                    //If the sender provides a stream and/or open DicomFile, then we need to close them
                    //when done.
                    routedItemManager.Open();

                    //preProcessFromConnectionScriptNames executes once for each inbound stream before streaming out.
                    //A perceived problem with calling it here is that we already have both the file and the stream,
                    //and tag morphing needs to happen on the file before streaming (or between the two streams),
                    //so this is also being tried in DicomListener instead of here.  Moving it will require that
                    //Connections implement script calls.
                    routedItem.rules = this.Item;
                    await RunPreProcessFromConnectionScripts(routedItem);

                    //scripts can modify tags and content.  We need to save the file before proceeding
                    string filePath = null;
                    if (routedItem.sourceDicomFile != null)
                    {
                        var dir = _profileStorage.Current.tempPath + Path.DirectorySeparatorChar + "Rule.cs" + Path.DirectorySeparatorChar + "toStage2";
                        Directory.CreateDirectory(dir);
                        filePath = dir + Path.DirectorySeparatorChar + System.Guid.NewGuid();
                        _logger.Log(LogLevel.Debug, $"{taskInfo} routedItem.sourceFileName: {routedItem.sourceFileName} is being saved to {filePath} after PreProcessFromConnectionScripts completion");
                        var oldfile = routedItem.sourceFileName;

                        if (!_util.IsDiskAvailable(routedItem.sourceDicomFile.File.Name, _profileStorage.Current))
                        {
                            throw new Exception($"Insufficient disk to write {filePath}");
                        }

                        if (routedItem.sourceDicomFile.File.Exists)
                        {
                            routedItem.sourceFileName = filePath;
                            routedItem.sourceDicomFile.Save(filePath);
                        }
                    }
                    else if (routedItem.sourceFileName.EndsWith(".hl7"))
                    {
                        //do nothing
                    }

                    routedItemManager.Close();
                    routedItemManager.Open();

                    routedItem.toConnections = Eval(routedItem);

                    //preProcessToConnectionScriptNames executes once for each outbound stream before streaming out
                    //moved this outside the toConnection loop to allow the toConnection array to be manipulated
                    await RunPreProcessToConnectionScripts(routedItem);

                    priority = await CheckAndDelayOnWaitConditions(routedItem);
                }
                else
                {
                    routedItem.toConnections = Eval(routedItem);
                }

                foreach (ConnectionSet toConnection in routedItem.toConnections)
                {
                    Connection toConn = _profileStorage.Current.connections.Find(e => e.name.Equals(toConnection.connectionName));
                    _logger.Log(LogLevel.Debug, $"{taskInfo} ToConnection Found, Sending to: {toConn.name}");


                    //each receiver needs its own clone
                    routedItemManager.Init(routedItem);
                    var clone = (RoutedItem)routedItemManager.Clone();

                    connectionRoutedCacheManager.Route(clone);


                    //postProcessToConnectionScriptNames executes once for each outbound stream after streaming out
                    await RunPostProcessToConnectionScripts(routedItem);
                }

                if (!simple)
                {
                    DisengageWaitConditions(routedItem);
                    //postProcessFromConnectionScriptNames executes once for each inbound stream after streaming out
                    await RunPostProcessFromConnectionScripts(routedItem);
                }

                return routedItem;
            }
            catch (TaskCanceledException)
            {
                _logger.Log(LogLevel.Information, $"Task was canceled.");
            }
            catch (Exception e)
            {
                _logger.LogFullException(e);
                throw;
            }
            finally
            {
                routedItemManager.Close();
                routedItem = null;
            }

            return routedItem; //only reached after a canceled task; routedItem is null here
        }
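A minimal caller sketch for SendToRules. This is illustrative only: the _routedItemManager and _connectionCacheManager fields and the ProcessInbound wrapper are assumptions, not part of the source.

        //Hypothetical caller: route one inbound item through the rules engine.
        //SendToRules clones the item internally, so the caller can dequeue the original safely afterwards.
        public async Task ProcessInbound(RoutedItem inbound)
        {
            var routed = await SendToRules(inbound, _routedItemManager, _connectionCacheManager);

            if (routed == null)
            {
                //SendToRules returns null only when the task was canceled before routing completed
                _logger.Log(LogLevel.Information, $"task: {inbound.TaskID} routing was canceled");
            }
        }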
        /// <summary>
        /// Enqueue serializes a RoutedItem's metadata to disk, moves or copies its related file artifact(s)
        /// into the queue's staging directory, and adds the item to the specified connection's queue.
        /// </summary>
        /// <param name="Item">The RoutedItem to enqueue.</param>
        /// <param name="conn">The destination connection.</param>
        /// <param name="list">The BlockingCollection queue to add the item to.</param>
        /// <param name="queueName">The queue name, used to build the staging directory path.</param>
        /// <param name="copy">If true, copy the source file into the staging directory instead of moving it.</param>
        public void Enqueue(RoutedItem Item, Connection conn, BlockingCollection<RoutedItem> list, string queueName, bool copy = false)
        {
            Throw.IfNull(Item);

            var    taskInfo = $"task: {Item.TaskID} connection: {conn.name}";
            string dir;

            Item.attempts    = 0; //attempts from prior stages need to be cleared on an enqueue
            Item.lastAttempt = DateTime.MinValue;

            try
            {
                //move or copy the sourceFileName
                if (Item.sourceFileName != null) //sourceFileName can be null for RoutedItems that do not reference a file such as Requests (Q/R)
                {
                    dir = _profileStorage.Current.tempPath + Path.DirectorySeparatorChar + conn.name + Path.DirectorySeparatorChar + queueName;
                    Directory.CreateDirectory(dir);

                    var filename = dir + Path.DirectorySeparatorChar + System.Guid.NewGuid();
                    if (Item.sourceFileName.EndsWith(".hl7"))
                    {
                        filename += ".hl7";
                    }

                    if (File.Exists(Item.sourceFileName))
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} Setting last access time to now for {Item.sourceFileName} to avoid the purge.");
                        File.SetLastAccessTime(Item.sourceFileName, DateTime.Now);

                        if (Item.toConnections.Count <= 1 && !copy)  //the connection might be enqueuing to itself before rule eval, so toConnections isn't yet populated.  If so we can safely move
                        {
                            _logger.Log(LogLevel.Debug, $"{taskInfo} Moving {Item.sourceFileName} to {filename}");
                            File.Move(Item.sourceFileName, filename);
                            Item.sourceFileName = filename;
                        }
                        else
                        {
                            _logger.Log(LogLevel.Debug, $"{taskInfo} Copying {Item.sourceFileName} to {filename}");
                            File.Copy(Item.sourceFileName, filename);
                            Item.sourceFileName = filename;
                        }
                    }
                    else
                    {
                        _logger.Log(LogLevel.Critical, $"{taskInfo} sourceFileName: {Item.sourceFileName} does not exist.  Cannot Route this request.");
                        return;
                    }
                }


                //serialize the routedItem metadata to disk

                JsonSerializerOptions settings = new JsonSerializerOptions
                {
                    //WriteIndented = true
                };

                dir = _profileStorage.Current.tempPath + Path.DirectorySeparatorChar + conn.name + Path.DirectorySeparatorChar + queueName + Path.DirectorySeparatorChar + Constants.Dirs.Meta;
                Directory.CreateDirectory(dir);
                string fileName = dir + Path.DirectorySeparatorChar + System.Guid.NewGuid() + Constants.Extensions.MetaExt;
                Item.RoutedItemMetaFile = fileName;

                string json = JsonSerializer.Serialize(Item, settings);

                if (string.IsNullOrEmpty(json))
                {
                    throw new Exception("json is empty or null");
                }

                if (!_util.IsDiskAvailable(fileName, _profileStorage.Current, json.Length))
                {
                    throw new Exception($"Insufficient disk to write {fileName}");
                }

                File.WriteAllText(fileName, json);
            }
            catch (Exception e)
            {
                WriteDetailedLog(e, Item, taskInfo);
            }

            try
            {
                //NOTE: all priorities currently go to the end of the queue; the comments below
                //describe how Medium/High items could be placed ahead of lower-priority work.
                switch (Item.priority)
                {
                case Priority.Low:
                    list.Add(Item, _taskManager.cts.Token);      //to the end of the line you go
                    break;

                case Priority.Medium:
                    list.Add(Item, _taskManager.cts.Token);      //to the end of the line you go
                                                                 //could insert above the first Low-priority item, e.g. list.Insert(list.Count / 2, Item)
                    break;

                case Priority.High:
                    list.Add(Item, _taskManager.cts.Token);      //to the end of the line you go
                                                                 //could be placed above the first Medium/Low-priority item
                    break;
                }
            }
            catch (Exception e)
            {
                WriteDetailedLog(e, Item, taskInfo);
            }
        }
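A hedged usage sketch for Enqueue. Only the RoutedItem constructor shape and the Enqueue signature come from the code above; the connection, queue collection, and file path are illustrative assumptions.

        //Illustrative only: stage a received file onto a connection's queue.
        var item = new RoutedItem(fromConnection: "dicomListener", sourceFileName: @"C:\temp\incoming\1.dcm",
                                  taskID: 7, fileIndex: 1, fileCount: 1);

        //With copy: false and at most one toConnection, Enqueue moves (rather than copies) the source file into
        //tempPath/<connection>/<queueName>, and the serialized metadata lands in .../<queueName>/meta/<guid>.meta.
        Enqueue(item, toRulesConnection, toRulesQueue, "toRules", copy: false);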
        /// <summary>
        /// EnqueueCache adds a RoutedItem to the response cache keyed by id, merging responses, tag data,
        /// and cloud task results into an existing entry when the id is already cached, and serializes the
        /// cache entry's metadata to disk.
        /// </summary>
        /// <param name="Item"></param>
        /// <param name="conn"></param>
        /// <param name="list"></param>
        /// <param name="queueName"></param>
        /// <param name="copy"></param>
        public void EnqueueCache(RoutedItem Item, Connection conn, Dictionary<string, List<RoutedItem>> list, string queueName, bool copy = true)
        {
            Throw.IfNull(Item);

            var    taskInfo = $"task: {Item.TaskID} connection: {conn.name}";
            string dir;

            Item.attempts    = 0; //attempts from prior stages need to be cleared on an enqueue
            Item.lastAttempt = DateTime.MinValue;

            try
            {
                if (Item.id != null)
                {
                    lock (list)
                    {
                        //add id to cache if not present.
                        List<RoutedItem> cacheEntry = new List<RoutedItem>()
                        {
                            Item
                        };

                        if (list.TryAdd(Item.id, cacheEntry))
                        {
                            try
                            {
                                //serialize the cache entry to disk

                                JsonSerializerOptions settings = new JsonSerializerOptions();

                                dir = _profileStorage.Current.tempPath + Path.DirectorySeparatorChar + Constants.Dirs.ResponseCache + Path.DirectorySeparatorChar + queueName + Path.DirectorySeparatorChar + Constants.Dirs.Meta;
                                Directory.CreateDirectory(dir);
                                string fileName = dir + Path.DirectorySeparatorChar + System.Guid.NewGuid() + Constants.Extensions.MetaExt;
                                Item.RoutedItemMetaFile = fileName;

                                string json = JsonSerializer.Serialize(Item, settings); //list[id]

                                if (string.IsNullOrEmpty(json))
                                {
                                    throw new Exception("json is empty or null");
                                }

                                if (!_util.IsDiskAvailable(fileName, _profileStorage.Current, json.Length))
                                {
                                    throw new Exception($"Insufficient disk to write {fileName}");
                                }

                                File.WriteAllText(fileName, json);
                                _logger.Log(LogLevel.Debug, $"id: {Item.id} added to {queueName} file: {fileName} ");
                            }
                            catch (Exception e)
                            {
                                WriteDetailedLog(e, Item, taskInfo);
                            }
                        }
                        else
                        {
                            //we need to figure out whether this is a duplicate from route caching, i.e. it's not a bi-directional request
                            var existing = list[Item.id].Find(e => e.fromConnection == Item.fromConnection);
                            if (existing == null)
                            {
                                list[Item.id].Add(Item);
                                try
                                {
                                    //serialize the cache entry to disk

                                    JsonSerializerOptions settings = new JsonSerializerOptions
                                    {
                                        //WriteIndented = true
                                    };

                                    dir = _profileStorage.Current.tempPath + Path.DirectorySeparatorChar + Constants.Dirs.ResponseCache + Path.DirectorySeparatorChar + queueName + Path.DirectorySeparatorChar + Constants.Dirs.Meta;
                                    Directory.CreateDirectory(dir);
                                    string fileName = dir + Path.DirectorySeparatorChar + System.Guid.NewGuid() + Constants.Extensions.MetaExt;
                                    Item.RoutedItemMetaFile = fileName;

                                    string json = JsonSerializer.Serialize(Item, settings); //list[id]

                                    if (string.IsNullOrEmpty(json))
                                    {
                                        throw new Exception("json is empty or null");
                                    }

                                    if (!_util.IsDiskAvailable(fileName, _profileStorage.Current, json.Length))
                                    {
                                        throw new Exception($"Insufficient disk to write {fileName}");
                                    }

                                    File.WriteAllText(fileName, json);
                                    _logger.Log(LogLevel.Debug, $"id: {Item.id} merged to {queueName} file: {fileName} ");
                                }
                                catch (Exception e)
                                {
                                    WriteDetailedLog(e, Item, taskInfo);
                                }
                            }
                            else
                            {
                                if (Item.toConnections.Count > 0)
                                {
                                    existing.toConnections = Item.toConnections;
                                }

                                if (Item.status == RoutedItem.Status.COMPLETED || Item.status == RoutedItem.Status.FAILED)
                                {
                                    //add the responses
                                    existing.status = Item.status;
                                    foreach (var response in Item.response.ToArray())
                                    {
                                        if (!existing.response.Contains(response))
                                        {
                                            existing.response.Add(response);
                                        }
                                    }

                                    foreach (var kvp in Item.TagData.ToArray())
                                    {
                                        existing.TagData.TryAdd(kvp.Key, kvp.Value);
                                    }

                                    foreach (var result in Item.cloudTaskResults.ToArray())
                                    {
                                        if (!existing.cloudTaskResults.Contains(result))
                                        {
                                            existing.cloudTaskResults.Add(result);
                                        }
                                    }
                                }

                                try
                                {
                                    //serialize the cache entry to disk using existing filename

                                    JsonSerializerOptions settings = new JsonSerializerOptions
                                    {
                                        //WriteIndented = true
                                    };

                                    string json = JsonSerializer.Serialize(existing, settings); //list[id]

                                    if (string.IsNullOrEmpty(json))
                                    {
                                        throw new Exception("json is empty or null");
                                    }

                                    if (!_util.IsDiskAvailable(existing.RoutedItemMetaFile, _profileStorage.Current, json.Length))
                                    {
                                        throw new Exception($"Insufficient disk to write {existing.RoutedItemMetaFile}");
                                    }

                                    File.WriteAllText(existing.RoutedItemMetaFile, json);
                                    _logger.Log(LogLevel.Debug, $"id: {Item.id} merged to {queueName} file: {existing.RoutedItemMetaFile} ");
                                }
                                catch (Exception e)
                                {
                                    WriteDetailedLog(e, Item, taskInfo);
                                }
                            }
                        }
                    }
                }
                else
                {
                    _logger.Log(LogLevel.Warning, $"{taskInfo} Cannot cache RoutedItem because id is null: {Item.sourceFileName}");
                }
            }
            catch (Exception e)
            {
                WriteDetailedLog(e, Item, taskInfo);
            }
        }
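The serialize/disk-check/write block above is repeated three times inside EnqueueCache (and appears again in Enqueue and Dequeue) with only the target file varying. A small private helper along these lines would remove the duplication; this is a sketch, not code from the source.

        //Sketch of a helper capturing the repeated serialize-and-write-metadata pattern.
        //JsonSerializer is System.Text.Json, as used above.
        private void WriteMetaFile(RoutedItem item, string fileName)
        {
            string json = JsonSerializer.Serialize(item, new JsonSerializerOptions());

            if (string.IsNullOrEmpty(json))
            {
                throw new Exception("json is empty or null");
            }

            if (!_util.IsDiskAvailable(fileName, _profileStorage.Current, json.Length))
            {
                throw new Exception($"Insufficient disk to write {fileName}");
            }

            File.WriteAllText(fileName, json);
        }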
Example #4
        /// <summary>
        /// Dequeue removes a RoutedItem's metadata and related file artifact(s) from disk for a specified
        /// connection and queue. When error is true, the files are moved to an errors directory for
        /// diagnostics instead of being deleted.
        /// </summary>
        /// <param name="Item">The RoutedItem to dequeue.</param>
        /// <param name="conn">The connection the item was queued on.</param>
        /// <param name="queueName">The queue name, used to build the staging directory path.</param>
        /// <param name="error">If true, preserve the metadata and source file under the errors directory.</param>
        /// <param name="stream">An optional open stream to dispose before removing the files.</param>
        public void Dequeue(RoutedItem Item, Connection conn, string queueName, bool error = false, Stream stream = null)
        {
            var taskInfo = $"task: {Item.TaskID} connection: {conn.name}";

            Item.lastAttempt = DateTime.MaxValue;

            var dir = _profileStorage.Current.tempPath + Path.DirectorySeparatorChar + conn.name + Path.DirectorySeparatorChar + queueName + Path.DirectorySeparatorChar + Constants.Dirs.Errors;

            Directory.CreateDirectory(dir);
            string metadir = dir + Path.DirectorySeparatorChar + Constants.Dirs.Meta;

            Directory.CreateDirectory(metadir);

            var    fileName     = Guid.NewGuid().ToString();
            string metafileName = metadir + Path.DirectorySeparatorChar + fileName + Constants.Extensions.MetaExt;

            fileName = dir + Path.DirectorySeparatorChar + fileName;

            _logger.Log(LogLevel.Debug, $"{taskInfo} Dequeue meta: {Item.RoutedItemMetaFile} source: {Item.sourceFileName} error: {error}");

            if (stream != null)
            {
                stream.Dispose();
            }

            if (error)
            {
                //serialize the routedItem metadata to disk which should contain the error for diagnostics

                JsonSerializerOptions settings = new JsonSerializerOptions
                {
                    //WriteIndented = true
                };

                string json = JsonSerializer.Serialize(Item, settings);

                if (string.IsNullOrEmpty(json))
                {
                    throw new Exception("json is empty or null");
                }

                if (!_util.IsDiskAvailable(fileName, _profileStorage.Current, json.Length))
                {
                    throw new Exception($"Insufficient disk to write {fileName}");
                }

                File.WriteAllText(Item.RoutedItemMetaFile, json);

                //move to errors
                // move the meta file
                try
                {
                    if (Item.RoutedItemMetaFile != null && File.Exists(Item.RoutedItemMetaFile))
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} Move: {Item.RoutedItemMetaFile} to: {metafileName}");
                        File.Move(Item.RoutedItemMetaFile, metafileName);
                    }
                }
                catch (Exception e)
                {
                    WriteDetailedLog(e, Item, taskInfo);
                }

                // move source file

                try
                {
                    if (Item.sourceFileName != null && File.Exists(Item.sourceFileName))
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} Move: {Item.sourceFileName} to: {fileName}");
                        File.Move(Item.sourceFileName, fileName);
                    }
                }
                catch (Exception e)
                {
                    WriteDetailedLog(e, Item, taskInfo);
                }
            }
            else
            {
                // remove metadata
                try
                {
                    if (Item.RoutedItemMetaFile != null && File.Exists(Item.RoutedItemMetaFile))
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} Delete: {Item.RoutedItemMetaFile}");
                        File.Delete(Item.RoutedItemMetaFile);
                    }
                }
                catch (Exception e)
                {
                    WriteDetailedLog(e, Item, taskInfo);
                }

                // remove source file

                try
                {
                    if (Item.sourceFileName != null && File.Exists(Item.sourceFileName))
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} Delete: {Item.sourceFileName}");
                        File.Delete(Item.sourceFileName);
                    }
                }
                catch (Exception e)
                {
                    _logger.LogFullException(e, $"{taskInfo} routedItemMetaFile: {(Item.RoutedItemMetaFile ?? "null")}");
                }
            }
        }
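A hedged usage sketch for Dequeue. The SendAsync call, the item and conn variables, and the "toCloud" queue name are illustrative assumptions; only the Dequeue signature and the error semantics come from the code above.

        //Illustrative only: on success delete the staged artifacts; on failure keep them under .../errors for diagnostics.
        try
        {
            await SendAsync(item);                         //hypothetical send operation
            Dequeue(item, conn, "toCloud", error: false);  //success: delete metadata and source file
        }
        catch (Exception e)
        {
            item.Error = e.Message;                        //recorded in the serialized metadata
            Dequeue(item, conn, "toCloud", error: true);   //failure: move both files under the errors directory
        }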
        /// <summary>
        /// DownloadViaHttp downloads a queued resource from the EGS File API as a multipart stream, writes
        /// the parts to disk, enqueues each part to the rules engine as a RoutedItem, and then deletes the
        /// remote resource. RAM utilization remains low regardless of download size.
        /// </summary>
        /// <param name="taskID"></param>
        /// <param name="routedItem"></param>
        /// <param name="connection"></param>
        /// <param name="httpManager"></param>
        /// <param name="compress"></param>
        /// <returns></returns>
        public async Task DownloadViaHttp(int taskID, RoutedItem routedItem, LITEConnection connection, IHttpManager httpManager, bool compress = true)
        {
            Connection = connection;
            var taskInfo  = $"task: {taskID} connection: {Connection.name} resource: {routedItem.resource}";
            var profile   = _profileStorage.Current;
            var stopWatch = new Stopwatch();

            stopWatch.Start();
            //string url = Connection.URL + $"/api/File/{routedItem.box}/{routedItem.resource}";
            string url = Connection.URL + FileAgentConstants.GetDownloadUrl(routedItem);
            string dir = profile.tempPath + Path.DirectorySeparatorChar + Connection.name + Path.DirectorySeparatorChar + Constants.Dirs.ToRules + Path.DirectorySeparatorChar + Guid.NewGuid();

            Directory.CreateDirectory(dir);
            long fileSize = 0;
            HttpResponseMessage         response       = null;
            MultipartFileStreamProvider streamProvider = null;
            MultipartFileStreamProvider contents       = null;

            var httpClient = _liteHttpClient.GetClient(connection);

            try
            {
                _logger.Log(LogLevel.Debug, $"{taskInfo} download dir will be {dir}");
                _logger.Log(LogLevel.Debug, $"{taskInfo} url: {url} attempt: {routedItem.attempts}");

                var cookies = _liteHttpClient.GetCookies(url);
                _logger.LogCookies(cookies, taskInfo);

                // issue the GET
                var task = httpClient.GetAsync(url, HttpCompletionOption.ResponseHeadersRead, _taskManager.cts.Token);

                try
                {
                    response = await task.ConfigureAwait(false);
                }
                catch (TaskCanceledException)
                {
                    _logger.Log(LogLevel.Information, $"{taskInfo} Task Canceled");
                }

                // output the result
                _logger.LogHttpResponseAndHeaders(response, taskInfo);

                //if(Logger.logger.FileTraceLevel == "Verbose") _logger.Log(LogLevel.Debug,$"{taskInfo} await response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");
                switch (response.StatusCode)
                {
                case HttpStatusCode.OK:
                    break;

                case HttpStatusCode.NotFound:
                    routedItem.Error = HttpStatusCode.NotFound.ToString();

                    _routedItemManager.Init(routedItem);
                    _routedItemManager.Dequeue(Connection, Connection.fromEGS, nameof(Connection.fromEGS), error: true);
                    return;

                case HttpStatusCode.Unauthorized:
                    httpManager.loginNeeded = true;
                    _liteHttpClient.DumpHttpClientDetails();
                    return;

                default:
                    _liteHttpClient.DumpHttpClientDetails();
                    return;
                }

                if (!_util.IsDiskAvailable(dir, profile, routedItem.length))
                {
                    _logger.Log(LogLevel.Debug, $"{taskInfo} Insufficient disk to write {url} to {dir} guessing it could be 16GB");
                    return;
                }

                streamProvider = new MultipartFileStreamProvider(dir, 1024000);

                try
                {
                    contents = await response.Content.ReadAsMultipartAsync(streamProvider, _taskManager.cts.Token).ConfigureAwait(false);
                }
                catch (Exception e)
                {
                    //The MIME content is corrupt (e.g. "Unexpected end of MIME multipart stream. MIME multipart message is not complete."),
                    //which usually happens when the upload did not complete.  Catch this as "normal" and remove the
                    //resource as if it succeeded, since retrying will not help this condition.

                    _logger.LogFullException(e, taskInfo);

                    _liteHttpClient.DumpHttpClientDetails();

                    if (await _deleteEGSResourceService.DeleteEGSResource(taskID, routedItem, connection, httpManager).ConfigureAwait(false))
                    {
                        _routedItemManager.Init(routedItem);
                        _routedItemManager.Dequeue(Connection, Connection.fromEGS, nameof(Connection.fromEGS), error: false);
                    }
                    else
                    {
                        routedItem.Error = "Unable to delete EGS resource";
                        _routedItemManager.Init(routedItem);
                        _routedItemManager.Dequeue(Connection, Connection.fromEGS, nameof(Connection.fromEGS), error: true);
                    }

                    return;
                }

                int index = 0;
                _logger.Log(LogLevel.Debug, $"{taskInfo} Splitting {contents?.FileData.Count} files into RoutedItems.");
                foreach (var part in contents.FileData)
                {
                    try
                    {
                        index++;

                        fileSize += new FileInfo(part.LocalFileName).Length;

                        _logger.Log(LogLevel.Debug, $"{taskInfo} downloaded file: {part.LocalFileName}");

                        RoutedItem ri = new RoutedItem(fromConnection: Connection.name, sourceFileName: part.LocalFileName, taskID: taskID, fileIndex: index, fileCount: contents.FileData.Count)
                        {
                            type = RoutedItem.Type.FILE
                        };

                        _logger.Log(LogLevel.Debug, $"{taskInfo} Enqueuing RoutedItem {routedItem.sourceFileName}");

                        _routedItemManager.Init(ri);
                        _routedItemManager.Enqueue(Connection, Connection.toRules, nameof(Connection.toRules));
                    }
                    catch (Exception e)
                    {
                        _logger.LogFullException(e, taskInfo);
                    }
                }

                if (await _deleteEGSResourceService.DeleteEGSResource(taskID, routedItem, connection, httpManager).ConfigureAwait(false))
                {
                    _routedItemManager.Init(routedItem);
                    _routedItemManager.Dequeue(Connection, Connection.fromEGS, nameof(Connection.fromEGS), error: false);
                }
                else
                {
                    routedItem.Error = "Unable to delete EGS resource";
                    _routedItemManager.Init(routedItem);
                    _routedItemManager.Dequeue(Connection, Connection.fromEGS, nameof(Connection.fromEGS), error: true);
                }
            }
            catch (TaskCanceledException)
            {
                _logger.Log(LogLevel.Information, $"Task was canceled.");
            }
            catch (NullReferenceException e)
            {
                _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
                //throw e;
            }
            catch (HttpRequestException e)
            {
                _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
                if (e.InnerException != null)
                {
                    _logger.Log(LogLevel.Warning, $"Inner Exception: {e.InnerException}");
                }

                _liteHttpClient.DumpHttpClientDetails();
            }
            catch (Exception e)
            {
                _logger.LogFullException(e, taskInfo);
                _liteHttpClient.DumpHttpClientDetails();
            }
            finally
            {
                if (response != null)
                {
                    response.Dispose();
                }

                _taskManager.Stop($"{Connection.name}.DownloadViaHttp");
            }

            stopWatch.Stop();
            _logger.Log(LogLevel.Information, $"{taskInfo} elapsed: {stopWatch.Elapsed} size: {fileSize} rate: {(float)fileSize / stopWatch.Elapsed.TotalMilliseconds * 1000 / 1000000} MB/s");
        }
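A hedged caller sketch for DownloadViaHttp. The egsConnection, _httpManager, and queuedItem names and the literal task id are assumptions; the method itself enqueues the downloaded parts to toRules and deletes the remote EGS resource, as shown above.

        //Illustrative only: pull one queued resource down to the toRules staging area.
        await DownloadViaHttp(taskID: 101, routedItem: queuedItem, connection: egsConnection, httpManager: _httpManager);

        /// <summary>
        /// wado downloads studies from liCloud. ImagingStudy is required while Series and Instance are
        /// optional. RAM utilization remains low regardless of download size.
        /// </summary>
        /// <param name="connection"></param>
        /// <param name="taskID"></param>
        /// <param name="study"></param>
        /// <param name="httpManager"></param>
        /// <param name="series"></param>
        /// <param name="instance"></param>
        /// <param name="compress"></param>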
        public async Task wadoAsFileStream(
            LifeImageCloudConnection connection,
            int taskID,
            ImagingStudy study,
            IHttpManager httpManager,
            Series series     = null,
            Instance instance = null,
            bool compress     = true)
        {
            Connection = connection;

            var taskInfo  = $"task: {taskID} connection: {Connection.name}";
            var stopWatch = new Stopwatch();

            stopWatch.Start();
            string url = $"{study.url}";
            string dir = _profileStorage.Current.tempPath + Path.DirectorySeparatorChar + Connection.name + Path.DirectorySeparatorChar +
                         Constants.Dirs.ToRules + Path.DirectorySeparatorChar + Guid.NewGuid();
            long fileSize = 0;
            HttpResponseMessage         response       = null;
            MultipartFileStreamProvider streamProvider = null;
            MultipartFileStreamProvider contents       = null;

            var httpClient = _liteHttpClient.GetClient(connection);

            try
            {
                _logger.Log(LogLevel.Debug, $"{taskInfo} study url: {url} attempt {study.attempts}");

                if (series != null)
                {
                    url += $"/series/{series.uid.Substring(8)}";

                    _logger.Log(LogLevel.Debug, $"{taskInfo} seriesURL: {url} attempt {series.attempts}");
                }

                if (instance != null)
                {
                    instance.downloadStarted = DateTime.Now;
                    instance.attempts++;
                    url += $"/instances/{instance.uid}";

                    _logger.Log(LogLevel.Debug, $"{taskInfo} instanceURL: {url} attempt {instance.attempts}");
                }

                var cookies = _liteHttpClient.GetCookies(url);
                _logger.LogCookies(cookies, taskInfo);

                // issue the GET
                var task = httpClient.GetAsync(url, HttpCompletionOption.ResponseHeadersRead, _taskManager.cts.Token);

                try
                {
                    response = await task;
                }
                catch (System.Threading.Tasks.TaskCanceledException)
                {
                    _logger.Log(LogLevel.Information, $"{taskInfo} Task Canceled");
                }

                // output the result
                _logger.LogHttpResponseAndHeaders(response, taskInfo);

                //if(Logger.logger.FileTraceLevel == "Verbose") _logger.Log(LogLevel.Debug,$"{taskInfo} await response.Content.ReadAsStringAsync(): {await response.Content.ReadAsStringAsync()}");
                if (response.StatusCode != HttpStatusCode.OK)
                {
                    if (response.StatusCode == HttpStatusCode.Unauthorized)
                    {
                        httpManager.loginNeeded = true;
                    }

                    _liteHttpClient.DumpHttpClientDetails();

                    return;
                }

                // 2018-05-09 shb need to get header from Cloud to tell us how big it is
                if (!_util.IsDiskAvailable(dir, _profileStorage.Current, 16000000000)) //just using 16GB as a catch all
                {
                    _logger.Log(LogLevel.Debug, $"{taskInfo} Insufficient disk to write {url} to {dir} guessing it could be 16GB");
                    return;
                    //throw new Exception($"Insufficient disk to write {url} to {dir} guessing it could be 16GB");
                }


                _logger.Log(LogLevel.Debug, $"{taskInfo} download dir will be {dir}");

                Directory.CreateDirectory(dir);
                streamProvider = new MultipartFileStreamProvider(dir);
                contents       = await response.Content.ReadAsMultipartAsync(streamProvider, _taskManager.cts.Token);

                int index = 0;

                _logger.Log(LogLevel.Debug, $"{taskInfo} Splitting {contents.FileData.Count} files into RoutedItems.");
                foreach (var part in contents.FileData)
                {
                    try
                    {
                        index++;

                        fileSize += new System.IO.FileInfo(part.LocalFileName).Length;

                        _logger.Log(LogLevel.Debug, $"{taskInfo} downloaded file: {part.LocalFileName}");

                        RoutedItem routedItem = new RoutedItem(fromConnection: Connection.name, sourceFileName: part.LocalFileName,
                                                               taskID: taskID, fileIndex: index, fileCount: contents.FileData.Count)
                        {
                            type            = RoutedItem.Type.DICOM,
                            Study           = study.uid,
                            AccessionNumber = study.accession?.value,
                            PatientID       = study.patient?.display,
                            Series          = series?.uid //series is optional and may be null when downloading by study or instance
                        };
                        //other study fields (availability, description, extension, modalityList, referrer, resourceType, started, url) are intentionally not mapped


                        _logger.Log(LogLevel.Debug, $"{taskInfo} Enqueuing RoutedItem {routedItem.sourceFileName}");

                        _routedItemManager.Init(routedItem);
                        _routedItemManager.Enqueue(Connection, Connection.toRules, nameof(Connection.toRules));
                    }
                    catch (Exception e)
                    {
                        _logger.LogFullException(e, taskInfo);
                    }
                }

                //2018-04-27 shb moved completion marking outside of part loop to avoid duplicate entries in markSeriesComplete
                //also added duplicate check.
                if (series != null)
                {
                    series.downloadCompleted = DateTime.Now;
                    lock (Connection.markSeriesComplete)
                    {
                        if (!Connection.markSeriesComplete.Contains(series))
                        {
                            Connection.markSeriesComplete.Add(series);
                        }
                    }
                }
                else if (instance != null)
                {
                    //means this came from studies calls so we need to mark this download as complete
                    instance.downloadCompleted = DateTime.Now;
                    lock (Connection.markDownloadsComplete)
                    {
                        Connection.markDownloadsComplete.Add(new string[] { url, "download-complete" });
                    }
                }
            }
            catch (TaskCanceledException)
            {
                _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
            }
            catch (NullReferenceException e)
            {
                _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
                //throw e;
            }
            catch (HttpRequestException e)
            {
                _logger.Log(LogLevel.Warning, $"{taskInfo} Exception: {e.Message} {e.StackTrace}");
                if (e.InnerException != null)
                {
                    _logger.Log(LogLevel.Warning, $"{taskInfo} Inner Exception: {e.InnerException}");
                }

                _liteHttpClient.DumpHttpClientDetails();
            }
            catch (Exception e)
            {
                _logger.LogFullException(e, taskInfo);
                _liteHttpClient.DumpHttpClientDetails();
            }
            finally
            {
                if (response != null)
                {
                    response.Dispose();
                }

                _taskManager.Stop($"{Connection.name}.Wado");
            }

            stopWatch.Stop();
            _logger.Log(LogLevel.Information,
                        $"{taskInfo} elapsed: {stopWatch.Elapsed} size: {fileSize} rate: {(float)fileSize / stopWatch.Elapsed.TotalMilliseconds * 1000 / 1000000} MB/s");
        }
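A hedged caller sketch for wadoAsFileStream. The cloudConnection, httpManager, study, and firstSeries objects are assumed to come from the ImagingStudy query code elsewhere in this connection.

        //Illustrative only: download a whole study, then narrow a second request to a single series.
        await wadoAsFileStream(cloudConnection, taskID: 7, study: study, httpManager: httpManager);
        await wadoAsFileStream(cloudConnection, taskID: 7, study: study, httpManager: httpManager, series: firstSeries);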