/// <summary>
/// Drains <c>Connection.toCloud</c>: waits for in-flight upload tasks of this connection,
/// selects items eligible for (re)send under the retry-delay policy, dispatches
/// RPC/HL7/COMPLETION items as individual tasks, and groups DICOM/FILE items into
/// stow batches keyed by share-destination set and bounded by <c>minStowBatchSize</c>.
/// </summary>
/// <param name="taskID">Task id used only for log correlation.</param>
/// <param name="Connection">The cloud connection whose queue is drained.</param>
/// <param name="cacheManager">Routed-cache manager passed through to response/completion posts.</param>
/// <param name="httpManager">HTTP session; batching stops early when <c>loginNeeded</c> is set.</param>
public async Task SendToCloud(int taskID, LifeImageCloudConnection Connection, IConnectionRoutedCacheManager cacheManager, IHttpManager httpManager)
{
    //2018-02-02 shb RoutedItem does not necessarily have an open stream at this point any more
    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    _logger.Log(LogLevel.Information, $"{taskInfo} toCloud: {(Connection.toCloud == null ? 0 : Connection.toCloud.Count)} suggested items to send.");
    Dictionary<string, List<RoutedItem>> shareSet = new Dictionary<string, List<RoutedItem>>(); //I need a set for each sharing dest set
    List<List<RoutedItem>> sizeSet = new List<List<RoutedItem>>(); //I need a set for each minStowBatchSize
    try
    {
        // Serialize with any still-running send tasks for this connection before
        // selecting new work, so the same item is not picked up twice.
        Task.WaitAll(_taskManager.FindByType($"{Connection.name}.putHL7"));
        Task.WaitAll(_taskManager.FindByType($"{Connection.name}.PostResponse"));
        Task.WaitAll(_taskManager.FindByType($"{Connection.name}.Stow"));

        int retryDelayed = 0; // count of items skipped because their retry delay has not elapsed
        List<RoutedItem> toCloudTemp = new List<RoutedItem>();
        if (Connection.toCloud.Count <= 0)
        {
            await Task.CompletedTask;
            return;
        }

        // Snapshot eligible items under the queue lock; attempts/lastAttempt are
        // mutated here so a concurrent pass won't re-select the same items.
        lock (Connection.toCloud)
        {
            foreach (var routedItem in Connection.toCloud)
            {
                // Optional duplicate-upload suppression for DICOM items with a known source file.
                if (_profileStorage.Current.duplicatesDetectionUpload && routedItem.type == RoutedItem.Type.DICOM && routedItem.sourceFileName != null)
                {
                    _duplicatesDetectionService.DuplicatesPurge();
                    lock (routedItem)
                    {
                        if (!_duplicatesDetectionService.DuplicatesReference(routedItem.fromConnection, routedItem.sourceFileName))
                        {
                            continue;
                        }
                    }
                }
                // Eligible when never attempted, or when the last attempt is older than retryDelayMinutes.
                if (routedItem.lastAttempt == null || (routedItem.lastAttempt != null && routedItem.lastAttempt < DateTime.Now.AddMinutes(-Connection.retryDelayMinutes)))
                {
                    //not attempted lately
                    routedItem.attempts++;
                    routedItem.lastAttempt = DateTime.Now;
                    if (routedItem.attempts > 1)
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} type: {routedItem.type} id: {routedItem.id} file: {routedItem.sourceFileName} meta: {routedItem.RoutedItemMetaFile} second attempt.");
                    }
                    toCloudTemp.Add(routedItem);
                }
                else
                {
                    retryDelayed++;
                }
            }
        }

        foreach (var routedItem in toCloudTemp)
        {
            //remove the toCloud item if it has exceeded maxAttempts
            if (routedItem.attempts > Connection.maxAttempts)
            {
                // AMG LITE-1090 - put a break on then execution (add continue statement) and add routedItem status to the message.
                _logger.Log(LogLevel.Error, $"{taskInfo} type: {routedItem.type} id: {routedItem.id} status: {routedItem.status} file: {routedItem.sourceFileName} meta: {routedItem.RoutedItemMetaFile} has exceeded maxAttempts of {Connection.maxAttempts}. Will move to errors and not try again (removed from send queue).");
                _routedItemManager.Init(routedItem);
                _routedItemManager.Dequeue(Connection, Connection.toCloud, nameof(Connection.toCloud), error: true);
                continue;
            }
            else
            {
                _logger.Log(LogLevel.Information, $"{taskInfo} type: {routedItem.type} id: {routedItem.id} file: {routedItem.sourceFileName} meta: {routedItem.RoutedItemMetaFile} attempt: {routedItem.attempts}");
            }
            // Dispatch by payload type: RPC/HL7/COMPLETION go out individually;
            // DICOM/FILE are collected into shareSet for batched stow below.
            switch (routedItem.type)
            {
                case RoutedItem.Type.RPC:
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} PostResponse ID: {routedItem.id}");
                        var newTaskID = _taskManager.NewTaskID();
                        Task task = new Task(new Action(async () => await _postResponseCloudService.PostResponse(Connection, routedItem, cacheManager, httpManager, newTaskID)), _taskManager.cts.Token);
                        await _taskManager.Start(newTaskID, task, $"{Connection.name}.PostResponse", $"{Connection.name}.PostResponse {routedItem.id}", isLongRunning: false);
                    }
                    break;
                case RoutedItem.Type.HL7:
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} putHL7 file: {routedItem.sourceFileName}");
                        var newTaskID = _taskManager.NewTaskID();
                        Task task = new Task(new Action(async () => await _sendFromCloudToHl7Service.putHL7(routedItem, newTaskID, Connection, httpManager)), _taskManager.cts.Token);
                        await _taskManager.Start(newTaskID, task, $"{Connection.name}.putHL7", $"{Connection.name}.putHL7 {routedItem.sourceFileName}", isLongRunning: false);
                    }
                    break;
                case RoutedItem.Type.COMPLETION:
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} Completion ID: {routedItem.id} type: {routedItem.type} ");
                        var newTaskID = _taskManager.NewTaskID();
                        Task task = new Task(new Action(async () => await _postCompletionCloudService.PostCompletion(Connection, routedItem, cacheManager, httpManager, newTaskID)), _taskManager.cts.Token);
                        await _taskManager.Start(newTaskID, task, $"{Connection.name}.PostCompletion", $"{Connection.name}.PostCompletion {routedItem.id}", isLongRunning: false);
                    }
                    break;
                case RoutedItem.Type.DICOM:
                case RoutedItem.Type.FILE:
                    //check if dicom, if not dicomize since cloud only does dicom via stow.
                    if (File.Exists(routedItem.sourceFileName) && !_dicomUtil.IsDICOM(routedItem))
                    {
                        _dicomUtil.Dicomize(routedItem);
                    }
                    //inspect the sharing headers and batch by set
                    // Key is the concatenation of destination boxUuids so items sharing the
                    // exact same destination set land in the same stow group.
                    string shareString = "";
                    if (Connection.shareDestinations != null)
                    {
                        foreach (var connectionSet in routedItem.toConnections.FindAll(e => e.connectionName.Equals(Connection.name)))
                        {
                            if (connectionSet.shareDestinations != null)
                            {
                                foreach (var shareDestination in connectionSet.shareDestinations)
                                {
                                    shareString += shareDestination.boxUuid;
                                }
                            }
                        }
                    }
                    if (shareSet.ContainsKey(shareString))
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} Adding {routedItem.sourceFileName} to shareString: {shareString}");
                        shareSet.GetValueOrDefault(shareString).Add(routedItem);
                    }
                    else
                    {
                        var list = new List<RoutedItem> { routedItem };
                        _logger.Log(LogLevel.Debug, $"{taskInfo} Adding {routedItem.sourceFileName} to shareString: {shareString}");
                        shareSet.Add(shareString, list);
                    }
                    break;
                default:
                    _logger.Log(LogLevel.Critical, $"{taskInfo} meta: {routedItem.RoutedItemMetaFile} Unsupported type: {routedItem.type}");
                    break;
            }
        }

        //Now that each key in the Dictionary is to a single set of sharing destinations, let's break it up further by minStowBatchSize
        //What we want is a big enough upload to solve the small file problem, but small enough so the upload makes forward progress.
        //If this is not the first attempt, then disable batching and send individually.
        foreach (var share in shareSet.Values)
        {
            var batch = new List<RoutedItem>();
            long bytes = 0;
            foreach (var element in share)
            {
                try
                {
                    element.length = new FileInfo(element.sourceFileName).Length;
                }
                catch (FileNotFoundException e)
                {
                    // Source file is gone: move the item to errors and drop it from this pass.
                    _logger.Log(LogLevel.Critical, $"{taskInfo} id: {element.id} meta:{element.RoutedItemMetaFile} source:{element.sourceFileName} type:{element.type} {e.Message} {e.StackTrace}");
                    _routedItemManager.Init(element);
                    _routedItemManager.Dequeue(Connection, Connection.toCloud, nameof(Connection.toCloud), true);
                    continue;
                }
                catch (IOException e)
                {
                    _logger.Log(LogLevel.Critical, $"{taskInfo} id: {element.id} meta:{element.RoutedItemMetaFile} source:{element.sourceFileName} type:{element.type} {e.Message} {e.StackTrace}");
                    //condition may be transient like file in use so skip for the moment
                    continue;
                }
                catch (Exception e)
                {
                    _logger.Log(LogLevel.Critical, $"{taskInfo} id: {element.id} meta:{element.RoutedItemMetaFile} source:{element.sourceFileName} type:{element.type} {e.Message} {e.StackTrace}");
                    if (e.InnerException != null)
                    {
                        _logger.Log(LogLevel.Critical, $"Inner Exception: {e.InnerException}");
                    }
                    //condition may be transient like file so skip for the moment
                    continue;
                }
                //If this is not the first attempt, then disable batching and send individually.
                if (element.length < Connection.minStowBatchSize && bytes < Connection.minStowBatchSize && element.attempts == 1)
                {
                    bytes += element.length;
                    _logger.Log(LogLevel.Debug, $"{taskInfo} Adding {element.sourceFileName} to batch...");
                    batch.Add(element);
                }
                else
                {
                    // Current batch is full (or item is a retry): seal it and start a new
                    // batch seeded with this element.
                    _logger.Log(LogLevel.Debug, $"{taskInfo} Batch is full with count: {batch.Count} size: {bytes} attempts: {element.attempts} {(element.attempts > 1 ? "items are sent individually after 1st attempt!" : "")}");
                    sizeSet.Add(batch);
                    batch = new List<RoutedItem>();
                    bytes = element.length;
                    batch.Add(element);
                }
            }
            // Flush the trailing partial batch, if any.
            if (!sizeSet.Contains(batch) && batch.Count > 0)
            {
                _logger.Log(LogLevel.Debug, $"{taskInfo} Add final batch to set with count: {batch.Count} size: {bytes}");
                sizeSet.Add(batch);
            }
        }

        int tempcount = 0;
        foreach (var batch in sizeSet)
        {
            tempcount += batch.Count;
        }
        _logger.Log(LogLevel.Information, $"{taskInfo} {sizeSet.Count} batches to send, selected: {tempcount}/{toCloudTemp.Count} retry delayed: {retryDelayed}");

        // Launch one stow task per batch; stop dispatching if a (re)login is required.
        foreach (var batch in sizeSet)
        {
            if (httpManager.loginNeeded)
            {
                break;
            }
            if (batch.Count > 0)
            {
                var newTaskID = _taskManager.NewTaskID();
                Task task = new Task(new Action(async () => await _stowAsMultiPartCloudService.stowAsMultiPart(batch, newTaskID, Connection, httpManager)), _taskManager.cts.Token);
                await _taskManager.Start(newTaskID, task, $"{Connection.name}.Stow", $"{Connection.name}.Stow batch {batch.Count}", isLongRunning: false);
            }
        }
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"{taskInfo} Task was canceled.");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
    }
}
/// <summary>
/// Drains <c>Connection.toEGS</c>: waits for in-flight send tasks, selects items whose
/// retry delay has elapsed, groups them by share-destination set, splits groups into
/// batches bounded by <c>minEGSBatchSize</c>, and dispatches each batch according to
/// the connection's push/pull mode and protocol.
/// </summary>
/// <param name="taskID">Task id used only for log correlation.</param>
/// <param name="connection">The EGS connection whose queue is drained (also stored in <c>Connection</c>).</param>
/// <param name="sendToAllHubs">Hub broadcast service passed through to PresentAsResource.</param>
public async Task SendToEGS(int taskID, LITEConnection connection, ISendToAllHubsService sendToAllHubs)
{
    Connection = connection;
    //2018-02-02 shb RoutedItem does not necessarily have an open stream at this point any more
    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    _logger.Log(LogLevel.Information, $"{taskInfo} toEGS: {(Connection.toEGS == null ? 0 : Connection.toEGS.Count)} items to send.");
    Dictionary<string, List<RoutedItem>> shareSet = new Dictionary<string, List<RoutedItem>>(); //I need a set for each sharing dest set
    List<List<RoutedItem>> sizeSet = new List<List<RoutedItem>>(); //I need a set for each minEGSBatchSize
    try
    {
        // Serialize with any still-running send tasks for this connection before selecting new work.
        Task.WaitAll(_taskManager.FindByType($"{Connection.name}.PresentAsResource"));
        Task.WaitAll(_taskManager.FindByType($"{Connection.name}.Store"));

        int retryDelayed = 0; // count of items skipped because their retry delay has not elapsed
        if (Connection.toEGS.Count > 0)
        {
            var toEGSTemp = Connection.toEGS.ToArray();
            //remove the toEGS item if it has exceeded maxAttempts
            foreach (var routedItem in toEGSTemp)
            {
                if (_taskManager.cts.IsCancellationRequested)
                {
                    return;
                }
                // NOTE(review): unlike SendToCloud, an item with lastAttempt == null is never
                // selected here (it falls into retryDelayed). Presumably lastAttempt is set on
                // enqueue for EGS items — TODO confirm against the enqueue path.
                if (routedItem.lastAttempt != null && routedItem.lastAttempt < DateTime.Now.AddMinutes(-Connection.retryDelayMinutes)) //not attempted lately
                {
                    routedItem.attempts++;
                    if (routedItem.attempts > 1)
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} {routedItem.sourceFileName} second attempt.");
                    }
                    routedItem.lastAttempt = DateTime.Now;
                    if (routedItem.attempts > Connection.maxAttempts)
                    {
                        _logger.Log(LogLevel.Error, $"{taskInfo} {routedItem.sourceFileName} has exceeded maxAttempts of {Connection.maxAttempts}. Will move to errors and not try again.");
                        routedItem.Error = "Exceeded maxAttempts";
                        _routedItemManager.Init(routedItem);
                        _routedItemManager.Dequeue(Connection, Connection.toEGS, nameof(Connection.toEGS), error: true);
                        // BUGFIX (parity with SendToCloud / AMG LITE-1090): the item was just moved
                        // to errors, so it must not fall through into shareSet and be sent anyway.
                        continue;
                    }
                    else
                    {
                        _logger.Log(LogLevel.Information, $"{taskInfo} {routedItem.sourceFileName} attempts: {routedItem.attempts}");
                    }
                    //inspect the sharing headers and batch by set
                    // Key is the concatenation of destination boxUuids so items sharing the
                    // exact same destination set land in the same group.
                    string shareString = "";
                    if (Connection.shareDestinations != null)
                    {
                        foreach (var connectionSet in routedItem.toConnections.FindAll(e => e.connectionName.Equals(Connection.name)))
                        {
                            if (connectionSet.shareDestinations != null)
                            {
                                foreach (var shareDestination in connectionSet.shareDestinations)
                                {
                                    shareString += shareDestination.boxUuid;
                                }
                            }
                        }
                    }
                    if (shareSet.ContainsKey(shareString))
                    {
                        _logger.Log(LogLevel.Debug, $"{taskInfo} Adding {routedItem.sourceFileName} to shareString: {shareString}");
                        shareSet.GetValueOrDefault(shareString).Add(routedItem);
                    }
                    else
                    {
                        var list = new List<RoutedItem> { routedItem };
                        _logger.Log(LogLevel.Debug, $"{taskInfo} Adding {routedItem.sourceFileName} to shareString: {shareString}");
                        shareSet.Add(shareString, list);
                    }
                }
                else
                {
                    retryDelayed++;
                }
            }

            //Now that each key in the Dictionary is to a single set of sharing destinations, let's break it up further by minEGSBatchSize
            //What we want is a big enough upload to solve the small file problem, but small enough so the upload makes forward progress.
            //If this is not the first attempt, then disable batching and send individually.
            foreach (var share in shareSet.Values)
            {
                if (_taskManager.cts.IsCancellationRequested)
                {
                    return;
                }
                var batch = new List<RoutedItem>();
                long bytes = 0;
                foreach (var element in share)
                {
                    if (File.Exists(element.sourceFileName))
                    {
                        try
                        {
                            element.length = new FileInfo(element.sourceFileName).Length;
                        }
                        catch (FileNotFoundException e)
                        {
                            _logger.Log(LogLevel.Error, $"{taskInfo} {e.Message} {e.StackTrace}");
                            continue;
                        }
                        catch (IOException e)
                        {
                            _logger.Log(LogLevel.Error, $"{taskInfo} {e.Message} {e.StackTrace}");
                            //condition may be transient like file in use so skip for the moment
                            continue;
                        }
                        catch (Exception e)
                        {
                            _logger.LogFullException(e, taskInfo);
                            //condition may be transient like file so skip for the moment
                            continue;
                        }
                    }
                    else
                    {
                        element.Error = $"File {element.sourceFileName} does not exist";
                        _routedItemManager.Init(element);
                        _routedItemManager.Dequeue(Connection, Connection.toEGS, nameof(Connection.toEGS), true);
                        // BUGFIX (parity with SendToCloud): the item was just moved to errors with no
                        // source file and an unset length, so it must not be batched and sent.
                        continue;
                    }
                    //If this is not the first attempt, then disable batching and send individually.
                    if (element.length < Connection.minEGSBatchSize && bytes < Connection.minEGSBatchSize && element.attempts == 1)
                    {
                        bytes += element.length;
                        _logger.Log(LogLevel.Debug, $"{taskInfo} Adding {element.sourceFileName} to batch...");
                        batch.Add(element);
                    }
                    else
                    {
                        // Current batch is full (or item is a retry): seal it and start a new
                        // batch seeded with this element.
                        _logger.Log(LogLevel.Debug, $"{taskInfo} Batch is full with count: {batch.Count} size: {bytes} attempts: {element.attempts} {(element.attempts > 1 ? "items are sent individually after 1st attempt!" : "")}");
                        sizeSet.Add(batch);
                        batch = new List<RoutedItem>();
                        bytes = element.length;
                        batch.Add(element);
                    }
                }
                // Flush the trailing partial batch, if any.
                if (!sizeSet.Contains(batch) && batch.Count > 0)
                {
                    _logger.Log(LogLevel.Debug, $"{taskInfo} Add final batch to set with count: {batch.Count} size: {bytes}");
                    sizeSet.Add(batch);
                }
            }

            int tempcount = 0;
            foreach (var batch in sizeSet)
            {
                tempcount += batch.Count;
            }
            _logger.Log(LogLevel.Information, $"{taskInfo} {sizeSet.Count} batches to send, selected: {tempcount}/{toEGSTemp.Length} retry delayed: {retryDelayed}");

            // Dispatch each batch according to the connection's push/pull mode and protocol.
            foreach (var batch in sizeSet)
            {
                if (_taskManager.cts.IsCancellationRequested)
                {
                    return;
                }
                //if (loginNeeded) break;
                if (batch.Count > 0)
                {
                    switch (Connection.PushPull)
                    {
                        case PushPullEnum.pull:
                            switch (Connection.protocol)
                            {
                                case Protocol.Http:
                                case Protocol.UDT:
                                    var newTaskID = _taskManager.NewTaskID();
                                    Task task = new Task(new Action(async () => await _presentAsResourceService.PresentAsResource(batch, newTaskID, connection, sendToAllHubs)));
                                    await _taskManager.Start(newTaskID, task, $"{Connection.name}.PresentAsResource", $"{Connection.name}.PresentAsResource batch {batch.Count}", isLongRunning: false);
                                    break;
                            }
                            break;
                        case PushPullEnum.push:
                            switch (Connection.protocol)
                            {
                                case Protocol.Http:
                                    var newTaskID2 = _taskManager.NewTaskID();
                                    Task task2 = new Task(new Action(async () => await _liteStoreService.store(batch, newTaskID2, connection)));
                                    await _taskManager.Start(newTaskID2, task2, $"{Connection.name}.Store", $"{Connection.name}.Store batch {batch.Count}", isLongRunning: false);
                                    break;
                                case Protocol.UDT:
                                    break;
                            }
                            break;
                        case PushPullEnum.both:
                            //since each method dequeues it's own work we would need a separate queue before we can do both, like toEGSPull toEGSPush.
                            break;
                    }
                }
            }
        }
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"Task was canceled.");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
    }
}
/// <summary>
/// Long-running download pump for an EGS connection: when nothing fresh is queued it
/// refreshes the resource list (throttled to a fixed poll interval unless the previous
/// fetch reported resources) and waits on the signal semaphore; otherwise it briefly
/// delays. Each pass processes every queued item and waits for spawned downloads,
/// looping while the connection stays responsive.
/// </summary>
/// <param name="taskID">Task id used for log correlation and passed to downstream calls.</param>
/// <param name="connection">The EGS connection to pump (also stored in <c>Connection</c>).</param>
/// <param name="httpManager">HTTP session passed through to resource fetch and item processing.</param>
/// <param name="FromEGSSignal">Semaphore released elsewhere when new work arrives.</param>
public async Task Download(int taskID, LITEConnection connection, IHttpManager httpManager, SemaphoreSlim FromEGSSignal)
{
    Connection = connection;
    var taskInfo = $"task: {taskID} connection: {Connection.name}";
    var profile = _profileStorage.Current;
    const int resourcePollIntervalMs = 120000; // minimum ms between GetResources calls (bypassed while the last call reported resources)
    var lastResourceFetch = DateTime.MinValue;
    var lastResourceCount = 0;
    try
    {
        do
        {
            var queued = Connection.fromEGS.ToList();
            var hasFreshWork = queued.Any(e => e.attempts == 0);
            if (queued.Count == 0 || !hasFreshWork)
            {
                // Nothing new locally: refresh the resource list if the poll interval
                // elapsed or the previous fetch still reported resources.
                var staleThreshold = DateTime.Now.AddMilliseconds(-resourcePollIntervalMs);
                if (lastResourceFetch < staleThreshold || lastResourceCount > 0)
                {
                    lastResourceCount = await _getLiteReresourcesService.GetResources(taskID, connection, httpManager);
                    lastResourceFetch = DateTime.Now;
                }
                // Park until new work is signaled or the kick-off interval expires;
                // the timeout result is intentionally ignored — either way we re-scan.
                _ = await FromEGSSignal.WaitAsync(profile.KickOffInterval, _taskManager.cts.Token).ConfigureAwait(false);
            }
            else
            {
                await Task.Delay(profile.taskDelay).ConfigureAwait(false);
            }
            // Process a fresh snapshot of the queue (items may have arrived while waiting).
            foreach (var routedItem in Connection.fromEGS.ToArray())
            {
                await ProcessItem(taskID, routedItem, connection, httpManager);
            }
            // Let all spawned download tasks drain before the next sweep.
            Task.WaitAll(_taskManager.FindByType($"{Connection.name}.DownloadViaHttp"));
        } while (Connection.responsive);
    }
    catch (TaskCanceledException)
    {
        _logger.Log(LogLevel.Information, $"Task was canceled.");
    }
    catch (OperationCanceledException)
    {
        _logger.Log(LogLevel.Warning, $"Wait Operation Canceled. Exiting Download");
    }
    catch (Exception e)
    {
        _logger.LogFullException(e, taskInfo);
        _logger.Log(LogLevel.Critical, $"{taskInfo} Exiting Download");
    }
    finally
    {
        _taskManager.Stop($"{Connection.name}.Download");
    }
}