/// <summary>
/// Durable orchestrator that compensates an application process: deletes the
/// previously uploaded CV and photo blobs via the <see cref="FileDeleter"/> activity.
/// </summary>
/// <param name="context">Orchestration context carrying the input command and instance id.</param>
/// <param name="log">Logger for progress messages.</param>
public async Task RunOrchestrator(
    [OrchestrationTrigger] IDurableOrchestrationContext context,
    ILogger log)
{
    try
    {
        var command = context.GetInput<RecompensateApplicationProcessCommand>();

        // FIX: original message had an unbalanced '(' and a stray '$' that was
        // emitted literally before the command id.
        log.LogInformation(
            $"Starting recompensation process (instanceId: {context.InstanceId}) for {command.Id}");

        var deleteCvCommand = new DeleteFileCommand(
            _fileNameProvider.GetFileName(command.Id, command.Cv.Extension),
            FileStore.CvsContainer);

        var deletePhotoCommand = new DeleteFileCommand(
            _fileNameProvider.GetFileName(command.Id, command.Photo.Extension),
            FileStore.PhotosContainer);

        await context.CallActivityAsync(nameof(FileDeleter), deleteCvCommand);
        await context.CallActivityAsync(nameof(FileDeleter), deletePhotoCommand);

        log.LogInformation(
            $"Finished recompensation process (instanceId: {context.InstanceId}) for {command.Id}");
    }
    catch (Exception)
    {
        // Deliberate rethrow: the orchestration is left in a failed state so
        // somebody from support can handle it manually.
        throw;
    }
}
/// <summary>
/// Runs <paramref name="contentToRun"/> on the given node using a pooled service context.
/// </summary>
/// <param name="node">Target node name; must match exactly one profile entry.</param>
/// <param name="contentToRun">The job content to execute remotely.</param>
/// <returns>
/// (true, payload) on success; (false, payload-or-error-text) when the payload
/// contains the "[ORCH-ERR]" marker, the node is ambiguous/unknown, or an
/// exception was hit. This method does not throw.
/// </returns>
public (bool ok, string output) RunOnNode(string node, string contentToRun)
{
    try
    {
        using (IServiceContext context = contextPool.Acquire())
        {
            this.logger.LogDebug($@"[{this.GetHashCode()}]Starting to run job on node {node}, job content {Environment.NewLine}{contentToRun}{Environment.NewLine}");
            string fileName = provider.GetFileName();
            this.logger.LogDebug($@"[{this.GetHashCode()}]The file for remote to bake is {fileName}, start now!");

            string result = context.ProcessRequest(node, fileName, contentToRun, profile);
            this.logger.LogDebug($@"[{this.GetHashCode()}]Request comes back with payload: {result}");

            // FIX: IndexOf(string) is culture-sensitive (CA1307); the error marker
            // is a machine token, so compare ordinally.
            bool failed = result.IndexOf("[ORCH-ERR]", StringComparison.Ordinal) >= 0;
            return (!failed, result);
        }
    }
    catch (InvalidOperationException)
    {
        // Acquire/profile lookup matched zero or multiple elements.
        return (false, $@"{node} not in the profile or more than one element matches {node}");
    }
    catch (Exception ex)
    {
        return (false, $@"Hit exception when processing {contentToRun} on {node} : {ex.Message}");
    }
}
/// <summary>
/// Writes one completed part to the target file, recycles the part to the next
/// queue, and — on the last part — signals that the whole pipeline is finished.
/// </summary>
/// <param name="part">The part whose <c>Result</c> buffer is written out.</param>
/// <returns>Always true.</returns>
protected override bool ProcessPart(FilePart part)
{
    Logger.Add($"Поток {Thread.CurrentThread.Name} получил part {part}");

    var writeTimer = Stopwatch.StartNew();

    // Lazily create the target file on the first part we receive.
    if (_targetStream == null)
    {
        _targetStream = File.Create(_targetFileNameProvider.GetFileName());
    }

    _targetStream.Write(part.Result, 0, part.Result.Length);
    _currentPartIndex++;

    writeTimer.Stop();
    Logger.Add($"Поток {Thread.CurrentThread.Name} записал part {part} за {writeTimer.ElapsedMilliseconds} ms");

    // The buffer is no longer needed; release it before recycling the part.
    part.Result = null;
    NextQueue?.Add(part);

    if (part.IsLast)
    {
        Logger.Add($"Поток {Thread.CurrentThread.Name} записал последнюю part - это признак завершения работы");
        // Last part written: this worker decides to stop itself...
        SetIsNeedStop();
        // ...and announces that the whole job is done.
        _stopEvent.Set();
    }

    return true;
}
/// <summary>
/// Builds and runs the multi-threaded compression pipeline:
/// Reader -> CompressRuner -> Writer, connected by part queues, then blocks in
/// StopEventWait() until the writer signals completion via the stop event.
/// Parts are pre-created up front to cap concurrent memory use.
/// </summary>
/// <param name="strategy">Compression strategy; supplies part size and max concurrent parts.</param>
/// <param name="sourceFileNameProvider">Provider of the input file name.</param>
/// <param name="targetFileNameProvider">Provider of the output (archive) file name.</param>
/// <exception cref="FileNotFoundException">The source file does not exist.</exception>
private void Compress(ICompressStrategy strategy, IFileNameProvider sourceFileNameProvider, IFileNameProvider targetFileNameProvider)
{
    _wasException = null;

    var sourceFileName = sourceFileNameProvider.GetFileName();
    if (!File.Exists(sourceFileName))
    {
        throw new FileNotFoundException($"Не найден файл {sourceFileName}");
    }

    // Create the queues that connect the pipeline stages.
    // (Queues get a dummy logger so their chatter doesn't flood the main log.)
    var loggerForQueue = new LoggerDummy();
    var queueForRead = new PartQueue("ForRead", loggerForQueue);
    _queues.Add(queueForRead);
    var queueForCompress = new PartQueue("ForCompress", loggerForQueue);
    _queues.Add(queueForCompress);
    // Write queue is indexed so parts are written back in their original order.
    var queueForWrite = new IndexedParts("ForWrite", loggerForQueue);
    _queues.Add(queueForWrite);

    _stopEvent = new ManualResetEventSlim(false);

    // Create the queue handlers (workers). The writer recycles finished parts
    // back onto the read queue, closing the loop.
    var writer = new Writer(_logger, _systemInfoProvider, ApplExceptionHandler, targetFileNameProvider, _stopEvent, queueForWrite, queueForRead);
    _queueHandlers.Add(writer);
    var archiversRuner = new CompressRuner(_logger, _systemInfoProvider, ApplExceptionHandler, queueForCompress, queueForWrite);
    _queueHandlers.Add(archiversRuner);
    var partReader = new FilePartReader(_logger, strategy);
    var reader = new Reader(_logger, _systemInfoProvider, ApplExceptionHandler, sourceFileNameProvider, partReader, queueForRead, queueForCompress);
    _queueHandlers.Add(reader);

    // Emit diagnostic information before starting.
    var sourceFileInfo = new FileInfo(sourceFileName);
    _logger.Add($"Размер файла {sourceFileInfo.Length} byte");
    AddSystemInfo();
    var maxActivePartCount = strategy.MaxActivePartCount;
    _logger.Add($"Максимальное кол-во одновременно обрабатываемых частей {maxActivePartCount} шт.");
    _logger.Add($"Размер одной части {strategy.PartSize} byte");
    _logger.Add("Работа начата...");

    _stopWatch.Reset();
    _stopWatch.Start();

    // Seed the read queue with a fixed pool of reusable parts; this bounds how
    // many parts are in flight (and therefore memory) at any moment.
    for (var i = 0; i < maxActivePartCount; i++)
    {
        var part = new FilePart($"FilePart{i + 1}");
        queueForRead.Add(part);
    }

    // Block until the writer sets the stop event (or an exception is handled).
    StopEventWait();
}
/// <summary>
/// Builds and runs the multi-threaded decompression pipeline:
/// Reader -> DecompressRuner -> Writer, connected by part queues, then blocks in
/// StopEventWait() until the writer signals completion via the stop event.
/// </summary>
/// <param name="strategy">Decompression strategy; supplies the max concurrent part count.</param>
/// <param name="sourceFileNameProvider">Provider of the input (archive) file name.</param>
/// <param name="targetFileNameProvider">Provider of the output file name.</param>
/// <exception cref="FileNotFoundException">The source file does not exist.</exception>
private void Decompress(IDecompressStrategy strategy, IFileNameProvider sourceFileNameProvider, IFileNameProvider targetFileNameProvider)
{
    // The archive must be split back into the individually compressed chunks.
    // Each gzip member starts with the 10-byte header (31,139,8,0,0,0,0,0,4,0);
    // those chunks are handed to decompression one by one.
    _wasException = null;

    var sourceFileName = sourceFileNameProvider.GetFileName();
    if (!File.Exists(sourceFileName))
    {
        throw new FileNotFoundException($"Не найден файл {sourceFileName}");
    }

    // Create the queues that connect the pipeline stages.
    var loggerForQueue = new LoggerDummy();
    var queueForRead = new PartQueue("ForRead", loggerForQueue);
    _queues.Add(queueForRead);
    var queueForDecompress = new PartQueue("ForDecompress", loggerForQueue);
    _queues.Add(queueForDecompress);
    // Write queue is indexed so parts are written back in their original order.
    var queueForWrite = new IndexedParts("ForWrite", loggerForQueue);
    _queues.Add(queueForWrite);

    _stopEvent = new ManualResetEventSlim(false);

    // Create the queue handlers (workers). The writer recycles finished parts
    // back onto the read queue, closing the loop.
    var writer = new Writer(_logger, _systemInfoProvider, ApplExceptionHandler, targetFileNameProvider, _stopEvent, queueForWrite, queueForRead);
    _queueHandlers.Add(writer);
    var decompressRuner = new DecompressRuner(_logger, _systemInfoProvider, ApplExceptionHandler, queueForDecompress, queueForWrite);
    _queueHandlers.Add(decompressRuner);
    // NOTE(review): "ArсhivePartReader" appears to contain a Cyrillic 'с' in its
    // name — verify against the class declaration; kept as-is to match it.
    var partReader = new ArсhivePartReader(_logger);
    var reader = new Reader(_logger, _systemInfoProvider, ApplExceptionHandler, sourceFileNameProvider, partReader, queueForRead, queueForDecompress);
    _queueHandlers.Add(reader);

    // Emit diagnostic information before starting.
    var sourceFileInfo = new FileInfo(sourceFileName);
    _logger.Add($"Размер файла {sourceFileInfo.Length} byte");
    AddSystemInfo();
    _logger.Add("Работа начата...");

    _stopWatch.Reset();
    _stopWatch.Start();

    // Seed the read queue with a fixed pool of reusable parts; this bounds how
    // many parts are in flight (and therefore memory) at any moment.
    for (var i = 0; i < strategy.MaxActivePartCount; i++)
    {
        var part = new FilePart($"FilePart{i + 1}");
        queueForRead.Add(part);
    }

    // Block until the writer sets the stop event (or an exception is handled).
    StopEventWait();
}
/// <summary>
/// Appends the message as one line to the file named by the configured
/// file-name provider (file is created if it does not yet exist).
/// </summary>
/// <param name="message">The text to append.</param>
public void Write(string message)
{
    var fileName = _fileNameProvider.GetFileName();

    // StreamWriter owns and disposes the underlying stream, so a single
    // using block is sufficient.
    using (var writer = new StreamWriter(File.Open(fileName, FileMode.Append)))
    {
        writer.WriteLine(message);
        writer.Flush();
    }
}
/// <summary>
/// Reads the next part from the source file, stamps it with a sequence index and
/// forwards it to the next queue. Stops the worker after the last part.
/// </summary>
/// <param name="part">Reusable part to fill with source data.</param>
/// <returns>True when a part was read; otherwise an exception is thrown.</returns>
/// <exception cref="IOException">A part could not be read from the source file.</exception>
protected override bool ProcessPart(FilePart part)
{
    // Lazily open the source file on the first call and hand it to the part reader.
    if (_sourceStream == null)
    {
        var fileName = _sourceFileNameProvider.GetFileName();
        _sourceStream = new FileStream(fileName, FileMode.Open, FileAccess.Read);
        _partReader.Init(_sourceStream, new FileInfo(fileName).Length);
    }

    try
    {
        // FIX: Reset()+Start() collapsed into Restart().
        _processingStopwatch.Restart();

        if (_partReader.ReadPart(part))
        {
            _processingStopwatch.Stop();

            // Index preserves the original order for the (indexed) write queue.
            part.Index = _currentPartIndex;
            _currentPartIndex++;

            Logger.Add($"Поток {Thread.CurrentThread.Name} прочитал часть {part} {part.Source.Length} byte за {_processingStopwatch.ElapsedMilliseconds} ms");
            NextQueue?.Add(part);

            // Last part read: this worker decides to stop itself.
            if (part.IsLast)
            {
                SetIsNeedStop();
            }

            return true;
        }

        Logger.Add($"!Поток {Thread.CurrentThread.Name} НЕ удалось прочитать часть {part}");
        // FIX: throw a specific exception type instead of bare System.Exception.
        throw new IOException($"Не удалось прочитать часть {part}");
    }
    catch (Exception)
    {
        Logger.Add($"Поток {Thread.CurrentThread.Name} - ошибка при чтении");
        // Release the source stream before propagating the failure.
        Close();
        throw;
    }
}
/// <summary>
/// Activity that writes the uploaded CV to blob storage and signals the owning
/// orchestration with either a success or a failure event.
/// </summary>
/// <param name="client">Durable client used to raise events on the orchestration.</param>
/// <param name="context">Activity context; its InstanceId names the blob and the target orchestration.</param>
/// <param name="log">Logger for failure diagnostics.</param>
public async Task Run(
    [DurableClient] IDurableOrchestrationClient client,
    [ActivityTrigger] IDurableActivityContext context,
    ILogger log)
{
    var command = context.GetInput<UploadCvCommand>();

    var saveCvResult = await _fileWriter.Write(
        FileStore.CvsContainer,
        command.Content,
        command.ContentType,
        _fileNameProvider.GetFileName(context.InstanceId, command.Extension));

    if (!saveCvResult.Success)
    {
        log.LogErrors($"Uploading cv failed instanceId: {context.InstanceId}", saveCvResult.Errors);

        var failedEvent = new CvUploadFailedInternalFunctionEvent(saveCvResult.Errors);
        await client.RaiseEventAsync(context.InstanceId, nameof(CvUploadFailedInternalFunctionEvent), failedEvent);

        // FIX: previously fell through and also raised the success event with a
        // failed result's Value. Stop here after reporting the failure.
        return;
    }

    var eventToDispatch = new CvUploadedInternalFunctionEvent(saveCvResult.Value);
    await client.RaiseEventAsync(context.InstanceId, nameof(CvUploadedInternalFunctionEvent), eventToDispatch);
}
/// <summary>
/// Activity that writes the uploaded photo to blob storage and signals the
/// owning orchestration with either a success or a failure event.
/// </summary>
/// <param name="client">Durable client used to raise events on the orchestration.</param>
/// <param name="context">Activity context; its InstanceId names the blob and the target orchestration.</param>
/// <param name="log">Logger for failure diagnostics.</param>
public async Task Run(
    [DurableClient] IDurableOrchestrationClient client,
    [ActivityTrigger] IDurableActivityContext context,
    ILogger log)
{
    _correlationInitializer.Initialize(context.InstanceId);

    var command = context.GetInput<UploadPhotoCommand>();

    var photoSaveResult = await _fileWriter.Write(
        FileStore.PhotosContainer,
        command.Content,
        command.ContentType,
        _fileNameProvider.GetFileName(context.InstanceId, command.Extension));

    if (!photoSaveResult.Success)
    {
        // FIX: LogError was called with extra args that matched no message
        // placeholders; use the same LogErrors helper as the CV activity.
        log.LogErrors($"Uploading photo failed instanceId: {context.InstanceId}", photoSaveResult.Errors);

        // NOTE(review): this raises the CV failure event for a photo failure —
        // looks like a copy-paste; confirm whether a photo-specific failed
        // event exists and whether the orchestrator awaits this event name.
        var failedEvent = new CvUploadFailedInternalFunctionEvent(photoSaveResult.Errors);
        await client.RaiseEventAsync(context.InstanceId, nameof(CvUploadFailedInternalFunctionEvent), failedEvent);

        // FIX: previously fell through and also raised the success event with a
        // failed result's Value. Stop here after reporting the failure.
        return;
    }

    var eventToDispatch = new PhotoUploadedInternalFunctionEvent(photoSaveResult.Value);
    await client.RaiseEventAsync(context.InstanceId, nameof(PhotoUploadedInternalFunctionEvent), eventToDispatch);
}