private string ParseString(string message)
{
    message = message.WithoutStxEtxEnvelope().Trim();

    IMessageParser parser = _parserFactory.GetParser(message);
    BaseParseResult result = parser.Parse(message);

    // TODO: evaluate. It is not possible to determine from the hex alone whether this is a
    // request or a response value, so the check is currently based on getting an ErrorParseResult.
    if (result is ErrorParseResult)
    {
        parser = _parserFactory.GetParser(typeof(MiniResponse));
        result = parser.Parse(message);
    }

    return JsonConvert.SerializeObject(result, Formatting.Indented, new Newtonsoft.Json.Converters.StringEnumConverter());
}
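// For context: a minimal sketch of the parser-factory contract used in ParseString above.
// The real factory is not shown in this excerpt, so the interface name and member shapes
// here are assumptions inferred from the two call sites (GetParser(message) and
// GetParser(typeof(MiniResponse))).
public interface IMessageParserFactory
{
    // Inspect the raw message to choose an appropriate parser.
    IMessageParser GetParser(string message);

    // Force a specific parser by result type, e.g. typeof(MiniResponse) as a fallback.
    IMessageParser GetParser(Type resultType);
}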
/// <inheritdoc />
public async Task<IMangaPages> Handle(GetMangaChapterContentCriterion criterion, CancellationToken cancellationToken)
{
    var catalog = criterion.Catalog.GetEnumValue<CatalogType>();

    return await _parserFactory.GetParser(catalog)
        .GetMangaChapterContentAsync(criterion.Manga, criterion.Volume, criterion.Chapter);
}
public async Task Upload(Stream fileStream, string name, string fileType)
{
    // Upload first, then download the stored copy, so that parsing can later be made asynchronous.
    await _fileStorageAdapter.Upload(fileStream, name);
    var fileContent = await _fileStorageAdapter.DownloadFile(name);

    var parser = _parserFactory.GetParser(fileType);
    var parsingResult = parser.Parse(fileContent);

    if (!parsingResult.IsSuccess)
    {
        throw new Exception($"Failed to parse uploaded file '{name}' ({fileType}).");
    }

    if (parsingResult.Results.Count == 0)
    {
        return;
    }

    var transactionsRepository = _unitOfWork.Transactions;
    var createTransactionCommands = parsingResult.Results.Select(ToCommand).ToArray();

    await transactionsRepository.AddRange(createTransactionCommands);
    await _unitOfWork.SaveChanges();
}
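// Illustrative call of the Upload flow above. The service variable, file name and
// file-type token are hypothetical; only the Upload signature comes from the source.
using (var stream = File.OpenRead("statement.csv"))
{
    await uploadService.Upload(stream, name: "statement.csv", fileType: "csv");
}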
/// <inheritdoc />
public async Task<IEnumerable<IGenre>> Handle(GetGenresCriterion criterion, CancellationToken cancellationToken)
{
    var catalog = criterion.Catalog.GetEnumValue<CatalogType>();

    return await _parserFactory.GetParser(catalog).GetGenresAsync();
}
/// <inheritdoc />
public async Task<IEnumerable<IMangaPreview>> Handle(GetCatalogContentCriterion criterion, CancellationToken cancellationToken)
{
    var catalog = criterion.Catalog.GetEnumValue<CatalogType>();
    var sort = (SortType)Enum.Parse(typeof(SortType), criterion.Sort, true);

    return await _parserFactory.GetParser(catalog).GetCatalogContentAsync(sort, criterion.Page);
}
public MongoInsertionFileProcessor(LogFileContext logFile, LogsharkRequest request, IParserFactory parserFactory)
{
    this.logFile = logFile;
    parser = parserFactory.GetParser(logFile);
    mongoDatabase = request.Configuration.MongoConnectionInfo.GetDatabase(request.RunContext.MongoDatabaseName);
    ignoreDebugLogs = request.IgnoreDebugLogs;
    inFlightInsertions = new List<Thread>();
    insertionQueue = new List<BsonDocument>();
}
/// <summary>
/// Indicates whether a given file qualifies for partitioning.
/// </summary>
protected virtual bool IsPartitionableFile(LogFileContext file, long maxFileSizeBytes)
{
    if (file.FileSize <= maxFileSizeBytes)
    {
        return false;
    }

    // Only partition file types we can parse line-by-line; multi-line log types
    // cannot be safely split on line boundaries.
    IParser parser = parserFactory.GetParser(file.FilePath);
    return parser != null && !parser.IsMultiLineLogType;
}
public async Task<ActUnit> ParseUnit<T>(T source)
{
    var toParse = source as string;
    if (toParse == null)
    {
        throw new ArgumentException("This parser supports only string as a parameter");
    }

    if (string.IsNullOrWhiteSpace(toParse))
    {
        return null;
    }

    var reader = new StringReader(toParse);
    var unit = await ParseMetadata(reader);
    var contentStringBuilder = new StringBuilder();
    UnitType? nextUnitType;

    if (unit.SubUnits == null)
    {
        unit.SubUnits = new List<ActUnit>();
    }

    do
    {
        var line = await reader.ReadLineAsync();
        if (line == null)
        {
            break;
        }

        nextUnitType = line.GetTypeOfHeader();
        if (nextUnitType.HasValue && IsSubUnit(nextUnitType.Value))
        {
            // Delegate each sub-unit header to its own parser and recurse.
            var parser = _parserFactory.GetParser(nextUnitType.Value);
            unit.SubUnits.Add(await parser.ParseUnit(line));
        }
        else
        {
            contentStringBuilder.AppendLine(line);
        }
    } while (!IsNextUnit(nextUnitType));

    unit.Content = contentStringBuilder.ToString().Trim();
    return unit;
}
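// Illustrative driver for the recursive unit parsing above (a sketch: UnitType.Section
// and the sample text are assumptions; only ParseUnit's contract and the
// GetParser(UnitType) dispatch come from the source).
public async Task<ActUnit> ParseSectionExample(string actText)
{
    var parser = _parserFactory.GetParser(UnitType.Section);
    var unit = await parser.ParseUnit(actText);

    // unit.SubUnits holds nested units whose headers appeared in actText;
    // unit.Content holds the remaining plain-text lines.
    return unit;
}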
/// <summary>
/// Process a single log file.
/// </summary>
protected bool ProcessFile(LogFileContext file, IParserFactory parserFactory, string logsetHash)
{
    try
    {
        Log.InfoFormat("Processing {0}.. ({1})", file, file.FileSize.ToPrettySize());

        using (var parseTimer = new LogsharkTimer("Parse File", file.ToString(), GlobalEventTimingData.Add))
        {
            IParser parser = parserFactory.GetParser(file);
            if (parser == null)
            {
                Log.ErrorFormat("Failed to locate a parser for file '{0}'. Skipping this file..", file.FilePath);
                return false;
            }

            IDocumentWriter writer = GetDocumentWriter(file, parser.CollectionSchema.CollectionName, logsetHash);

            // Attempt to process the file; register a failure if we don't yield at least one document
            // for a file with at least one byte of content.
            var fileProcessor = new LogFileParser(parser, writer);
            long documentsSuccessfullyParsed = fileProcessor.Parse(file);
            if (file.FileSize > 0 && documentsSuccessfullyParsed == 0)
            {
                Log.WarnFormat("Failed to parse any log events from {0}!", file);
                return false;
            }

            Log.InfoFormat("Completed processing of {0} ({1}) [{2}]", file, file.FileSize.ToPrettySize(), parseTimer.Elapsed.Print());
            return true;
        }
    }
    catch (Exception ex)
    {
        Log.Error(String.Format("Failed to process file '{0}': {1}", file, ex.Message));
        Log.Debug(ex.StackTrace);
        return false;
    }
    finally
    {
        Cleanup(file);
    }
}
/// <summary>
/// Loads all of the logs required for this request.
/// </summary>
/// <returns>Log contexts for all logs required for the request.</returns>
public IEnumerable<LogFileContext> LoadRequiredLogs()
{
    var logsToProcess = new List<LogFileContext>();

    // Filter down to only supported files.
    var supportedFiles = GetSupportedFiles(request.RunContext.RootLogDirectory);

    // Filter supported files to keep only what we need to populate the required collections.
    foreach (var supportedFile in supportedFiles)
    {
        var parser = parserFactory.GetParser(supportedFile.FullName);
        string collectionName = parser.CollectionSchema.CollectionName.ToLowerInvariant();

        if (LogsetDependencyHelper.IsCollectionRequiredForRequest(collectionName, request))
        {
            logsToProcess.Add(new LogFileContext(supportedFile.FullName, request.RunContext.RootLogDirectory));
        }
    }

    return logsToProcess;
}
/// <summary>
/// Loads all of the logs found in the root log directory which are supported by the given artifact processor.
/// </summary>
/// <returns>Log contexts for all logs required for the request.</returns>
protected IEnumerable<LogFileContext> LoadRequiredLogs(string rootLogDirectory, IArtifactProcessor artifactProcessor, IParserFactory parserFactory, ISet<string> collectionsRequested)
{
    var logsToProcess = new List<LogFileContext>();

    // Filter down to only supported files.
    IEnumerable<FileInfo> supportedFiles = GetSupportedFiles(rootLogDirectory, parserFactory);

    // Filter supported files to keep only what we need to populate the required collections.
    foreach (var supportedFile in supportedFiles)
    {
        var parser = parserFactory.GetParser(supportedFile.FullName);
        string collectionName = parser.CollectionSchema.CollectionName;

        if (collectionsRequested.Contains(collectionName, StringComparer.InvariantCultureIgnoreCase))
        {
            logsToProcess.Add(new LogFileContext(supportedFile.FullName, rootLogDirectory, artifactProcessor.GetAdditionalFileMetadata));
        }
    }

    return logsToProcess;
}
private async Task<DataTable> LoadDataAsync(string uploadsPath)
{
    List<FileInfo> files = _fileManager.GetUploadedFiles(uploadsPath);
    DataTable dt = GenerateDataTable();

    List<Task<List<Customer>>> tasks = files
        .Select(async f =>
        {
            List<string> lines = await _fileManager.ReadFileAsync(f);

            // The parser is chosen from the file's header (first) line.
            IParser parser = _parserFactory.GetParser(lines[0]);
            return parser.Parse(lines);
        })
        .ToList();

    (await Task.WhenAll(tasks))
        .SelectMany(x => x)
        .ToList()
        .ForEach(c => dt.Rows.Add(c.CompanyName, c.YearsInBusiness, c.ContactName, c.ContactEmail, c.ContactPhone));

    return dt;
}
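// A plausible shape for the header-based dispatch implied by GetParser(lines[0]) above.
// This is a sketch: the header token and CsvCustomerParser are assumptions, not the
// project's real implementation.
public class ParserFactory : IParserFactory
{
    public IParser GetParser(string headerLine)
    {
        // Pick a parser from the first line of the uploaded file.
        if (headerLine.StartsWith("CompanyName", StringComparison.OrdinalIgnoreCase))
        {
            return new CsvCustomerParser();
        }

        throw new NotSupportedException($"Unrecognized file header: '{headerLine}'");
    }
}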
public IEnumerable<Transaction> ParseValidate(StreamReader reader, string format)
{
    var parser = _parserFactory.GetParser(format);
    return parser.ParseValidate(reader);
}
/// <summary>
/// Splits all logfiles exceeding the specified size threshold into smaller chunks.
/// </summary>
/// <param name="files">The collection of files eligible for partitioning.</param>
/// <returns>Collection of all files in the logset following the partition step.</returns>
public IEnumerable<LogFileContext> PartitionLargeFiles(IEnumerable<LogFileContext> files)
{
    long maxBytes = request.Configuration.TuningOptions.FilePartitionerThresholdMb * 1024 * 1024;

    // Build the list of files to chunk by searching for files exceeding the max size and consulting
    // the parser factory to see if each is a single-line parsable log type. We avoid chunking logs
    // where a single document spans multiple log lines.
    var processedFiles = new ConcurrentBag<LogFileContext>();
    IList<LogFileContext> filesToPartition = new List<LogFileContext>();
    foreach (var file in files)
    {
        if (file.FileSize > maxBytes)
        {
            // If there's no parser available for it, don't bother chunking it.
            var parser = parserFactory.GetParser(file.FilePath);
            if (parser == null)
            {
                continue;
            }

            if (!parser.IsMultiLineLogType)
            {
                filesToPartition.Add(file);
            }
            else
            {
                // This is a large multi-line log type; we can't do anything with it, so we just consider it good as-is.
                processedFiles.Add(file);
            }
        }
        else
        {
            processedFiles.Add(file);
        }
    }

    if (filesToPartition.Count == 0)
    {
        Log.InfoFormat("No log files were found that are larger than {0}MB; skipping partitioning phase.", request.Configuration.TuningOptions.FilePartitionerThresholdMb);
        return processedFiles;
    }

    Log.InfoFormat("Partitioning {0} log {1} larger than {2}MB to speed up processing. This may take some time..",
                   filesToPartition.Count, "file".Pluralize(filesToPartition.Count), request.Configuration.TuningOptions.FilePartitionerThresholdMb);

    // Set up task scheduler.
    var factory = GetFilePartitioningTaskFactory();

    // Spin up partitioning tasks in parallel.
    Task[] taskArray = new Task[filesToPartition.Count];
    for (var i = 0; i < filesToPartition.Count; i++)
    {
        var fileToChunk = filesToPartition[i];
        taskArray[i] = factory.StartNew(() =>
        {
            Log.InfoFormat("Partitioning file {0}.. ({1})", fileToChunk.FileName, fileToChunk.FileSize.ToPrettySize());
            var partitionFileTimer = request.RunContext.CreateTimer("Partition File", String.Format("{0}/{1}", fileToChunk.FileLocationRelativeToRoot, fileToChunk.FileName));

            var partitioner = new FilePartitioner(request, fileToChunk, maxBytes);
            IList<LogFileContext> partitions = partitioner.PartitionFile();
            foreach (var partition in partitions)
            {
                processedFiles.Add(partition);
            }

            partitionFileTimer.Stop();
            Log.InfoFormat("Finished partitioning file {0} ({1}) [{2}]", fileToChunk.FileName, fileToChunk.FileSize.ToPrettySize(), partitionFileTimer.Elapsed.Print());
        });
    }

    // Wait on any in-flight tasks.
    Task.WaitAll(taskArray);

    return processedFiles;
}
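// One plausible shape for GetFilePartitioningTaskFactory above (a sketch; the real
// scheduler and its concurrency limit are not shown in this excerpt):
private TaskFactory GetFilePartitioningTaskFactory()
{
    // Bound concurrency so parallel partitioning doesn't saturate disk I/O.
    var schedulerPair = new ConcurrentExclusiveSchedulerPair(TaskScheduler.Default, Environment.ProcessorCount);
    return new TaskFactory(schedulerPair.ConcurrentScheduler);
}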
public string Handle()
{
    using (new ExtendedLogger(_context.Mini.SerialNumber))
    {
        Log.Trace("Enter");
        try
        {
            Log.Info("sn: {0} | input: {1} | lastUrl: {2}", _context.Mini.SerialNumber, _context.Message, _context.Mini.QboxStatus.Url);
            LogQboxMessage(_context.Message, QboxMessageType.Request);

            // Start parsing.
            var parser = _parserFactory.GetParser(_context.Message);
            _result = parser.Parse(_context.Message);

            if (_result is MiniParseResult parseResult)
            {
                // Handle the result.
                _context.Mini.QboxStatus.FirmwareVersion = parseResult.ProtocolNr;
                _context.Mini.State = parseResult.Model.Status.Status;
                _context.Mini.QboxStatus.State = (byte)parseResult.Model.Status.Status;

                var operational = false;
                switch (_context.Mini.State)
                {
                    case MiniState.HardReset:
                        _context.Mini.QboxStatus.LastHardReset = DateTime.UtcNow;
                        break;
                    case MiniState.InvalidImage:
                        _context.Mini.QboxStatus.LastImageInvalid = DateTime.UtcNow;
                        break;
                    case MiniState.Operational:
                        operational = true;
                        break;
                    case MiniState.ValidImage:
                        _context.Mini.QboxStatus.LastImageValid = DateTime.UtcNow;
                        break;
                    case MiniState.UnexpectedReset:
                        _context.Mini.QboxStatus.LastPowerLoss = DateTime.UtcNow;
                        break;
                }

                if (!operational)
                {
                    _context.Mini.QboxStatus.LastNotOperational = DateTime.UtcNow;
                }

                if (parseResult.Model.Status.TimeIsReliable)
                {
                    _context.Mini.QboxStatus.LastTimeIsReliable = DateTime.UtcNow;
                }
                else
                {
                    _context.Mini.QboxStatus.LastTimeUnreliable = DateTime.UtcNow;
                }

                if (parseResult.Model.Status.ValidResponse)
                {
                    _context.Mini.QboxStatus.LastValidResponse = DateTime.UtcNow;
                }
                else
                {
                    _context.Mini.QboxStatus.LastInvalidResponse = DateTime.UtcNow;
                }

                foreach (var payload in parseResult.Model.Payloads)
                {
                    payload.Visit(this);
                }

                BuildResult(ResponseType.Normal);
            }
            else
            {
                if (_result is ErrorParseResult errorParseResult)
                {
                    LogQboxMessage(errorParseResult.Error, QboxMessageType.Error);
                }

                // We could not handle the message normally, but if we don't answer at all, the Qbox
                // will just retransmit the message. So we send back the basic message, without
                // handling the queue and auto-answer.
                BuildResult(ResponseType.Basic);
            }

            _context.Mini.QboxStatus.LastSeen = DateTime.UtcNow;

            Log.Debug("sn: {0} | result: {1}", _context.Mini.SerialNumber, _result.GetMessage());
            LogQboxMessage(_result.GetMessageWithEnvelope(), QboxMessageType.Response);

            return _result.GetMessageWithEnvelope();
        }
        catch (Exception e)
        {
            if (_context.Mini != null)
            {
                _context.Mini.QboxStatus.LastSeen = DateTime.UtcNow;
                _context.Mini.QboxStatus.LastError = DateTime.UtcNow;
                _context.Mini.QboxStatus.LastErrorMessage = e.Message;
            }

            LogQboxMessage(e.ToString(), QboxMessageType.Exception);
            Log.Error(e, String.Format("sn: {0} | Error: {1}", _context.Mini?.SerialNumber, e.Message));
            return e.Message;
        }
    }
}
/// <inheritdoc cref="IQboxNextDataHandler.HandleAsync()"/> public async Task <string> HandleAsync() { try { var stateDataRequest = new StateData { SerialNumber = _context.Mini.SerialNumber, MessageType = MessageTypeRequest, Message = _context.Message, State = _context.Mini.State, Status = _context.Mini.QboxStatus }; await _stateStoreService.StoreAsync(_correlationId, stateDataRequest); var parser = _parserFactory.GetParser(_context.Message); var result = parser.Parse(_context.Message); var responseMessageTime = _dateTimeService.Now; if (result is MiniParseResult parseResult) { // handle the result _context.Mini.QboxStatus.FirmwareVersion = parseResult.ProtocolNr; _context.Mini.State = parseResult.Model.Status.Status; _context.Mini.QboxStatus.State = (byte)parseResult.Model.Status.Status; var stateDataRequestParsed = new StateData { SerialNumber = _context.Mini.SerialNumber, MessageType = MessageTypeRequestParsed, Message = null, // Set to null State = _context.Mini.State, Status = _context.Mini.QboxStatus, MeterType = parseResult.Model.MeterType, MessageTime = parseResult.Model.MeasurementTime, SequenceNumber = parseResult.SequenceNr, Payloads = parseResult.Model.Payloads?.Count }; await _stateStoreService.StoreAsync(_correlationId, stateDataRequestParsed); bool operational = false; switch (_context.Mini.State) { case MiniState.HardReset: _context.Mini.QboxStatus.LastHardReset = _dateTimeService.UtcNow; break; case MiniState.InvalidImage: _context.Mini.QboxStatus.LastImageInvalid = _dateTimeService.UtcNow; break; case MiniState.Operational: operational = true; break; case MiniState.ValidImage: _context.Mini.QboxStatus.LastImageValid = _dateTimeService.UtcNow; break; case MiniState.UnexpectedReset: _context.Mini.QboxStatus.LastPowerLoss = _dateTimeService.UtcNow; break; } if (!operational) { _context.Mini.QboxStatus.LastNotOperational = _dateTimeService.UtcNow; } if (parseResult.Model.Status.TimeIsReliable) { _context.Mini.QboxStatus.LastTimeIsReliable = _dateTimeService.UtcNow; } else { _context.Mini.QboxStatus.LastTimeUnreliable = _dateTimeService.UtcNow; } if (parseResult.Model.Status.ValidResponse) { _context.Mini.QboxStatus.LastValidResponse = _dateTimeService.UtcNow; } else { _context.Mini.QboxStatus.LastInvalidResponse = _dateTimeService.UtcNow; } await VisitPayloadsAsync(parseResult); BuildResult(parseResult, responseMessageTime, ResponseType.Normal); } else { // We could not handle the message normally, but if we don't answer at all, the Qbox will just retransmit the message. // So we just send back the basic message, without handling the queue and auto-answer. 
BuildResult(result, responseMessageTime, ResponseType.Basic); if (result is ErrorParseResult errorParseResult) { var stateDataError = new StateData { SerialNumber = _context.Mini.SerialNumber, MessageType = MessageTypeError, Message = errorParseResult.Error, State = _context.Mini.State, Status = _context.Mini.QboxStatus, MessageTime = responseMessageTime }; await _stateStoreService.StoreAsync(_correlationId, stateDataError); } } _context.Mini.QboxStatus.LastSeen = _dateTimeService.UtcNow; string resultWithEnvelope = result.GetMessageWithEnvelope(); var stateDataResponse = new StateData { SerialNumber = _context.Mini.SerialNumber, MessageType = MessageTypeResponse, Message = resultWithEnvelope, State = _context.Mini.State, Status = _context.Mini.QboxStatus, MessageTime = responseMessageTime }; await _stateStoreService.StoreAsync(_correlationId, stateDataResponse); return(resultWithEnvelope); } catch (Exception exception) { if (_context.Mini != null) { _context.Mini.QboxStatus.LastSeen = _dateTimeService.UtcNow; _context.Mini.QboxStatus.LastError = _dateTimeService.UtcNow; _context.Mini.QboxStatus.LastErrorMessage = exception.Message; } var stateDataException = new StateData { SerialNumber = _context.Mini?.SerialNumber ?? "N/A", MessageType = MessageTypeException, Message = null, State = _context.Mini?.State ?? MiniState.Waiting, Status = _context.Mini?.QboxStatus }; await _stateStoreService.StoreAsync(_correlationId, stateDataException); _logger.LogError(exception, "SerialNumber {SerialNumber}", stateDataException.SerialNumber); throw; } }
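// The state-store contract implied by the calls above (a sketch: only StoreAsync is
// observable in this excerpt, and the string type of _correlationId is an assumption).
public interface IStateStoreService
{
    // Persist a snapshot of the Qbox state, keyed by the request's correlation id.
    Task StoreAsync(string correlationId, StateData stateData);
}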