/// <summary>
/// Parses the single-schema file into the in-memory table. Header/footer line
/// counts are resolved from the file definition (clamped to zero) before the
/// actual parse runs.
/// </summary>
/// <param name="exceptions">Collector that downstream parsing appends errors to.</param>
/// <returns>The table populated by <c>ParseSingleSchemaFile</c>.</returns>
public Table Parse(List<Exception> exceptions)
{
    _fileStream = _fileData.GetReadStream();
    _totalLines = _fileData.GetLineCount();

    // A negative value means the header count was never supplied; fall back to
    // the file definition, treating null or negative as "no header lines".
    if (_headerLines < 0)
    {
        _headerLines = Math.Max(_fileToProcess.HeaderLines ?? 0, 0);
    }

    _footerLines = Math.Max(_fileToProcess.FooterLines ?? 0, 0);

    ParseSingleSchemaFile(exceptions);

    // File-profile-level scripting is intentionally unsupported: running the
    // script here would enumerate/yield every record and break downstream.
    //if (_fileToProcess.IsCustom)
    //{
    //    _scriptRunner.RunScript(_fileToProcess.CustomAssemblyPath, _fileToProcess.CustomClassName, null, null, _table);
    //}

    return _table;
}
/// <summary>
/// Transforms an 837 EDI file into one serialized XML string per claim.
/// </summary>
/// <param name="exceptions">Collector the caught failure is appended to before rethrow.</param>
/// <returns>A list containing the XML for each claim in the document.</returns>
/// <exception cref="Exception">
/// Thrown to abort the import when transformation or serialization fails; the
/// original failure is preserved as <c>InnerException</c>.
/// </exception>
public List<string> Parse(List<Exception> exceptions)
{
    var result = new List<string>();

    X12Parser parser = new X12Parser();
    ClaimFormTransformationService service = new ClaimFormTransformationService(
        new ProfessionalClaimToHcfa1500FormTransformation(""),
        new InstitutionalClaimToUB04ClaimFormTransformation(""),
        new DentalClaimToJ400FormTransformation(""),
        parser);

    try
    {
        // `using` guarantees the source stream and per-claim buffers are released;
        // the original leaked all three (the Dispose calls were commented out).
        using (Stream fileStream = _fileData.GetReadStream())
        {
            ClaimDocument claimDoc = service.Transform837ToClaimDocument(fileStream);

            // One serializer instance reused across all claims.
            XmlSerializer xs = new XmlSerializer(typeof(Claim));

            foreach (Claim claim in claimDoc.Claims)
            {
                using (MemoryStream ms = new MemoryStream())
                {
                    xs.Serialize(ms, claim);
                    ms.Seek(0, SeekOrigin.Begin);

                    using (StreamReader sr = new StreamReader(ms))
                    {
                        result.Add(sr.ReadToEnd());
                    }
                }
            }
        }
    }
    catch (Exception ex)
    {
        exceptions.Add(ex);
        // Keep the original failure as the inner exception instead of discarding it.
        throw new Exception("Aborting import of EDI file", ex);
    }

    return result;
}
/// <summary>
/// Routes each line of a multi-schema (non-EDI) file into a per-schema
/// temporary file, tagging every written line with its original line number.
/// Non-multi-schema and EDI files are registered as a single temporary file
/// and returned untouched.
/// </summary>
private void SplitSchemas()
{
    if (!_isEdiFileFormat)
    {
        long totalLines = _fileData.GetLineCount();
        _result.TotalLines = totalLines;

        // Capture the header row's field names (delimited files only) so
        // unmapped data can be reported later. Best-effort: any failure simply
        // leaves _headerFields null.
        if (!_fileToProcess.IsFixedWidth && (_fileToProcess.HeaderLines.HasValue && _fileToProcess.HeaderLines > 0) && _fileToProcess.CaptureUnmappedDataForInboundFile)
        {
            Stream headerStream = null;
            DelimitedTextFileParser headerParser = null;
            try
            {
                headerStream = _fileData.GetReadStream();
                headerParser = new DelimitedTextFileParser(headerStream);
                headerParser.SetDelimiters(_fileToProcess.Delimiter);
                headerParser.SetTextQualifier(_fileToProcess.TextQualifier);
                _headerFields = headerParser.ReadFields();
                //BK : This code is doing nothing and spending cycle just finding byteread which is never used.
                //this takes around 15 minutes with big file.
                //int byteRead = 0;
                //while (byteRead != -1)
                //{
                //    byteRead = headerStream.ReadByte();
                //}
            }
            catch
            {
                // Swallow deliberately: headers are optional, so ignore any
                // parse/stream failure and continue without them.
                _headerFields = null;
            }
            finally
            {
                if (headerStream != null)
                {
                    try
                    {
                        headerStream.Dispose();
                    }
                    catch
                    {
                        //swallow errors
                        //hopefully the stream eventually gets disposed by GC
                    }
                }
                if (headerParser != null)
                {
                    headerParser.Dispose();
                }
            }
        }

        // Single-schema file: no splitting needed, pass the data through as-is.
        if (!_fileToProcess.IsMultiSchema)
        {
            _result.TemporaryFiles.Add("Default", _fileData);
            return;
        }

        long footerLines = _fileToProcess.FooterLines.HasValue ?
            _fileToProcess.FooterLines.Value : 0;

        // Two readers over the same data advance in lockstep: schemaParser
        // tokenizes fields to classify each row, readReader re-reads the raw
        // line so it can be written out verbatim. Keeping them synchronized is
        // why every branch below that consumes a parsed row also calls
        // readReader.ReadLine().
        StreamReader readReader = _fileData.GetStreamReader();
        StreamReader parserReader = _fileData.GetStreamReader();
        ITextFileParser schemaParser = ParserFactory.Instance.GetSchemaParser<ITextFileParser>(_fileToProcess, parserReader.BaseStream);
        SkipHeaderLines(readReader, schemaParser);

        while (!schemaParser.EndOfData)
        {
            _currentLineNumber++;

            // Stop before the footer region so trailer rows are never split.
            if (footerLines > 0 && _currentLineNumber > totalLines - footerLines)
            {
                break;
            }

            string[] fields = schemaParser.ReadFields();
            if (fields == null)
            {
                // Blank line: count it and keep the raw reader in step.
                _result.BlankLines++;
                readReader.ReadLine();
                continue;
            }

            // Build a one-column context from the schema-detector definition;
            // its value identifies which schema (and thus which writer) this
            // row belongs to.
            RowDefinition rowDef = _fileToProcess.SchemaDetector;
            IContext context = new Context((DataTypeEnum)rowDef.DataType, rowDef.IsRequired, rowDef.CanSetDefault, rowDef.DefaultValue, _fileToProcess.FileTypes[0].Description, (_fileToProcess.IsFixedWidth ? null : rowDef.ParseStartPosition), (_fileToProcess.IsFixedWidth ? null : rowDef.ParseLength), rowDef.FieldFormat, rowDef.FieldDisplayName, rowDef.FieldDescription, rowDef.TargetTableName, rowDef.TargetFieldName);

            // Fixed-width rows parse as a single field; delimited rows use the
            // detector's (1-based) source column. NOTE(review): assumes
            // SourceColumnNumber is always set for delimited multi-schema
            // files — confirm, otherwise this throws on .Value.
            IColumn column = (_fileToProcess.IsFixedWidth ? new Column(fields[0], context) : new Column(fields[rowDef.SourceColumnNumber.Value - 1], context));

            //TODO: CryptoStream seems to convert blank lines into a bunch of escaped nulls
            //is there a better way to detect this condition, or prevent it all together
            if (column.ActualValue != null && column.ActualValue.ToString() != "\0\0")
            {
                var currentWriter = GetStreamWriter(column.ActualValue);
                string line = readReader.ReadLine();
                int originalLineLength = line.Length;
                const int rowNumLength = 10;

                // Append the original line number so the split files can be
                // traced back to the source: padded to a fixed width for
                // fixed-width files, as an extra delimited field otherwise.
                if (_fileToProcess.IsFixedWidth)
                {
                    line += _currentLineNumber.ToString().ToFixedLength(rowNumLength, ' ', true);
                }
                else
                {
                    // The delimiter may be stored as the literal word "tab".
                    line += (_fileToProcess.Delimiter.ToLower().Trim() == "tab" ? "\t" : _fileToProcess.Delimiter) + _currentLineNumber.ToString();
                }
                currentWriter.WriteLine(line);
            }
        }
        CloseWriterStreams();
    }
    else
    {
        // EDI files are split elsewhere; hand the raw data through.
        _result.TemporaryFiles.Add("EdiDefault", _fileData);
    }
}