/// <summary>
/// Splits the inbound file into per-schema temporary files.
/// Non-EDI, multi-schema files are read twice in lockstep: <c>schemaParser</c> tokenizes each
/// row to detect its schema, while <c>readReader</c> supplies the raw line that is written
/// (with the original line number appended) to the writer for that schema.
/// EDI and single-schema files are passed through untouched under a well-known key.
/// Side effects: populates <c>_result.TotalLines</c>, <c>_result.BlankLines</c>,
/// <c>_result.TemporaryFiles</c>, <c>_headerFields</c>, and advances <c>_currentLineNumber</c>.
/// </summary>
private void SplitSchemas()
{
    if (_isEdiFileFormat)
    {
        // EDI files are never split; hand the raw file through as-is.
        _result.TemporaryFiles.Add("EdiDefault", _fileData);
        return;
    }

    long totalLines = _fileData.GetLineCount();
    _result.TotalLines = totalLines;

    // Capture the header row for unmapped-data reporting (delimited files only).
    if (!_fileToProcess.IsFixedWidth
        && (_fileToProcess.HeaderLines.HasValue && _fileToProcess.HeaderLines > 0)
        && _fileToProcess.CaptureUnmappedDataForInboundFile)
    {
        Stream headerStream = null;
        DelimitedTextFileParser headerParser = null;
        try
        {
            headerStream = _fileData.GetReadStream();
            headerParser = new DelimitedTextFileParser(headerStream);
            headerParser.SetDelimiters(_fileToProcess.Delimiter);
            headerParser.SetTextQualifier(_fileToProcess.TextQualifier);
            _headerFields = headerParser.ReadFields();
            //BK : This code is doing nothing and spending cycle just finding byteread which is never used.
            //this takes around 15 minutes with big file.
            //int byteRead = 0;
            //while (byteRead != -1)
            //{
            //    byteRead = headerStream.ReadByte();
            //}
        }
        catch
        {
            // Deliberate best-effort: a header-capture failure is non-fatal.
            _headerFields = null;
        }
        finally
        {
            if (headerStream != null)
            {
                try
                {
                    headerStream.Dispose();
                }
                catch
                {
                    // Swallow disposal errors; worst case the GC finalizes the stream.
                }
            }
            if (headerParser != null)
            {
                headerParser.Dispose();
            }
        }
    }

    if (!_fileToProcess.IsMultiSchema)
    {
        // Single-schema file: no splitting needed.
        _result.TemporaryFiles.Add("Default", _fileData);
        return;
    }

    long footerLines = _fileToProcess.FooterLines ?? 0;

    StreamReader readReader = _fileData.GetStreamReader();
    StreamReader parserReader = _fileData.GetStreamReader();
    try
    {
        ITextFileParser schemaParser = ParserFactory.Instance.GetSchemaParser
            <ITextFileParser>(_fileToProcess, parserReader.BaseStream);
        SkipHeaderLines(readReader, schemaParser);

        while (!schemaParser.EndOfData)
        {
            _currentLineNumber++;

            // Stop before consuming footer rows.
            if (footerLines > 0 && _currentLineNumber > totalLines - footerLines)
            {
                break;
            }

            string[] fields = schemaParser.ReadFields();
            if (fields == null)
            {
                _result.BlankLines++;
                // Keep the raw reader in sync with the parser on blank lines.
                readReader.ReadLine();
                continue;
            }

            RowDefinition rowDef = _fileToProcess.SchemaDetector;
            // NOTE(review): for fixed-width files the parse start/length are passed as null —
            // looks inverted at first glance; presumably the fixed-width parser has already
            // sliced the field. Confirm before changing.
            IContext context = new Context(
                (DataTypeEnum)rowDef.DataType,
                rowDef.IsRequired,
                rowDef.CanSetDefault,
                rowDef.DefaultValue,
                _fileToProcess.FileTypes[0].Description,
                (_fileToProcess.IsFixedWidth ? null : rowDef.ParseStartPosition),
                (_fileToProcess.IsFixedWidth ? null : rowDef.ParseLength),
                rowDef.FieldFormat,
                rowDef.FieldDisplayName,
                rowDef.FieldDescription,
                rowDef.TargetTableName,
                rowDef.TargetFieldName);

            IColumn column = (_fileToProcess.IsFixedWidth
                ? new Column(fields[0], context)
                : new Column(fields[rowDef.SourceColumnNumber.Value - 1], context));

            //TODO: CryptoStream seems to convert blank lines into a bunch of escaped nulls
            //is there a better way to detect this condition, or prevent it all together
            // NOTE(review): when this check suppresses a row, readReader is NOT advanced,
            // so the two readers can desync — verify whether that is intentional.
            if (column.ActualValue != null && column.ActualValue.ToString() != "\0\0")
            {
                var currentWriter = GetStreamWriter(column.ActualValue);
                string line = readReader.ReadLine();
                const int rowNumLength = 10;

                // Append the source line number so split rows can be traced back to the
                // original file (fixed-length pad for fixed-width, extra column otherwise).
                if (_fileToProcess.IsFixedWidth)
                {
                    line += _currentLineNumber.ToString().ToFixedLength(rowNumLength, ' ', true);
                }
                else
                {
                    line += (_fileToProcess.Delimiter.ToLower().Trim() == "tab"
                            ? "\t"
                            : _fileToProcess.Delimiter)
                        + _currentLineNumber.ToString();
                }
                currentWriter.WriteLine(line);
            }
        }

        CloseWriterStreams();
    }
    finally
    {
        // BUGFIX: both readers were previously leaked. Disposal assumes GetStreamReader()
        // returns a fresh stream per call, as GetReadStream() does above — TODO confirm.
        readReader.Dispose();
        parserReader.Dispose();
    }
}