/// ///////////////////////////////////////////////////////////////////////
/// ReadData
/// <summary>
/// Reads objects of type T from the given file or stream, one per CSV line,
/// yielding each object as it is read.
/// </summary>
/// <typeparam name="T">Type of the objects to read from the file or stream.</typeparam>
/// <param name="fileName">
/// Name of the file associated with the stream.
///
/// If this is not null, a file is opened with this name.
/// If this is null, the method attempts to read from the passed-in stream.
/// </param>
/// <param name="stream">
/// All data is read from this stream, unless fileName is not null.
///
/// This is a StreamReader rather than a TextReader,
/// because we need to be able to seek back to the start of the
/// stream, and you can't do that with a TextReader (or StringReader).
/// </param>
/// <param name="fileDescription">Describes the format and culture of the file or stream.</param>
/// <returns>An enumerable over the objects read from the file or stream.</returns>
private IEnumerable<T> ReadData<T>(
    string fileName,
    StreamReader stream,
    CsvFileDescription fileDescription) where T : class, new()
{
    // If T implements IDataRow, then we're reading raw data rows
    bool readingRawDataRows = typeof(IDataRow).GetTypeInfo().IsAssignableFrom(typeof(T).GetTypeInfo());

    // The constructor for FieldMapper_Reading will throw an exception if there is something
    // wrong with type T. So invoke that constructor before opening the file, because if there
    // is an exception, the file will not be closed.
    //
    // If T implements IDataRow, there is no need for a FieldMapper, because in that case
    // we're returning raw data rows.
    FieldMapper_Reading<T> fm = null;
    if (!readingRawDataRows)
    {
        fm = new FieldMapper_Reading<T>(fileDescription, fileName, false);
    }

    // -------
    // Each time the IEnumerable<T> that is returned from this method is
    // accessed in a foreach, ReadData is called again (not the original Read overload!)
    //
    // So, open the file here, or rewind the stream.

    bool readingFile = !string.IsNullOrEmpty(fileName);

    if (readingFile)
    {
        stream = new StreamReader(
            File.Open(fileName, FileMode.Open),
            fileDescription.TextEncoding,
            fileDescription.DetectEncodingFromByteOrderMarks);
    }
    else
    {
        // Rewind the stream
        if ((stream == null) || (!stream.BaseStream.CanSeek))
        {
            throw new BadStreamException();
        }

        stream.BaseStream.Seek(0, SeekOrigin.Begin);
    }

    // ----------

    CsvStream cs = new CsvStream(stream, null, fileDescription.SeparatorChar,
        fileDescription.IgnoreTrailingSeparatorChar);

    // If we're reading raw data rows, instantiate a T so we return objects
    // of the type specified by the caller.
    // Otherwise, instantiate a DataRow, which also implements IDataRow.
    IDataRow row = null;
    if (readingRawDataRows)
    {
        row = new T() as IDataRow;
    }
    else
    {
        row = new DataRow();
    }

    AggregatedException ae =
        new AggregatedException(typeof(T).ToString(), fileName, fileDescription.MaximumNbrExceptions);

    try
    {
        List<int> charLengths = null;
        if (!readingRawDataRows)
        {
            charLengths = fm.GetCharLengths();
        }

        bool firstRow = true;
        while (cs.ReadRow(row, charLengths))
        {
            // Skip empty lines.
            // Important: if there is a newline at the end of the last data line, the code
            // thinks there is an empty line after that last data line.
            if ((row.Count == 1) &&
                ((row[0].Value == null) ||
                 (string.IsNullOrEmpty(row[0].Value.Trim()))))
            {
                continue;
            }

            if (firstRow && fileDescription.FirstLineHasColumnNames)
            {
                if (!readingRawDataRows)
                {
                    fm.ReadNames(row);
                }
            }
            else
            {
                T obj = default(T);
                try
                {
                    if (readingRawDataRows)
                    {
                        obj = row as T;
                    }
                    else
                    {
                        obj = fm.ReadObject(row, ae);
                    }
                }
                catch (AggregatedException)
                {
                    // Seeing that the AggregatedException was thrown, the maximum number of
                    // exceptions must have been reached, so rethrow. Catch it here, so we
                    // don't add an AggregatedException to an AggregatedException.
                    throw;
                }
                catch (Exception e)
                {
                    // Store the exception in the AggregatedException ae.
                    // That way, if a file has many errors leading to exceptions,
                    // you get them all in one go, packaged in a single aggregated exception.
                    ae.AddException(e);
                }

                yield return obj;
            }

            firstRow = false;
        }
    }
    finally
    {
        if (readingFile)
        {
            stream.Dispose();
        }

        // If any exceptions were raised while reading the data from the file,
        // they will have been stored in the AggregatedException ae.
        // In that case, it's time to throw ae.
        ae.ThrowIfExceptionsStored();
    }
}
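
// Usage illustration (a sketch, not part of the library): because ReadData is an iterator,
// each foreach over the returned IEnumerable<T> runs it again from the top, reopening the
// file or rewinding the stream. The Product type and file name below are hypothetical:
//
//     var desc = new CsvFileDescription { FirstLineHasColumnNames = true };
//     IEnumerable<Product> products = ReadData<Product>("products.csv", null, desc);
//
//     foreach (Product p in products) { /* file opened, read, then disposed */ }
//     foreach (Product p in products) { /* file opened and read all over again */ }
//
// This is also why a non-seekable stream is rejected above: without Seek, a second
// enumeration could not rewind to the start of the data.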
/// ///////////////////////////////////////////////////////////////////////
/// ReadObject
/// <summary>
/// Creates an object of type T from the data in row and returns that object.
/// </summary>
/// <param name="row">Data row to read the field values from.</param>
/// <param name="ae">Aggregated exception collecting any errors found while reading the row.</param>
/// <returns>The newly created object.</returns>
public T ReadObject(IDataRow row, AggregatedException ae)
{
    // If the row has more columns than the type requires
    if (row.Count > m_IndexToInfo.Length)
    {
        // Are we ignoring unknown columns?
        if (!m_fileDescription.IgnoreUnknownColumns)
        {
            // Too many fields
            throw new TooManyDataFieldsException(typeof(T).ToString(), row[0].LineNbr, m_fileName);
        }
    }

    // -----

    var obj = new T();

    // If we will be using the mappings, we just iterate through all the cells in this row
    var maxRowCount = _mappingIndexes.Count > 0
        ? row.Count
        : Math.Min(row.Count, m_IndexToInfo.Length);

    for (var i = 0; i < maxRowCount; i++)
    {
        TypeFieldInfo tfi;

        // If there is some index mapping generated and IgnoreUnknownColumns is true
        if (m_fileDescription.IgnoreUnknownColumns && _mappingIndexes.Count > 0)
        {
            if (!_mappingIndexes.ContainsKey(i))
            {
                continue;
            }

            tfi = m_IndexToInfo[_mappingIndexes[i]];
        }
        else
        {
            tfi = m_IndexToInfo[i];
        }

        if (m_fileDescription.EnforceCsvColumnAttribute && !tfi.hasColumnAttribute)
        {
            // Enforcing the column attribute, but this field/property has no column
            // attribute. So there are too many fields in this record.
            throw new TooManyNonCsvColumnDataFieldsException(typeof(T).ToString(), row[i].LineNbr, m_fileName);
        }

        // -----

        if (!m_fileDescription.FirstLineHasColumnNames &&
            tfi.index == CsvColumnAttribute.mc_DefaultFieldIndex)
        {
            // The first line in the file does not have field names, so we're depending
            // on the FieldIndex of each field in the type to ensure each value is placed
            // in the correct field. However, we've now hit a field with no FieldIndex.
            throw new MissingFieldIndexException(typeof(T).ToString(), row[i].LineNbr, m_fileName);
        }

        // -----

        if (m_fileDescription.UseFieldIndexForReadingData &&
            !m_fileDescription.FirstLineHasColumnNames &&
            tfi.index > row.Count)
        {
            // The first line in the file does not have field names, so we're depending
            // on the FieldIndex of each field in the type to ensure each value is placed
            // in the correct field. However, we've now hit a field whose FieldIndex is
            // bigger than the total number of items in the row produced by the separator char.
            throw new WrongFieldIndexException(typeof(T).ToString(), row[i].LineNbr, m_fileName);
        }

        var index = m_fileDescription.UseFieldIndexForReadingData ? tfi.index - 1 : i;

        // Value to put in the object
        var value = row[index].Value;
        if (value == null)
        {
            if (!tfi.canBeNull)
            {
                ae.AddException(
                    new MissingRequiredFieldException(
                        typeof(T).ToString(), tfi.name, row[i].LineNbr, m_fileName));
            }
        }
        else
        {
            try
            {
                object objValue = null;

                // Normally, either tfi.typeConverter is not null,
                // or tfi.parseNumberMethod is not null.
                if (tfi.typeConverter != null)
                {
                    objValue = tfi.typeConverter.ConvertFromString(
                        null, m_fileDescription.FileCultureInfo, value);
                }
                else if (tfi.parseExactMethod != null)
                {
                    objValue = tfi.parseExactMethod.Invoke(
                        tfi.fieldType,
                        new object[] { value, tfi.outputFormat, m_fileDescription.FileCultureInfo });
                }
                else if (tfi.parseNumberMethod != null)
                {
                    objValue = tfi.parseNumberMethod.Invoke(
                        tfi.fieldType,
                        new object[] { value, tfi.inputNumberStyle, m_fileDescription.FileCultureInfo });
                }
                else
                {
                    // No TypeConverter and no Parse method available.
                    // Try the direct approach.
                    objValue = value;
                }

                switch (tfi.memberInfo)
                {
                    case PropertyInfo property:
                        property.SetValue(obj, objValue, null);
                        break;
                    case FieldInfo field:
                        field.SetValue(obj, objValue);
                        break;
                }
            }
            catch (Exception e)
            {
                if (e is TargetInvocationException)
                {
                    // Unwrap the exception thrown by the invoked Parse method.
                    // Fall back to the wrapper if there is no inner exception.
                    e = e.InnerException ?? e;
                }

                if (e is FormatException)
                {
                    e = new WrongDataFormatException(
                        typeof(T).ToString(), tfi.name, value, row[i].LineNbr, m_fileName, e);
                }

                ae.AddException(e);
            }
        }
    }

    // Visit any remaining fields in the type for which no value was given
    // in the data row, to see whether any of those was required.
    // If only looking at fields with the CsvColumn attribute, ignore
    // fields that don't have that attribute.
    for (var i = row.Count; i < m_IndexToInfo.Length; i++)
    {
        var tfi = m_IndexToInfo[i];

        if ((!m_fileDescription.EnforceCsvColumnAttribute || tfi.hasColumnAttribute) &&
            !tfi.canBeNull)
        {
            ae.AddException(
                new MissingRequiredFieldException(
                    typeof(T).ToString(), tfi.name, row[row.Count - 1].LineNbr, m_fileName));
        }
    }

    return obj;
}
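
// Usage illustration (a sketch, not part of the library): the conversions above are driven
// by the shape of T. A minimal hypothetical target type; the class and column names are
// invented, and the attribute properties shown are assumptions about CsvColumnAttribute:
//
//     internal class Product
//     {
//         [CsvColumn(CanBeNull = false)]
//         public string Name { get; set; }
//
//         [CsvColumn(OutputFormat = "dd MMM yyyy")]
//         public DateTime LaunchDate { get; set; }   // converted via tfi.parseExactMethod
//
//         [CsvColumn(NumberStyle = NumberStyles.Currency)]
//         public decimal Price { get; set; }         // converted via tfi.parseNumberMethod
//     }
//
// With this type, a null Name cell adds a MissingRequiredFieldException to ae instead of
// aborting the read, which is how one pass over a bad file can report many errors at once.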