/// <summary>
/// Builds the schema mapping for the current result set, optionally routing failures
/// to the registered fill-error callback instead of letting them propagate.
/// </summary>
/// <param name="onFillError">Optional error callback; when null, exceptions propagate unfiltered.</param>
/// <returns>The created <see cref="AdaSchemaMapping"/>, or null when a catchable exception was reported to <paramref name="onFillError"/>.</returns>
public static AdaSchemaMapping FillMapping(Action<Exception, DataTable, Object[]> onFillError, IAdaSchemaMappingAdapter adapter, DataSet dataset, DataTable datatable, string srcTable, AdaDataReaderContainer dataReader, int schemaCount, DataColumn parentChapterColumn, object parentChapterValue)
{
    // Only intercept exceptions when a fill-error handler is registered, so that in the
    // default case the caller sees the full, unfiltered call stack.
    try
    {
        return FillMappingInternal(adapter, dataset, datatable, srcTable, dataReader, schemaCount, parentChapterColumn, parentChapterValue);
    }
    catch (Exception ex) when (onFillError != null && ADP.IsCatchableExceptionType(ex))
    {
        onFillError(ex, null, null);
        return null;
    }
}
/// <summary>
/// Walks each result set of <paramref name="dataReader"/> and creates a mapped schema
/// (keyInfo-style) per row-returning result. In the single-DataTable case the first
/// row-returning result wins and remaining results are not read.
/// </summary>
/// <returns>The mapped DataTables; an empty array when no result set produced a table.</returns>
// NOTE(review): this method appears truncated in this chunk — the terminal
// "return dataTables;" (non-empty multi-table case) and the closing brace are not
// visible here; confirm against the full file before editing further.
public static async Task <DataTable[]> FillSchemaFromReaderAsync(IAdaSchemaMappingAdapter adapter, Boolean returnProviderSpecificTypes, DataSet dataset, DataTable singleDataTable, SchemaType schemaType, string srcTable, DbDataReader dataReader, CancellationToken cancellationToken)
{
    DataTable[] dataTables = null;
    int schemaCount = 0;
    do
    {
        AdaDataReaderContainer readerHandler = AdaDataReaderContainer.Create(dataReader, useProviderSpecificDataReader: returnProviderSpecificTypes);
        // Skip non-row-returning results (no columns).
        if (0 >= readerHandler.FieldCount)
        {
            continue;
        }
        string sourceTableName = null;
        if (null != dataset)
        {
            sourceTableName = GetSourceTableName(srcTable, schemaCount);
            schemaCount++; // don't increment if no SchemaTable ( a non-row returning result )
        }
        AdaSchemaMapping mapping = new AdaSchemaMapping(adapter: adapter, dataset, singleDataTable, dataReader: readerHandler, keyInfo: true, schemaType, sourceTableName, gettingData: false, parentChapterColumn: null, parentChapterValue: null);
        if (singleDataTable != null)
        {
            // do not read remaining results in single DataTable case
            return(new DataTable[] { mapping.DataTable });
        }
        else if (null != mapping.DataTable)
        {
            // Accumulate one table per row-returning result set.
            if (null == dataTables)
            {
                dataTables = new DataTable[1] { mapping.DataTable };
            }
            else
            {
                dataTables = AddDataTableToArray(dataTables, mapping.DataTable);
            }
        }
    }
    while (await dataReader.NextResultAsync(cancellationToken).ConfigureAwait(false));
    // FillSchema does not capture errors for FillError event
    if (dataTables is null && singleDataTable is null)
    {
        return(Array.Empty <DataTable>());
    }
/// <summary>
/// Executes the update command for a single row. For Delete (or when the command does not
/// use <see cref="UpdateRowSource.FirstReturnedRecord"/>) a plain non-query is issued;
/// for Insert/Update the first row of the first row-returning result is mapped back onto
/// the DataRow. Output parameters are then applied, and an Update/Delete that affected
/// zero rows is converted to a concurrency-violation status.
/// </summary>
/// <param name="rowUpdatedEvent">Carries the row, mapping, status, and receives the records-affected count.</param>
/// <param name="dataCommand">The command to execute; must be the command on <paramref name="rowUpdatedEvent"/>.</param>
/// <param name="cmdIndex">Statement kind being executed (Insert/Update/Delete expected).</param>
public static async Task UpdateRowExecuteAsync(IAdaSchemaMappingAdapter adapter, Boolean returnProviderSpecificTypes, RowUpdatedEventArgs rowUpdatedEvent, DbCommand dataCommand, StatementType cmdIndex, CancellationToken cancellationToken)
{
    Debug.Assert(null != rowUpdatedEvent, "null rowUpdatedEvent");
    Debug.Assert(null != dataCommand, "null dataCommand");
    Debug.Assert(rowUpdatedEvent.Command == dataCommand, "dataCommand differs from rowUpdatedEvent");

    bool insertAcceptChanges = true;
    UpdateRowSource updatedRowSource = dataCommand.UpdatedRowSource;
    if ((StatementType.Delete == cmdIndex) || (0 == (UpdateRowSource.FirstReturnedRecord & updatedRowSource)))
    {
        // BUGFIX: flow the CancellationToken into ExecuteNonQueryAsync (it was the only
        // await in this method that ignored the token).
        int recordsAffected = await dataCommand.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false);
        rowUpdatedEvent.AdapterInit_(recordsAffected);
    }
    else if ((StatementType.Insert == cmdIndex) || (StatementType.Update == cmdIndex))
    {
        // we only care about the first row of the first result
        using (DbDataReader dataReader = await dataCommand.ExecuteReaderAsync(CommandBehavior.SequentialAccess, cancellationToken).ConfigureAwait(false))
        {
            AdaDataReaderContainer readerHandler = AdaDataReaderContainer.Create(dataReader, returnProviderSpecificTypes);
            try
            {
                bool getData = false;
                do
                {
                    // advance to the first row returning result set,
                    // determined by actually having columns in the result set
                    if (0 < readerHandler.FieldCount)
                    {
                        getData = true;
                        break;
                    }
                }
                while (await dataReader.NextResultAsync(cancellationToken).ConfigureAwait(false));

                if (getData && (0 != dataReader.RecordsAffected))
                {
                    AdaSchemaMapping mapping = new AdaSchemaMapping(adapter, null, rowUpdatedEvent.Row.Table, readerHandler, false, SchemaType.Mapped, rowUpdatedEvent.TableMapping.SourceTable, true, null, null);
                    if ((null != mapping.DataTable) && (null != mapping.DataValues))
                    {
                        // BUGFIX: was the synchronous dataReader.Read() — sync-over-async
                        // inside an async method; use ReadAsync and honor the token.
                        if (await dataReader.ReadAsync(cancellationToken).ConfigureAwait(false))
                        {
                            if ((StatementType.Insert == cmdIndex) && insertAcceptChanges)
                            {
                                // MDAC 64199: accept the pending insert before applying
                                // the returned record, and only once.
                                rowUpdatedEvent.Row.AcceptChanges();
                                insertAcceptChanges = false;
                            }
                            mapping.ApplyToDataRow(rowUpdatedEvent.Row);
                        }
                    }
                }
            }
            finally
            {
                // using Close which can optimize its { while(dataReader.NextResult()); } loop
                dataReader.Close();
                // RecordsAffected is available after Close, but don't trust it after Dispose
                int recordsAffected = dataReader.RecordsAffected;
                rowUpdatedEvent.AdapterInit_(recordsAffected);
            }
        }
    }
    else
    {
        // StatementType.Select, StatementType.Batch
        Debug.Assert(false, "unexpected StatementType");
    }

    // map the parameter results to the dataSet
    if (
        ((StatementType.Insert == cmdIndex) || (StatementType.Update == cmdIndex))
        && (0 != (UpdateRowSource.OutputParameters & updatedRowSource))
        && (0 != rowUpdatedEvent.RecordsAffected)
    )
    {
        if ((StatementType.Insert == cmdIndex) && insertAcceptChanges)
        {
            rowUpdatedEvent.Row.AcceptChanges();
        }

        ParameterMethods.ParameterOutput(adapter.MissingMappingAction, adapter.MissingSchemaAction, dataCommand.Parameters, rowUpdatedEvent.Row, rowUpdatedEvent.TableMapping);
    }

    // Only error if RecordsAffected == 0, not -1. A value of -1 means no count was received from
    // the server; do not error in that situation (means 'set nocount on' was executed on server).
    switch (rowUpdatedEvent.Status)
    {
        case UpdateStatus.Continue:
            switch (cmdIndex)
            {
                case StatementType.Update:
                case StatementType.Delete:
                    if (0 == rowUpdatedEvent.RecordsAffected)
                    {
                        // bug50526, an exception if no records affected and attempted an Update/Delete
                        Debug.Assert(null == rowUpdatedEvent.Errors, "Continue - but contains an exception");
                        rowUpdatedEvent.Errors = ADP.UpdateConcurrencyViolation(cmdIndex, rowUpdatedEvent.RecordsAffected, 1, new DataRow[] { rowUpdatedEvent.Row }); // MDAC 55735
                        rowUpdatedEvent.Status = UpdateStatus.ErrorsOccurred;
                    }
                    break;
            }
            break;
    }
}
/// <summary>
/// Loads every result set of <paramref name="dataReader"/> into the target DataSet or
/// DataTable. <paramref name="startRecord"/> and <paramref name="maxRecords"/> apply only
/// to the first result set; in the single-DataTable case remaining results are skipped.
/// </summary>
/// <returns>The number of rows added from the first result set.</returns>
public static async Task<int> FillFromReaderAsync(Action<Exception, DataTable, Object[]> onFillError, IAdaSchemaMappingAdapter adapter, DataSet dataset, DataTable datatable, string srcTable, AdaDataReaderContainer dataReader, int startRecord, int maxRecords, DataColumn parentChapterColumn, object parentChapterValue, CancellationToken cancellationToken)
{
    int rowsAddedToDataSet = 0;
    int resultIndex = 0;
    do
    {
        if (dataReader.FieldCount <= 0)
        {
            continue; // non-row-returning result; move to the next one
        }

        AdaSchemaMapping mapping = FillMapping(onFillError, adapter, dataset, datatable, srcTable, dataReader, resultIndex, parentChapterColumn, parentChapterValue);
        resultIndex++; // not incremented when there was no SchemaTable (a non-row-returning result)

        // A null mapping (error reported), or one without values/table, contributes nothing.
        if (mapping is null || mapping.DataValues is null || mapping.DataTable is null)
        {
            continue;
        }

        mapping.DataTable.BeginLoadData();
        try
        {
            bool firstResult = (1 == resultIndex);
            if (firstResult && ((startRecord > 0) || (maxRecords > 0)))
            {
                // startRecord and maxRecords constrain only the first result set.
                rowsAddedToDataSet = await FillLoadDataRowChunkAsync(onFillError, mapping, startRecord, maxRecords, cancellationToken).ConfigureAwait(false);
            }
            else
            {
                int loaded = await FillLoadDataRowAsync(onFillError, mapping, cancellationToken).ConfigureAwait(false);
                if (firstResult)
                {
                    // Only the first result set's count is reported — not secondary
                    // or chaptered results.
                    rowsAddedToDataSet = loaded;
                }
            }
        }
        finally
        {
            mapping.DataTable.EndLoadData();
        }

        if (datatable != null)
        {
            break; // single DataTable case: do not read the remaining results
        }
    }
    while (await FillNextResultAsync(onFillError, dataReader, cancellationToken).ConfigureAwait(false));

    return rowsAddedToDataSet;
}
/// <summary>
/// Reads and loads every remaining row of the current result set through
/// <paramref name="mapping"/>, reporting catchable per-row failures to
/// <paramref name="onFillError"/> when a callback is registered.
/// </summary>
/// <returns>The number of rows successfully loaded.</returns>
public static async Task<int> FillLoadDataRowAsync(Action<Exception, DataTable, Object[]> onFillError, AdaSchemaMapping mapping, CancellationToken cancellationToken)
{
    int loadedRows = 0;
    AdaDataReaderContainer reader = mapping.DataReader;

    // Consume the remaining rows of the first and any subsequent result sets.
    while (await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        // Only intercept failures when a fill-error handler is registered, so that in
        // the default case the caller gets the full call stack.
        try
        {
            await mapping.LoadDataRowWithClearAsync(cancellationToken).ConfigureAwait(false);
            loadedRows++;
        }
        catch (Exception e) when (onFillError != null && ADP.IsCatchableExceptionType(e))
        {
            onFillError(e, mapping.DataTable, mapping.DataValues);
        }
    }

    return loadedRows;
}
/// <summary>
/// Loads a window of the first result set: skips <paramref name="startRecord"/> rows, then
/// loads at most <paramref name="maxRecords"/> rows (or all remaining rows when
/// <paramref name="maxRecords"/> is not positive).
/// </summary>
/// <returns>The number of rows loaded; 0 when the result set has fewer than <paramref name="startRecord"/> rows.</returns>
public static async Task<int> FillLoadDataRowChunkAsync(Action<Exception, DataTable, Object[]> onFillError, AdaSchemaMapping mapping, int startRecord, int maxRecords, CancellationToken cancellationToken)
{
    AdaDataReaderContainer reader = mapping.DataReader;

    // Advance past the leading rows that the caller asked to skip.
    for (int toSkip = startRecord; toSkip > 0; toSkip--)
    {
        if (!await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
        {
            return 0; // result set exhausted before reaching startRecord
        }
    }

    if (maxRecords <= 0)
    {
        // No row cap: load everything that remains.
        return await FillLoadDataRowAsync(onFillError, mapping, cancellationToken).ConfigureAwait(false);
    }

    int loadedRows = 0;
    while ((loadedRows < maxRecords) && await reader.ReadAsync(cancellationToken).ConfigureAwait(false))
    {
        try
        {
            await mapping.LoadDataRowWithClearAsync(cancellationToken).ConfigureAwait(false);
            loadedRows++;
        }
        catch (Exception e) when (onFillError != null && ADP.IsCatchableExceptionType(e))
        {
            onFillError(e, mapping.DataTable, mapping.DataValues);
        }
    }
    // Any rows beyond maxRecords in the first result set are deliberately left unread.
    return loadedRows;
}