/// <summary>
/// Runs the provider-specific (native) bulk copy when the underlying provider
/// connection is reachable; otherwise falls back to the generic multi-row
/// INSERT implementation.
/// </summary>
protected override async Task<BulkCopyRowsCopied> ProviderSpecificCopyAsync<T>(ITable<T> table, BulkCopyOptions options, IAsyncEnumerable<T> source, CancellationToken cancellationToken)
{
	if (table.TryGetDataConnection(out var dataConnection))
	{
		var providerConnection = _provider.TryGetProviderConnection(dataConnection, dataConnection.Connection);

		if (providerConnection != null)
		{
			var asyncEnumerator = source.GetAsyncEnumerator(cancellationToken);
			await using (asyncEnumerator.ConfigureAwait(Configuration.ContinueOnCapturedContext))
			{
				// the provider exposes only a synchronous bulk-copy API,
				// so the async source is adapted to a blocking enumerable
				var syncSource = EnumerableHelper.AsyncToSyncEnumerable(asyncEnumerator);

				return ProviderSpecificCopyImpl(
					table,
					options,
					syncSource,
					dataConnection,
					providerConnection,
					_provider.Adapter.BulkCopy,
					TraceAction);
			}
		}
	}

	// no native path available - emulate bulk copy with batched INSERTs
	return await MultipleRowsCopyAsync(table, options, source, cancellationToken)
		.ConfigureAwait(Configuration.ContinueOnCapturedContext);
}
/// <summary>
/// Async entry point for the provider-specific bulk copy. The provider only
/// offers a synchronous implementation, so the async source is wrapped into a
/// blocking enumerable and handed to <c>ProviderSpecificCopy</c>.
/// </summary>
protected override async Task<BulkCopyRowsCopied> ProviderSpecificCopyAsync<T>(ITable<T> table, BulkCopyOptions options, IAsyncEnumerable<T> source, CancellationToken cancellationToken)
{
	var asyncEnumerator = source.GetAsyncEnumerator(cancellationToken);

	await using (asyncEnumerator.ConfigureAwait(Common.Configuration.ContinueOnCapturedContext))
	{
		// adapt async stream to the synchronous provider API
		var syncSource = EnumerableHelper.AsyncToSyncEnumerable(asyncEnumerator);
		return ProviderSpecificCopy(table, options, syncSource);
	}
}
/// <summary>
/// Dispatches bulk copy to the native Informix (IDS) or DB2 bulk-copy API when
/// one is available, the table is backed by a <c>DataConnection</c> and no
/// transaction is open; otherwise falls back to multi-row INSERTs.
/// </summary>
protected override async Task<BulkCopyRowsCopied> ProviderSpecificCopyAsync<T>(ITable<T> table, BulkCopyOptions options, IAsyncEnumerable<T> source, CancellationToken cancellationToken)
{
	var hasNativeApi = _provider.Adapter.InformixBulkCopy != null || _provider.Adapter.DB2BulkCopy != null;

	if (hasNativeApi && table.TryGetDataConnection(out var dataConnection) && dataConnection.Transaction == null)
	{
		var providerConnection = _provider.TryGetProviderConnection(dataConnection.Connection, table.DataContext.MappingSchema);

		if (providerConnection != null)
		{
			var asyncEnumerator = source.GetAsyncEnumerator(cancellationToken);
			await using (asyncEnumerator.ConfigureAwait(Configuration.ContinueOnCapturedContext))
			{
				// both native APIs are synchronous - adapt the async source once
				var syncSource = EnumerableHelper.AsyncToSyncEnumerable(asyncEnumerator);

				// prefer the IDS-specific path when the Informix adapter is present
				if (_provider.Adapter.InformixBulkCopy != null)
					return IDSProviderSpecificCopy(
						table,
						options,
						syncSource,
						dataConnection,
						providerConnection,
						_provider.Adapter.InformixBulkCopy);

				return DB2.DB2BulkCopy.ProviderSpecificCopyImpl(
					table,
					options,
					syncSource,
					dataConnection,
					providerConnection,
					_provider.Adapter.DB2BulkCopy!,
					TraceAction);
			}
		}
	}

	return await MultipleRowsCopyAsync(table, options, source, cancellationToken)
		.ConfigureAwait(Configuration.ContinueOnCapturedContext);
}
/// <summary>
/// Bulk inserts <paramref name="source"/> into <paramref name="table"/> via the PostgreSQL
/// binary COPY protocol (<c>COPY ... FROM STDIN (FORMAT BINARY)</c>).
/// Falls back to <c>MultipleRowsCopyAsync</c> when the provider connection is not reachable,
/// and to the synchronous COPY implementation when the importer lacks async support.
/// </summary>
/// <param name="dataConnection">Connection the COPY command is executed on.</param>
/// <param name="table">Target table; its entity descriptor supplies the column list.</param>
/// <param name="options">Bulk-copy options (max batch size, identity handling, progress callback).</param>
/// <param name="source">Rows to insert.</param>
/// <param name="cancellationToken">Observed between rows and on each write.</param>
/// <returns>Copied-row counter (with abort flag) as <c>BulkCopyRowsCopied</c>.</returns>
private async Task<BulkCopyRowsCopied> ProviderSpecificCopyImplAsync<T>(DataConnection dataConnection, ITable<T> table, BulkCopyOptions options, IAsyncEnumerable<T> source, CancellationToken cancellationToken)
	where T : notnull
{
	var connection = _provider.TryGetProviderConnection(dataConnection.Connection, dataConnection.MappingSchema);

	// provider-specific connection not available - fall back to generic multi-row INSERTs
	if (connection == null)
	{
		return (await MultipleRowsCopyAsync(table, options, source, cancellationToken).ConfigureAwait(Common.Configuration.ContinueOnCapturedContext));
	}

	var sqlBuilder  = (BasicSqlBuilder)_provider.CreateSqlBuilder(dataConnection.MappingSchema);
	var ed          = dataConnection.MappingSchema.GetEntityDescriptor(typeof(T));
	var tableName   = GetTableName(sqlBuilder, options, table);

	// insertable columns; identity columns only included when KeepIdentity is requested
	var columns     = ed.Columns.Where(c => !c.SkipOnInsert || options.KeepIdentity == true && c.IsIdentity).ToArray();
	var fields      = string.Join(", ", columns.Select(column => sqlBuilder.ConvertInline(column.ColumnName, ConvertType.NameToQueryField)));
	var copyCommand = $"COPY {tableName} ({fields}) FROM STDIN (FORMAT BINARY)";

	// batch size numbers not based on any strong grounds as I didn't found any recommendations for it
	var batchSize   = Math.Max(10, options.MaxBatchSize ?? 10000);

	// binary COPY needs an explicit NpgsqlDbType for every column - resolve them up-front
	var npgsqlTypes = new NpgsqlProviderAdapter.NpgsqlDbType[columns.Length];

	for (var i = 0; i < columns.Length; i++)
	{
		// first attempt: map the column's explicit DbType string
		var npgsqlType = _provider.GetNativeType(columns[i].DbType, true);

		if (npgsqlType == null)
		{
			// second attempt: render a database type name from DataType/StorageType and map that
			var columnType = columns[i].DataType != DataType.Undefined ? new SqlQuery.SqlDataType(columns[i]) : null;

			if (columnType == null || columnType.Type.DataType == DataType.Undefined)
				columnType = columns[i].MappingSchema.GetDataType(columns[i].StorageType);

			var sb = new System.Text.StringBuilder();
			sqlBuilder.BuildTypeName(sb, columnType);
			npgsqlType = _provider.GetNativeType(sb.ToString(), true);
		}

		if (npgsqlType == null)
			throw new LinqToDBException($"Cannot guess PostgreSQL type for column {columns[i].ColumnName}. Specify type explicitly in column mapping.");

		npgsqlTypes[i] = npgsqlType.Value;
	}

	var writer = _provider.Adapter.BeginBinaryImport(connection, copyCommand);

	if (!writer.SupportsAsync)
	{
		// seems to be missing one of the required async methods; fallback to sync importer
		var enumerator = source.GetAsyncEnumerator(cancellationToken);
		await using (enumerator.ConfigureAwait(Common.Configuration.ContinueOnCapturedContext))
		{
			return (ProviderSpecificCopySyncImpl(dataConnection, options, EnumerableHelper.AsyncToSyncEnumerable(enumerator), connection, tableName, columns, npgsqlTypes, copyCommand, batchSize, writer));
		}
	}

	var rowsCopied   = new BulkCopyRowsCopied();
	var currentCount = 0;

	try
	{
		await foreach (var item in source.WithCancellation(cancellationToken).ConfigureAwait(Common.Configuration.ContinueOnCapturedContext))
		{
			await writer.StartRowAsync(cancellationToken).ConfigureAwait(Common.Configuration.ContinueOnCapturedContext);

			// write each column value with its pre-resolved NpgsqlDbType
			for (var i = 0; i < columns.Length; i++)
			{
				await writer.WriteAsync(columns[i].GetValue(item!), npgsqlTypes[i], cancellationToken)
					.ConfigureAwait(Common.Configuration.ContinueOnCapturedContext);
			}

			currentCount++;
			rowsCopied.RowsCopied++;

			// progress notification every NotifyAfter rows; the callback may request abort
			if (options.NotifyAfter != 0 && options.RowsCopiedCallback != null && rowsCopied.RowsCopied % options.NotifyAfter == 0)
			{
				options.RowsCopiedCallback(rowsCopied);

				if (rowsCopied.Abort)
					break;
			}

			// flush the current COPY batch and start a fresh importer for the next one
			if (currentCount >= batchSize)
			{
				await writer.CompleteAsync(cancellationToken)
					.ConfigureAwait(Common.Configuration.ContinueOnCapturedContext);
				await writer.DisposeAsync()
					.ConfigureAwait(Common.Configuration.ContinueOnCapturedContext);

				writer = _provider.Adapter.BeginBinaryImport(connection, copyCommand);

				currentCount = 0;
			}
		}

		if (!rowsCopied.Abort)
		{
			// commit the final (possibly partial) batch, wrapped in tracing
			// NOTE(review): the trace message opens "(" but never closes it - cosmetic; confirm before changing
			await TraceActionAsync(
				dataConnection,
				() => "INSERT ASYNC BULK " + tableName + "(" + string.Join(", ", columns.Select(x => x.ColumnName)) + Environment.NewLine,
				async () =>
				{
					// result of CompleteAsync is not used; awaited only to finish the COPY
					var ret = await writer.CompleteAsync(cancellationToken)
						.ConfigureAwait(Common.Configuration.ContinueOnCapturedContext);
					return ((int)rowsCopied.RowsCopied);
				}).ConfigureAwait(Common.Configuration.ContinueOnCapturedContext);
		}

		// final progress notification regardless of row count
		if (options.NotifyAfter != 0 && options.RowsCopiedCallback != null)
			options.RowsCopiedCallback(rowsCopied);
	}
	finally
	{
		// always dispose the current importer (also covers abort and failure paths)
		await writer.DisposeAsync()
			.ConfigureAwait(Common.Configuration.ContinueOnCapturedContext);
	}

	return (rowsCopied);
}
/// <summary>
/// Bulk inserts <paramref name="source"/> into <paramref name="table"/> via the PostgreSQL
/// binary COPY protocol (<c>COPY ... FROM STDIN (FORMAT BINARY)</c>).
/// Uses the adapter's async importer factory when available; falls back to
/// <c>MultipleRowsCopyAsync</c> when the provider connection is unreachable and to the
/// synchronous COPY implementation when the importer lacks async support.
/// </summary>
/// <param name="dataConnection">Connection the COPY command is executed on.</param>
/// <param name="table">Target table; its entity descriptor supplies the column list.</param>
/// <param name="options">Bulk-copy options (max batch size, identity handling, progress callback).</param>
/// <param name="source">Rows to insert.</param>
/// <param name="cancellationToken">Observed between rows and on each write.</param>
/// <returns>Copied-row counter (with abort flag) as <c>BulkCopyRowsCopied</c>.</returns>
private async Task<BulkCopyRowsCopied> ProviderSpecificCopyImplAsync<T>(DataConnection dataConnection, ITable<T> table, BulkCopyOptions options, IAsyncEnumerable<T> source, CancellationToken cancellationToken)
	where T : notnull
{
	var connection = _provider.TryGetProviderConnection(dataConnection, dataConnection.Connection);

	// provider-specific connection not available - fall back to generic multi-row INSERTs
	if (connection == null)
	{
		return (await MultipleRowsCopyAsync(table, options, source, cancellationToken).ConfigureAwait(Common.Configuration.ContinueOnCapturedContext));
	}

	var sqlBuilder  = (PostgreSQLSqlBuilder)_provider.CreateSqlBuilder(table.DataContext.MappingSchema);
	var ed          = table.DataContext.MappingSchema.GetEntityDescriptor(typeof(T));
	var tableName   = GetTableName(sqlBuilder, options, table);

	// insertable columns; identity columns only included when KeepIdentity is requested
	var columns     = ed.Columns.Where(c => !c.SkipOnInsert || options.KeepIdentity == true && c.IsIdentity).ToArray();
	var fields      = string.Join(", ", columns.Select(column => sqlBuilder.ConvertInline(column.ColumnName, ConvertType.NameToQueryField)));
	var copyCommand = $"COPY {tableName} ({fields}) FROM STDIN (FORMAT BINARY)";

	// batch size numbers not based on any strong grounds as I didn't found any recommendations for it
	var batchSize   = Math.Max(10, options.MaxBatchSize ?? 10000);

	// per-column type info: NpgsqlDbType when resolvable, otherwise a dbType string fallback
	var (npgsqlTypes, dbTypes, columnTypes) = BuildTypes(_provider.Adapter, sqlBuilder, columns);

	// prefer the async importer factory when the adapter provides one
	var writer = _provider.Adapter.BeginBinaryImportAsync != null
		? await _provider.Adapter.BeginBinaryImportAsync(connection, copyCommand, cancellationToken).ConfigureAwait(Common.Configuration.ContinueOnCapturedContext)
		: _provider.Adapter.BeginBinaryImport(connection, copyCommand);

	if (!writer.SupportsAsync)
	{
		// seems to be missing one of the required async methods; fallback to sync importer
		var enumerator = source.GetAsyncEnumerator(cancellationToken);
		await using (enumerator.ConfigureAwait(Common.Configuration.ContinueOnCapturedContext))
		{
			return (ProviderSpecificCopySyncImpl(dataConnection, options, EnumerableHelper.AsyncToSyncEnumerable(enumerator), connection, tableName, columns, columnTypes, npgsqlTypes, dbTypes, copyCommand, batchSize, writer));
		}
	}

	var rowsCopied   = new BulkCopyRowsCopied();
	var currentCount = 0;

	try
	{
		await foreach (var item in source.WithCancellation(cancellationToken).ConfigureAwait(Common.Configuration.ContinueOnCapturedContext))
		{
			await writer.StartRowAsync(cancellationToken).ConfigureAwait(Common.Configuration.ContinueOnCapturedContext);

			for (var i = 0; i < columns.Length; i++)
			{
				// write with NpgsqlDbType when resolved, otherwise with the dbType string fallback
				if (npgsqlTypes[i] != null)
				{
					await writer.WriteAsync(_provider.NormalizeTimeStamp(columns[i].GetProviderValue(item!), columnTypes[i]), npgsqlTypes[i]!.Value, cancellationToken)
						.ConfigureAwait(Common.Configuration.ContinueOnCapturedContext);
				}
				else
				{
					await writer.WriteAsync(_provider.NormalizeTimeStamp(columns[i].GetProviderValue(item!), columnTypes[i]), dbTypes[i]!, cancellationToken)
						.ConfigureAwait(Common.Configuration.ContinueOnCapturedContext);
				}
			}

			currentCount++;
			rowsCopied.RowsCopied++;

			// progress notification every NotifyAfter rows; the callback may request abort
			if (options.NotifyAfter != 0 && options.RowsCopiedCallback != null && rowsCopied.RowsCopied % options.NotifyAfter == 0)
			{
				options.RowsCopiedCallback(rowsCopied);

				if (rowsCopied.Abort)
					break;
			}

			// flush the current COPY batch and start a fresh importer for the next one
			if (currentCount >= batchSize)
			{
				await writer.CompleteAsync(cancellationToken)
					.ConfigureAwait(Common.Configuration.ContinueOnCapturedContext);
				await writer.DisposeAsync()
					.ConfigureAwait(Common.Configuration.ContinueOnCapturedContext);

				writer = _provider.Adapter.BeginBinaryImportAsync != null
					? await _provider.Adapter.BeginBinaryImportAsync(connection, copyCommand, cancellationToken).ConfigureAwait(Common.Configuration.ContinueOnCapturedContext)
					: _provider.Adapter.BeginBinaryImport(connection, copyCommand);

				currentCount = 0;
			}
		}

		if (!rowsCopied.Abort)
		{
			// commit the final (possibly partial) batch, wrapped in tracing
			await TraceActionAsync(
				dataConnection,
				() => $"INSERT ASYNC BULK {tableName}({string.Join(", ", columns.Select(x => x.ColumnName))}){Environment.NewLine}",
				async () =>
				{
					// result of CompleteAsync is not used; awaited only to finish the COPY
					var ret = await writer.CompleteAsync(cancellationToken)
						.ConfigureAwait(Common.Configuration.ContinueOnCapturedContext);
					return ((int)rowsCopied.RowsCopied);
				}).ConfigureAwait(Common.Configuration.ContinueOnCapturedContext);
		}

		// final progress notification regardless of row count
		if (options.NotifyAfter != 0 && options.RowsCopiedCallback != null)
			options.RowsCopiedCallback(rowsCopied);
	}
	finally
	{
		// always dispose the current importer (also covers abort and failure paths)
		await writer.DisposeAsync()
			.ConfigureAwait(Common.Configuration.ContinueOnCapturedContext);
	}

	return (rowsCopied);
}