/// <summary>
/// Performs PostgreSQL-specific bulk insert via Npgsql's binary COPY protocol.
/// Falls back to the generic multi-row INSERT implementation when the underlying
/// connection is not an Npgsql provider connection.
/// </summary>
private BulkCopyRowsCopied ProviderSpecificCopyImpl<T>(DataConnection dataConnection, ITable<T> table, BulkCopyOptions options, IEnumerable<T> source)
	where T : notnull
{
	var providerConnection = _provider.TryGetProviderConnection(dataConnection, dataConnection.Connection);

	// no native Npgsql connection available -> degrade to generic multi-row INSERT
	if (providerConnection == null)
		return MultipleRowsCopy(table, options, source);

	var mappingSchema = table.DataContext.MappingSchema;
	var builder       = (PostgreSQLSqlBuilder)_provider.CreateSqlBuilder(mappingSchema);
	var descriptor    = mappingSchema.GetEntityDescriptor(typeof(T));
	var copyTarget    = GetTableName(builder, options, table);

	// send all insertable columns, plus identity columns when KeepIdentity is requested
	var copyColumns = descriptor.Columns
		.Where(c => !c.SkipOnInsert || options.KeepIdentity == true && c.IsIdentity)
		.ToArray();

	var (npgsqlTypes, dbTypes, columnTypes) = BuildTypes(_provider.Adapter, builder, copyColumns);

	var fieldList = string.Join(", ", copyColumns.Select(c => builder.ConvertInline(c.ColumnName, ConvertType.NameToQueryField)));
	var command   = $"COPY {copyTarget} ({fieldList}) FROM STDIN (FORMAT BINARY)";

	// empirically chosen default; no authoritative recommendation exists for COPY batch sizes
	var rowsPerBatch = Math.Max(10, options.MaxBatchSize ?? 10000);

	var importer = _provider.Adapter.BeginBinaryImport(providerConnection, command);

	return ProviderSpecificCopySyncImpl(dataConnection, options, source, providerConnection, copyTarget, copyColumns, columnTypes, npgsqlTypes, dbTypes, command, rowsPerBatch, importer);
}
/// <summary>
/// Performs PostgreSQL-specific bulk insert via Npgsql's binary COPY protocol,
/// resolving each column's NpgsqlDbType up front. Falls back to the generic
/// multi-row INSERT implementation when the connection is not an Npgsql connection.
/// </summary>
private BulkCopyRowsCopied ProviderSpecificCopyImpl<T>(DataConnection dataConnection, ITable<T> table, BulkCopyOptions options, IEnumerable<T> source)
{
	var providerConnection = _provider.TryGetProviderConnection(dataConnection.Connection, dataConnection.MappingSchema);

	// not an Npgsql connection: degrade to generic multi-row INSERT
	if (providerConnection == null)
		return MultipleRowsCopy(table, options, source);

	var builder    = (BasicSqlBuilder)_provider.CreateSqlBuilder(dataConnection.MappingSchema);
	var descriptor = dataConnection.MappingSchema.GetEntityDescriptor(typeof(T));
	var tableName  = GetTableName(builder, options, table);

	// send all insertable columns, plus identity columns when KeepIdentity is requested
	var copyColumns = descriptor.Columns
		.Where(c => !c.SkipOnInsert || options.KeepIdentity == true && c.IsIdentity)
		.ToArray();

	// Resolves the NpgsqlDbType for a column: first from the explicit DbType string,
	// then from the rendered SQL type name as a fallback; throws when neither maps.
	NpgsqlProviderAdapter.NpgsqlDbType ResolveType(ColumnDescriptor column)
	{
		var npgsqlType = _provider.GetNativeType(column.DbType, true);

		if (npgsqlType == null)
		{
			var columnType = column.DataType != DataType.Undefined ? new SqlQuery.SqlDataType(column) : null;

			if (columnType == null || columnType.Type.DataType == DataType.Undefined)
				columnType = column.MappingSchema.GetDataType(column.StorageType);

			var typeName = new System.Text.StringBuilder();
			builder.BuildTypeName(typeName, columnType);
			npgsqlType = _provider.GetNativeType(typeName.ToString(), true);
		}

		if (npgsqlType == null)
			throw new LinqToDBException($"Cannot guess PostgreSQL type for column {column.ColumnName}. Specify type explicitly in column mapping.");

		return npgsqlType.Value;
	}

	var npgsqlTypes = new NpgsqlProviderAdapter.NpgsqlDbType[copyColumns.Length];
	for (var i = 0; i < copyColumns.Length; i++)
		npgsqlTypes[i] = ResolveType(copyColumns[i]);

	var fields      = string.Join(", ", copyColumns.Select(c => builder.ConvertInline(c.ColumnName, ConvertType.NameToQueryField)));
	var copyCommand = $"COPY {tableName} ({fields}) FROM STDIN (FORMAT BINARY)";

	// empirically chosen default; no authoritative recommendation exists for COPY batch sizes
	var batchSize   = Math.Max(10, options.MaxBatchSize ?? 10000);
	var useComplete = _provider.Adapter.BinaryImporterHasComplete;
	var writer      = _provider.Adapter.BeginBinaryImport(providerConnection, copyCommand);

	return ProviderSpecificCopySyncImpl(dataConnection, options, source, providerConnection, tableName, copyColumns, npgsqlTypes, copyCommand, batchSize, useComplete, writer);
}
/// <summary>
/// Bulk insert via Npgsql's binary COPY protocol, driven through reflection and
/// <c>dynamic</c> dispatch so the Npgsql assembly is not referenced at compile time.
/// Falls back to <see cref="MultipleRowsCopy"/> when the connection is null or is
/// not (a subclass of) the Npgsql connection type.
/// </summary>
protected override BulkCopyRowsCopied ProviderSpecificCopy<T>(DataConnection dataConnection, BulkCopyOptions options, IEnumerable<T> source)
{
	if (dataConnection == null)
	{
		throw new ArgumentNullException(nameof(dataConnection));
	}

	var connection = dataConnection.Connection;
	if (connection == null)
	{
		return (MultipleRowsCopy(dataConnection, options, source));
	}

	if (!(connection.GetType() == _connectionType || connection.GetType().IsSubclassOfEx(_connectionType)))
	{
		return (MultipleRowsCopy(dataConnection, options, source));
	}

	var sqlBuilder = _dataProvider.CreateSqlBuilder();
	var ed         = dataConnection.MappingSchema.GetEntityDescriptor(typeof(T));
	var tableName  = GetTableName(sqlBuilder, options, ed);

	// skip-on-insert columns are excluded unless they are identity columns and KeepIdentity is set
	var columns     = ed.Columns.Where(c => !c.SkipOnInsert || options.KeepIdentity == true && c.IsIdentity).ToArray();
	var writerType  = _connectionType.AssemblyEx().GetType("Npgsql.NpgsqlBinaryImporter", true);
	var fields      = string.Join(", ", columns.Select(column => sqlBuilder.Convert(column.ColumnName, ConvertType.NameToQueryField)));
	var copyCommand = $"COPY {tableName} ({fields}) FROM STDIN (FORMAT BINARY)";
	var rowsCopied  = new BulkCopyRowsCopied();

	// batch size numbers not based on any strong grounds as I didn't found any recommendations for it
	var batchSize    = Math.Max(10, options.MaxBatchSize ?? 10000);
	var currentCount = 0;
	var dc           = (dynamic)connection;

	// row-writer delegates are cached per (entity type, KeepIdentity, descriptor) tuple
	var key       = new { Type = typeof(T), options.KeepIdentity, ed };
	var rowWriter = (Action<MappingSchema, object, ColumnDescriptor[], T>)_rowWriterCache.GetOrAdd(
		key,
		_ => BuildRowWriter<T>(writerType, columns, dataConnection.MappingSchema));

	var writer = dc.BeginBinaryImport(copyCommand);

	// https://github.com/npgsql/npgsql/issues/1646
	// npgsql 4.0 will revert logic by removing explicit Cancel() and add explicit Complete()
	// Probe for both so the same code handles pre- and post-4.0 importer semantics.
	var hasCancel   = writer.GetType().GetMethod("Cancel") != null;
	var hasComplete = writer.GetType().GetMethod("Complete") != null;

	try
	{
		foreach (var item in source)
		{
			rowWriter(dataConnection.MappingSchema, writer, columns, item);
			currentCount++;
			rowsCopied.RowsCopied++;

			if (currentCount >= batchSize)
			{
				// NOTE(review): the progress callback here is only evaluated at batch
				// boundaries, unlike the adapter-based variant which checks every row —
				// confirm whether that difference is intentional.
				if (options.NotifyAfter != 0 && options.RowsCopiedCallback != null && rowsCopied.RowsCopied % options.NotifyAfter == 0)
				{
					options.RowsCopiedCallback(rowsCopied);

					if (rowsCopied.Abort)
					{
						// pre-4.0 importers require an explicit Cancel() to roll back
						if (hasCancel)
						{
							writer.Cancel();
						}
						break;
					}
				}

				// commit the current batch and start a fresh importer for the next one
				if (hasComplete)
				{
					writer.Complete();
				}

				writer.Dispose();
				writer       = dc.BeginBinaryImport(copyCommand);
				currentCount = 0;
			}
		}

		// commit the final (partial) batch unless the caller aborted
		if (!rowsCopied.Abort && hasComplete)
		{
			writer.Complete();
		}
	}
	catch when (hasCancel)
	{
		// exception filter: only pre-4.0 importers need explicit cancellation on failure
		writer.Cancel();
		throw;
	}
	finally
	{
		writer.Dispose();
	}

	return (rowsCopied);
}
/// <summary>
/// Bulk insert via Npgsql's binary COPY protocol using the provider adapter's
/// typed <c>NpgsqlBinaryImporter</c> wrapper. Falls back to
/// <see cref="MultipleRowsCopy"/> when the table's context is not a
/// <see cref="DataConnection"/> or the connection is not an Npgsql connection.
/// </summary>
protected override BulkCopyRowsCopied ProviderSpecificCopy<T>(ITable<T> table, BulkCopyOptions options, IEnumerable<T> source)
{
	if (table.DataContext is DataConnection dataConnection)
	{
		var connection = _provider.TryGetProviderConnection(dataConnection.Connection, dataConnection.MappingSchema);
		if (connection == null)
		{
			return (MultipleRowsCopy(table, options, source));
		}

		var sqlBuilder = (BasicSqlBuilder)_provider.CreateSqlBuilder(dataConnection.MappingSchema);
		var ed         = dataConnection.MappingSchema.GetEntityDescriptor(typeof(T));
		var tableName  = GetTableName(sqlBuilder, options, table);

		// skip-on-insert columns are excluded unless they are identity columns and KeepIdentity is set
		var columns     = ed.Columns.Where(c => !c.SkipOnInsert || options.KeepIdentity == true && c.IsIdentity).ToArray();
		var fields      = string.Join(", ", columns.Select(column => sqlBuilder.ConvertInline(column.ColumnName, ConvertType.NameToQueryField)));
		var copyCommand = $"COPY {tableName} ({fields}) FROM STDIN (FORMAT BINARY)";
		var rowsCopied  = new BulkCopyRowsCopied();

		// batch size numbers not based on any strong grounds as I didn't found any recommendations for it
		var batchSize    = Math.Max(10, options.MaxBatchSize ?? 10000);
		var currentCount = 0;

		// row-writer delegates are cached per (entity type, KeepIdentity, descriptor) tuple
		var key       = new { Type = typeof(T), options.KeepIdentity, ed };
		var rowWriter = (Action<MappingSchema, NpgsqlProviderAdapter.NpgsqlBinaryImporter, ColumnDescriptor[], T>)_rowWriterCache.GetOrAdd(
			key,
			_ => _provider.Adapter.CreateBinaryImportRowWriter<T>(_provider, sqlBuilder, columns, dataConnection.MappingSchema));

		// npgsql >= 4.0 importers use explicit Complete(); older ones use explicit Cancel()
		var useComplete = _provider.Adapter.BinaryImporterHasComplete;
		var writer      = _provider.Adapter.BeginBinaryImport(connection, copyCommand);

		try
		{
			foreach (var item in source)
			{
				rowWriter(dataConnection.MappingSchema, writer, columns, item);
				currentCount++;
				rowsCopied.RowsCopied++;

				// progress notification is checked on every row (not just batch boundaries)
				if (options.NotifyAfter != 0 && options.RowsCopiedCallback != null && rowsCopied.RowsCopied % options.NotifyAfter == 0)
				{
					options.RowsCopiedCallback(rowsCopied);

					if (rowsCopied.Abort)
					{
						// pre-4.0 importers require an explicit Cancel() to roll back;
						// post-4.0 importers roll back implicitly when disposed without Complete()
						if (!useComplete)
						{
							writer.Cancel();
						}
						break;
					}
				}

				// commit the current batch and start a fresh importer for the next one
				if (currentCount >= batchSize)
				{
					if (useComplete)
					{
						writer.Complete();
					}
					writer.Dispose();
					writer       = _provider.Adapter.BeginBinaryImport(connection, copyCommand);
					currentCount = 0;
				}
			}

			if (!rowsCopied.Abort)
			{
				// the final (partial) batch is committed inside the trace action so the
				// commit time is attributed to the traced bulk-insert operation
				TraceAction(
					dataConnection,
					() => "INSERT BULK " + tableName + "(" + string.Join(", ", columns.Select(x => x.ColumnName)) + Environment.NewLine,
					() =>
					{
						if (useComplete)
						{
							writer.Complete();
						}
						return ((int)rowsCopied.RowsCopied);
					});
			}

			// final notification with the total row count
			if (options.NotifyAfter != 0 && options.RowsCopiedCallback != null)
			{
				options.RowsCopiedCallback(rowsCopied);
			}
		}
		catch when (!useComplete)
		{
			// exception filter: only pre-4.0 importers need explicit cancellation on failure
			writer.Cancel();
			throw;
		}
		finally
		{
			writer.Dispose();
		}

		return (rowsCopied);
	}

	return (MultipleRowsCopy(table, options, source));
}