protected override BulkCopyRowsCopied MultipleRowsCopy<T>(
    DataConnection dataConnection, BulkCopyOptions options, IEnumerable<T> source)
{
    var sqlBuilder = _dataProvider.CreateSqlBuilder();
    var descriptor = dataConnection.MappingSchema.GetEntityDescriptor(typeof(T));
    var tableName  = GetTableName(sqlBuilder, descriptor);
    var sb         = new StringBuilder();
    var buildValue = BasicSqlBuilder.GetBuildValue(sqlBuilder, sb);
    var columns    = descriptor.Columns.Where(c => !c.SkipOnInsert).ToArray();
    var pname      = sqlBuilder.Convert("p", ConvertType.NameToQueryParameter).ToString();
    var rowsCopied = new BulkCopyRowsCopied();

    // Build the INSERT header once; it is rewound to and reused for every batch.
    sb
        .AppendFormat("INSERT INTO {0}", tableName).AppendLine()
        .Append("(");

    foreach (var column in columns)
        sb
            .AppendLine()
            .Append("\t")
            .Append(sqlBuilder.Convert(column.ColumnName, ConvertType.NameToQueryField))
            .Append(",");

    sb.Length--; // drop trailing comma

    sb
        .AppendLine()
        .Append(")");

    sb
        .AppendLine()
        .Append("VALUES");

    var headerLen    = sb.Length;
    var currentCount = 0;
    var batchSize    = options.MaxBatchSize ?? 1000;

    if (batchSize <= 0)
        batchSize = 1000;

    var parms = new List<DataParameter>();
    var pidx  = 0;

    foreach (var item in source)
    {
        sb
            .AppendLine()
            .Append("(");

        foreach (var column in columns)
        {
            var value = column.GetValue(item);

            if (value == null)
            {
                sb.Append("NULL");
            }
            else
            {
                switch (Type.GetTypeCode(value.GetType()))
                {
                    case TypeCode.DBNull:
                        sb.Append("NULL");
                        break;

                    case TypeCode.String:
                        var isString = false;

                        switch (column.DataType)
                        {
                            case DataType.NVarChar:
                            case DataType.Char:
                            case DataType.VarChar:
                            case DataType.NChar:
                            case DataType.Undefined:
                                isString = true;
                                break;
                        }

                        // Strings mapped to character types are inlined as literals;
                        // everything else falls through to the parameter case.
                        if (isString)
                            goto case TypeCode.Int32;
                        goto default;

                    case TypeCode.Boolean:
                    case TypeCode.Char:
                    case TypeCode.SByte:
                    case TypeCode.Byte:
                    case TypeCode.Int16:
                    case TypeCode.UInt16:
                    case TypeCode.Int32:
                    case TypeCode.UInt32:
                    case TypeCode.Int64:
                    case TypeCode.UInt64:
                    case TypeCode.Single:
                    case TypeCode.Double:
                    case TypeCode.Decimal:
                    case TypeCode.DateTime:
                        //SetParameter(dataParam, "", column.DataType, value);
                        buildValue(value);
                        break;

                    default:
                        // Non-literal values go through real query parameters.
                        var name = pname + ++pidx;
                        sb.Append(name);
                        parms.Add(new DataParameter("p" + pidx, value, column.DataType));
                        break;
                }
            }

            sb.Append(",");
        }

        sb.Length--; // drop trailing comma
        sb.Append("),");

        rowsCopied.RowsCopied++;
        currentCount++;

        // Flush when the batch size, parameter count, or statement length limit is hit.
        if (currentCount >= batchSize || parms.Count > 100000 || sb.Length > 100000)
        {
            sb.Length--;
            dataConnection.Execute(sb.AppendLine().ToString(), parms.ToArray());

            if (options.RowsCopiedCallback != null)
            {
                options.RowsCopiedCallback(rowsCopied);

                if (rowsCopied.Abort)
                    return rowsCopied;
            }

            parms.Clear();
            pidx         = 0;
            currentCount = 0;
            sb.Length    = headerLen; // rewind to the reusable INSERT header
        }
    }

    // Flush the final partial batch, if any.
    if (currentCount > 0)
    {
        sb.Length--;
        dataConnection.Execute(sb.ToString(), parms.ToArray());
        sb.Length = headerLen;

        if (options.RowsCopiedCallback != null)
            options.RowsCopiedCallback(rowsCopied);
    }

    return rowsCopied;
}
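For context, a caller typically reaches the multi-row path above through linq2db's public BulkCopy extension rather than invoking MultipleRowsCopy directly. A minimal usage sketch follows; the Customer entity and the connection string are illustrative assumptions, not part of the original code.

// A minimal usage sketch (entity, connection string, and batch size are illustrative).
using System.Collections.Generic;
using LinqToDB;
using LinqToDB.Data;

public class Customer
{
    public int    Id   { get; set; }
    public string Name { get; set; } = "";
}

public static class MultipleRowsCopyUsage
{
    public static void Run(IEnumerable<Customer> customers)
    {
        using var db = new DataConnection(ProviderName.MySql, "Server=localhost;Database=test");

        // BulkCopyType.MultipleRows selects the generated multi-row INSERT path;
        // MaxBatchSize caps the number of rows per generated statement.
        db.BulkCopy(
            new BulkCopyOptions
            {
                BulkCopyType = BulkCopyType.MultipleRows,
                MaxBatchSize = 500,
            },
            customers);
    }
}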
private async Task<BulkCopyRowsCopied> ProviderSpecificCopyInternal<T>(
    ProviderConnections providerConnections,
    ITable<T> table,
    BulkCopyOptions options,
    IEnumerable<T> source,
    bool runAsync,
    CancellationToken cancellationToken)
{
    var dataConnection = providerConnections.DataConnection;
    var connection     = providerConnections.ProviderConnection;
    var transaction    = providerConnections.ProviderTransaction;
    var ed             = dataConnection.MappingSchema.GetEntityDescriptor(typeof(T));
    var columns        = ed.Columns.Where(c => !c.SkipOnInsert || options.KeepIdentity == true && c.IsIdentity).ToList();
    var sb             = _provider.CreateSqlBuilder(dataConnection.MappingSchema);
    var rc             = new BulkCopyRowsCopied();
    var bc             = _provider.Adapter.BulkCopy!.Create(connection, transaction);

    if (options.NotifyAfter != 0 && options.RowsCopiedCallback != null)
    {
        bc.NotifyAfter     = options.NotifyAfter;
        bc.MySqlRowsCopied += (sender, args) =>
        {
            rc.RowsCopied += args.RowsCopied;
            options.RowsCopiedCallback(rc);

            if (rc.Abort)
                args.Abort = true;
        };
    }

    if (options.BulkCopyTimeout.HasValue)
        bc.BulkCopyTimeout = options.BulkCopyTimeout.Value;
    else if (Configuration.Data.BulkCopyUseConnectionCommandTimeout)
        bc.BulkCopyTimeout = connection.ConnectionTimeout;

    var tableName = GetTableName(sb, options, table);

    bc.DestinationTableName = GetTableName(sb, options, table);

    for (var i = 0; i < columns.Count; i++)
        bc.AddColumnMapping(_provider.Adapter.BulkCopy.CreateColumnMapping(i, columns[i].ColumnName));

    // emulate missing BatchSize property
    // this is needed, because MySql fails on big batches, so users should be able to limit batch size
    foreach (var batch in EnumerableHelper.Batch(source, options.MaxBatchSize ?? int.MaxValue))
    {
        var rd = new BulkCopyReader<T>(dataConnection, columns, batch);

        await TraceActionAsync(
            dataConnection,
            () =>
                (runAsync && (
#if !NETFRAMEWORK
                    bc.CanWriteToServerAsync2 ||
#endif
                    bc.CanWriteToServerAsync)
                    ? "INSERT ASYNC BULK "
                    : "INSERT BULK ")
                + tableName + "(" + string.Join(", ", columns.Select(x => x.ColumnName)) + Environment.NewLine,
            async () =>
            {
                if (runAsync)
                {
#if !NETFRAMEWORK
                    if (bc.CanWriteToServerAsync2)
                        await bc.WriteToServerAsync2(rd, cancellationToken)
                            .ConfigureAwait(Common.Configuration.ContinueOnCapturedContext);
                    else
#endif
                    if (bc.CanWriteToServerAsync)
                        await bc.WriteToServerAsync(rd, cancellationToken)
                            .ConfigureAwait(Common.Configuration.ContinueOnCapturedContext);
                    else
                        bc.WriteToServer(rd);
                }
                else
                    bc.WriteToServer(rd);

                return rd.Count;
            }).ConfigureAwait(Common.Configuration.ContinueOnCapturedContext);

        rc.RowsCopied += rd.Count;
    }

    if (options.NotifyAfter != 0 && options.RowsCopiedCallback != null)
        options.RowsCopiedCallback(rc);

    return rc;
}
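Both provider-specific methods slice the source with EnumerableHelper.Batch, an internal linq2db helper. As a rough illustration of the shape that helper needs (lazy slicing, with the final partial batch emitted), here is a minimal sketch; the real implementation may differ.

// A minimal sketch of a lazy batching helper with the shape the code above
// assumes. The real EnumerableHelper.Batch lives in linq2db internals and
// may differ; this is illustrative only.
using System;
using System.Collections.Generic;

internal static class BatchSketch
{
    public static IEnumerable<IEnumerable<T>> Batch<T>(IEnumerable<T> source, int batchSize)
    {
        if (batchSize < 1)
            throw new ArgumentOutOfRangeException(nameof(batchSize));

        // Cap the initial capacity: callers pass int.MaxValue for "no limit",
        // and pre-allocating that much would fail.
        var batch = new List<T>(Math.Min(batchSize, 1000));

        foreach (var item in source)
        {
            batch.Add(item);

            if (batch.Count == batchSize)
            {
                yield return batch;
                batch = new List<T>(Math.Min(batchSize, 1000));
            }
        }

        // Emit the final partial batch, if any.
        if (batch.Count > 0)
            yield return batch;
    }
}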
protected override BulkCopyRowsCopied ProviderSpecificCopy<T>(
    ITable<T> table, BulkCopyOptions options, IEnumerable<T> source)
{
    if (_provider.Adapter.BulkCopy != null && table.DataContext is DataConnection dataConnection)
    {
        var connection  = _provider.TryGetProviderConnection(dataConnection.Connection, dataConnection.MappingSchema);
        var transaction = dataConnection.Transaction;

        if (connection != null && transaction != null)
            transaction = _provider.TryGetProviderTransaction(transaction, dataConnection.MappingSchema);

        if (connection != null && (dataConnection.Transaction == null || transaction != null))
        {
            var ed      = dataConnection.MappingSchema.GetEntityDescriptor(typeof(T));
            var columns = ed.Columns.Where(c => !c.SkipOnInsert || options.KeepIdentity == true && c.IsIdentity).ToList();
            var sb      = _provider.CreateSqlBuilder(dataConnection.MappingSchema);
            var rc      = new BulkCopyRowsCopied();
            var bc      = _provider.Adapter.BulkCopy.Create(connection, transaction);

            if (options.NotifyAfter != 0 && options.RowsCopiedCallback != null)
            {
                bc.NotifyAfter     = options.NotifyAfter;
                bc.MySqlRowsCopied += (sender, args) =>
                {
                    rc.RowsCopied += args.RowsCopied;
                    options.RowsCopiedCallback(rc);

                    if (rc.Abort)
                        args.Abort = true;
                };
            }

            if (options.BulkCopyTimeout.HasValue)
                bc.BulkCopyTimeout = options.BulkCopyTimeout.Value;

            var tableName = GetTableName(sb, options, table);

            bc.DestinationTableName = GetTableName(sb, options, table);

            for (var i = 0; i < columns.Count; i++)
                bc.AddColumnMapping(_provider.Adapter.BulkCopy.CreateColumnMapping(i, columns[i].ColumnName));

            // emulate missing BatchSize property
            // this is needed, because MySql fails on big batches, so users should be able to limit batch size
            foreach (var batch in EnumerableHelper.Batch(source, options.MaxBatchSize ?? int.MaxValue))
            {
                var rd = new BulkCopyReader(dataConnection, columns, batch);

                TraceAction(
                    dataConnection,
                    () => "INSERT BULK " + tableName + "(" + string.Join(", ", columns.Select(x => x.ColumnName)) + Environment.NewLine,
                    () =>
                    {
                        bc.WriteToServer(rd);
                        return rd.Count;
                    });

                rc.RowsCopied += rd.Count;
            }

            if (options.NotifyAfter != 0 && options.RowsCopiedCallback != null)
                options.RowsCopiedCallback(rc);

            return rc;
        }
    }

    // Fall back to generated multi-row INSERT statements when the
    // provider-specific bulk copy API or connection is not available.
    return MultipleRowsCopy(table, options, source);
}
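To round this out, here is a hedged usage sketch of the provider-specific path with progress reporting and abort, reusing the illustrative Customer entity from the first sketch; the thresholds are arbitrary examples.

// A minimal usage sketch of the provider-specific path with progress
// reporting (the entity and numeric limits are illustrative).
using System;
using System.Collections.Generic;
using LinqToDB;
using LinqToDB.Data;

public static class ProviderSpecificCopyUsage
{
    public static void Run(DataConnection db, IEnumerable<Customer> customers)
    {
        db.BulkCopy(
            new BulkCopyOptions
            {
                BulkCopyType       = BulkCopyType.ProviderSpecific,
                MaxBatchSize       = 10_000, // substitutes for the missing provider BatchSize, per the comment above
                NotifyAfter        = 1_000,
                RowsCopiedCallback = copied =>
                {
                    Console.WriteLine($"{copied.RowsCopied} rows copied");

                    // Setting Abort stops the copy; the MySqlRowsCopied
                    // handler above propagates it to the provider.
                    if (copied.RowsCopied > 1_000_000)
                        copied.Abort = true;
                },
            },
            customers);
    }
}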