/// <summary>
/// Streams rows from standard input into myCopyTestTable via COPY ... FROM STDIN.
/// If starting the copy fails, the copy is cancelled and the cancel is verified
/// before the original exception is rethrown.
/// </summary>
private static void updateTable()
{
    // NOTE(review): hard-coded host and credentials — should come from configuration.
    NpgsqlConnection conn = new NpgsqlConnection("Server=169.254.95.120;Port=5432;User Id=joe;Password=postgres;Database=postgres;");
    conn.Open();
    try
    {
        NpgsqlCommand command = new NpgsqlCommand("COPY myCopyTestTable FROM STDIN", conn);
        // expecting input in server encoding!
        NpgsqlCopyIn cin = new NpgsqlCopyIn(command, conn, Console.OpenStandardInput());
        try
        {
            cin.Start();
        }
        catch (Exception e)
        {
            try
            {
                cin.Cancel("Undo copy");
            }
            catch (NpgsqlException e2)
            {
                // we should get an error in response to our cancel request:
                if (!("" + e2).Contains("Undo copy"))
                {
                    throw new Exception("Failed to cancel copy: " + e2 + " upon failure: " + e);
                }
            }
            // was "throw e;" — bare "throw;" preserves the original stack trace
            throw;
        }
    }
    finally
    {
        // close on both success and failure (the original leaked the connection on error)
        conn.Close();
    }
}
/// <summary>
/// Test for https://github.com/npgsql/Npgsql/pull/92.
/// SetCulture is used to set a culture where a comma is used to separate decimal
/// values (0,5), which will cause problems if Npgsql doesn't convert correctly
/// to use a point (0.5).
/// </summary>
public void InvariantCultureNpgsqlCopySerializer()
{
    var cmd = new NpgsqlCommand("COPY data (field_int4, field_int8, field_float4) FROM STDIN", Conn);
    var npgsqlCopySerializer = new NpgsqlCopySerializer(Conn);
    var npgsqlCopyIn = new NpgsqlCopyIn(cmd, Conn, npgsqlCopySerializer.ToStream);

    npgsqlCopyIn.Start();
    npgsqlCopySerializer.AddInt32(300000);
    npgsqlCopySerializer.AddInt64(1000000);
    npgsqlCopySerializer.AddNumber(0.5);
    npgsqlCopySerializer.EndRow();
    npgsqlCopySerializer.Flush();
    npgsqlCopyIn.End();

    // Dispose the reader: the original left it open, which keeps the connection
    // busy and breaks any subsequent command issued on Conn.
    using (var dr = new NpgsqlCommand("select field_int4, field_int8, field_float4 from data", Conn).ExecuteReader())
    {
        dr.Read();
        Assert.AreEqual(300000, dr[0]);
        Assert.AreEqual(1000000, dr[1]);
        Assert.AreEqual(0.5, dr[2]);
    }
}
// Stream failure tests
// Verifies that an exception injected by CountStream (FailAt = 2) aborts a
// stream-driven COPY IN with the expected "Test Exception handling" message.
static public void FailCopyInFromStream()
{
    cs = new CountStream();
    cs.FailAt = 2;
    cs.WrapStream = new FileStream("test_copy.cs", FileMode.Open, FileAccess.Read);
    cin = new NpgsqlCopyIn(new NpgsqlCommand("COPY copy1 FROM STDIN DELIMITER '\b'", conn), conn, cs);
    try
    {
        cin.Start();
    }
    catch (Exception e)
    {
        if (("" + e).Contains("Test Exception handling"))
        {
            Console.Out.WriteLine("Copy from stream failed as requested.");
            return;
        }
        else
        {
            Console.Out.WriteLine("Copy from stream failing failed: " + e);
            // was "throw e;" — bare "throw;" preserves the original stack trace
            throw;
        }
    }
    finally
    {
        cs.Close();
        cin.End(); // should do nothing
    }
    Console.Out.WriteLine("Copy from stream did not fail as requested");
}
/// <summary>
/// Streaming data load: opens the connection, then executes the given
/// COPY ... FROM STDIN query, feeding it from <paramref name="stream"/>.
/// </summary>
/// <param name="sqlQuery">The COPY statement to execute.</param>
/// <param name="stream">Source stream supplying the copy data.</param>
public void StreamingLoad(string sqlQuery, Stream stream)
{
    OpenConnection();
    var connection = (NpgsqlConnection)DbConnection;
    var copyIn = new NpgsqlCopyIn(new NpgsqlCommand(sqlQuery, connection), connection, stream);
    copyIn.Start();
    copyIn.End();
}
/// <summary>
/// Bulk-loads a CSV file into the given table with PostgreSQL COPY ... FROM STDIN.
/// The file's first line is treated as the CSV header and forwarded to the server
/// (the COPY options include HEADER); '@null@' markers are rewritten into real NULLs
/// via RewriteNullValues before each line is streamed.
/// </summary>
/// <param name="qualifiedTableName">Destination table name; interpolated into the COPY statement verbatim.</param>
/// <param name="fileName">Path of the data file opened through OpenDataFile.</param>
public override void InsertIntoTable(string qualifiedTableName, string fileName)
{
    using (var iconnection = CreateConnection())
    {
        using (var reader = OpenDataFile(fileName))
        {
            // read the header
            var header = reader.ReadLine();
            // get the real connection
            var connection = UnwrapConnection(iconnection);
            var command = new NpgsqlCommand(string.Format("COPY {0} FROM STDIN WITH CSV HEADER NULL '@null@' ESCAPE '\\'", qualifiedTableName), connection);
            var copyIn = new NpgsqlCopyIn(command, connection);
            var lineCount = 0;
            string line = string.Empty;
            try
            {
                copyIn.Start();
                var copyInStream = copyIn.CopyStream;
                // 16 KB buffered UTF-8 writer over the raw copy stream
                var copyInWriter = new StreamWriter(copyInStream, System.Text.Encoding.UTF8, 16384);
                // write the header
                copyInWriter.WriteLine(header);
                // stream the lines
                while ((line = reader.ReadLine()) != null)
                {
                    // lines must be cleaned up so that nulls are transformed properly into null values
                    line = RewriteNullValues(line);
                    // write the line
                    copyInWriter.WriteLine(line);
                    lineCount++;
                }
                copyInWriter.Flush();
                // closing the copy stream is what completes the COPY on the server side
                copyInStream.Close();
                if (Logger != null)
                {
                    Logger.Info("Loaded table: {0} : {1}", qualifiedTableName, lineCount);
                }
            }
            catch (NpgsqlException e)
            {
                // NOTE(review): the failure is logged but not rethrown, so callers cannot
                // distinguish a failed load from a successful one — confirm this is intentional.
                copyIn.Cancel("undo copy");
                if (Logger != null)
                {
                    Logger.Info("Loading table: {0} : FAILURE", qualifiedTableName);
                    Logger.Info(e.Message);
                    Logger.Info(e.StackTrace);
                }
            }
        }
    }
}
/// <summary>
/// Streams every record from <paramref name="queue"/> into DestinationTable using
/// PostgreSQL COPY ... FROM STDIN in tab-delimited text form (one \r\n-terminated
/// line per record). On failure the copy is cancelled, the error is logged, and
/// the exception is rethrown.
/// </summary>
protected override void RunBulkCopy(IDataQueue queue)
{
    // NOTE(review): failRowCount is never incremented and insertErrors is never
    // populated anywhere in this method, so the FAIL branch below is unreachable
    // as written — confirm whether per-row failure tracking was meant to be wired up.
    int okRowCount = 0, failRowCount = 0;
    List<string> insertErrors = new List<string>();
    ITableStructure dst = queue.GetRowFormat;
    var conn = (NpgsqlConnection)Connection.SystemConnection;
    // Build "COPY <table> (<columns>) FROM STDIN" through the dialect scripter.
    NpgsqlCommand command = new NpgsqlCommand(Connection.Dialect.GenerateScript(d => d.Put("^copy %f (%,i) ^from ^stdin", DestinationTable.FullName, from c in dst.Columns select c.ColumnName)), conn);
    NpgsqlCopyIn cin = new NpgsqlCopyIn(command, conn);
    try
    {
        cin.Start();
        var fw = new BinaryWriter(cin.CopyStream);
        while (!queue.IsEof)
        {
            IBedRecord rec = queue.GetRecord();
            for (int i = 0; i < rec.FieldCount; i++)
            {
                // tab between fields, not before the first one
                if (i > 0)
                {
                    fw.Write((byte)'\t');
                }
                rec.ReadValue(i);
                WriteField(rec, fw);
            }
            fw.Write((byte)'\r');
            fw.Write((byte)'\n');
            okRowCount++;
        }
        fw.Flush();
        cin.End();
    }
    catch (Exception err)
    {
        // abort the in-progress COPY, log, then propagate
        cin.Cancel("canceled");
        ProgressInfo.LogMessageDetail(
            "INSERT", DatAdmin.LogLevel.Error,
            String.Format("{0}", Texts.Get("s_error_inserting_into_table$table", "table", DestinationTable.FullName)),
            err.ToString());
        throw;
    }
    if (failRowCount > 0)
    {
        ProgressInfo.LogMessageDetail(
            "INSERT", DatAdmin.LogLevel.Error,
            String.Format("{0}, OK:{1}, FAIL:{2}", Texts.Get("s_error_inserting_into_table$table", "table", DestinationTable.FullName), okRowCount, failRowCount),
            insertErrors.CreateDelimitedText("\r\n")
            );
    }
    else
    {
        ProgressInfo.LogMessage("INSERT", DatAdmin.LogLevel.Info, Texts.Get("s_inserted_into_table$table$rows", "table", DestinationTable.FullName, "rows", okRowCount));
    }
}
/// <summary>
/// Bulk-inserts the contents of each stream as one tab-delimited COPY line
/// (a newline is appended after each stream). Each source stream is disposed
/// after use, even if copying fails partway through.
/// </summary>
/// <param name="table">Destination table name. NOTE(review): concatenated into the
/// COPY statement unescaped — callers must not pass untrusted input.</param>
/// <param name="data">Streams of pre-formatted COPY text rows.</param>
public void BulkInsert(string table, IEnumerable<Stream> data)
{
    var copy = new NpgsqlCopyIn("COPY " + table + " FROM STDIN DELIMITER '\t'", Connection);
    copy.Start();
    foreach (var it in data)
    {
        // "using" guarantees disposal even when CopyTo throws; the original
        // only disposed on the success path and leaked the stream on failure.
        using (it)
        {
            it.CopyTo(copy.CopyStream);
            copy.CopyStream.WriteByte((byte)'\n');
        }
    }
    copy.End();
}
// Background-worker handler: for every act row in dtActs, inserts the act,
// reads back its generated id via currval('acts_actid_seq'), then bulk-copies
// the matching seat rows from cSeats into available_seats via COPY FROM STDIN.
// NOTE(review): this mutates shared state (id, addedactid, conn) and repeatedly
// closes/reopens the one shared connection per iteration — presumably to reset
// connection state between commands; confirm this is safe off the UI thread.
void backgroundWorker1_DoWork(object sender, DoWorkEventArgs e)
{
    foreach (DataRow dr in dtActs.Rows)
    {
        // unpack the act row: id, name, free_placement, start/end times
        id = int.Parse(dr[0].ToString());
        string an = dr[1].ToString();
        int fp = int.Parse(dr[2].ToString());
        string st = dr[3].ToString();
        string et = dr[4].ToString();
        // cycle the connection before each act insert
        conn.Close();
        conn.Open();
        NpgsqlCommand command1 = new NpgsqlCommand("insert into acts(name, showid, free_placement, start_time, end_time) values(:nm, :shid, :fp, :st, :et)", conn);
        command1.Parameters.Add(new NpgsqlParameter("nm", an));
        command1.Parameters.Add(new NpgsqlParameter("shid", addedshowid));
        command1.Parameters.Add(new NpgsqlParameter("fp", fp));
        command1.Parameters.Add(new NpgsqlParameter("st", st));
        command1.Parameters.Add(new NpgsqlParameter("et", et));
        command1.ExecuteNonQuery();
        // fetch the id assigned by the acts_actid_seq sequence for the insert above
        NpgsqlCommand command2 = new NpgsqlCommand("select currval('acts_actid_seq');", conn);
        NpgsqlDataReader read2;
        read2 = command2.ExecuteReader();
        read2.Read();
        addedactid = read2[0].ToString();
        conn.Close();
        int nr = int.Parse(addedactid.ToString());
        conn.Open();
        // bulk-copy the seats belonging to this act into available_seats
        NpgsqlCommand cmd2 = new NpgsqlCommand("Copy available_seats(actid, seatid) from STDIN", conn);
        NpgsqlCopySerializer ser = new NpgsqlCopySerializer(conn);
        NpgsqlCopyIn copy = new NpgsqlCopyIn(cmd2, conn, ser.ToStream);
        copy.Start();
        foreach (DataRow row in cSeats.Rows)
        {
            // only seats whose first column matches the current act's source id
            if (int.Parse(row[0].ToString()) == id)
            {
                int seatid = int.Parse(row[1].ToString());
                ser.AddInt32(nr);
                ser.AddInt32(seatid);
                ser.EndRow();
                ser.Flush();
            }
        }
        copy.End();
        ser.Close();
    }
}
// Verifies that a mid-copy failure injected by CountStream (FailAt = 2) while
// manually writing to CopyStream can be cancelled, and that the cancel message
// comes back in the resulting NpgsqlException.
static public void FailCopyInByWriting()
{
    cs = new CountStream();
    cs.FailAt = 2;
    cs.WrapStream = new FileStream("test_copy.cs", FileMode.Open, FileAccess.Read);
    cin = new NpgsqlCopyIn("COPY copy1 FROM STDIN", conn);
    cin.Start();
    if (!cin.IsActive)
    {
        throw new Exception("Copy started inactive");
    }
    byte[] buf = new byte[8];
    int i;
    try
    {
        while ((i = cs.Read(buf, 0, buf.Length)) > 0)
        {
            cin.CopyStream.Write(buf, 0, i);
        }
    }
    catch (Exception e)
    {
        if (("" + e).Contains("Test Exception handling"))
        {
            try
            {
                cin.Cancel("Test whether copy in fails correctly");
            }
            catch (Exception e2)
            {
                if (("" + e2).Contains("Test whether copy in fails correctly"))
                {
                    Console.Out.WriteLine("Copy from writing failed as requested.");
                    return;
                }
                // was "throw e2;" — bare "throw;" preserves the original stack trace
                throw;
            }
            throw new Exception("CopyIn.Cancel() didn't throw up the expected exception");
        }
        // was "throw e;" — bare "throw;" preserves the original stack trace
        throw;
    }
    finally
    {
        cs.Close();
        cin.End(); // should do nothing
    }
    throw new Exception("Copy from writing did not fail as requested");
}
// Stream success tests
// Drives a COPY IN entirely from a wrapped file stream and checks that the
// operation completes in a single pass, then accumulates byte-count/checksum
// totals for later verification.
static public void CopyInFromStream()
{
    cs = new CountStream();
    cs.WrapStream = new FileStream("test_copy.cs", FileMode.Open, FileAccess.Read);
    var copyCommand = new NpgsqlCommand("COPY copy1 FROM STDIN DELIMITER '\b'", conn);
    cin = new NpgsqlCopyIn(copyCommand, conn, cs);
    cin.Start();
    // a stream-fed copy should be fully drained by the time Start() returns
    if (cin.IsActive)
    {
        throw new Exception("Copy from stream did not complete in single pass");
    }
    InLength += cs.BytesPassed;
    InSum += cs.CheckSum;
    cs.Close();
    Console.Out.WriteLine("Copy from stream ok");
}
/// <summary>
/// Bulk-inserts every row of <paramref name="table"/> into the database table of
/// the same name via COPY ... FROM STDIN, serializing values with AddValueToSerializer.
/// On failure the copy is cancelled, the cancel is verified, and the original
/// exception is rethrown.
/// </summary>
protected void InsertDataToDbBulkMethod(DataTable table)
{
    List<string> columns_names = new List<string>();
    for (int i = 0; i < table.Columns.Count; i++)
    {
        columns_names.Add(table.Columns[i].ColumnName);
    }
    string sql = string.Format("COPY {0}({1}) FROM STDIN", table.TableName, string.Join(",", columns_names.ToArray()));
    _cmd = CreateCommand(sql);
    _cmd.CommandType = CommandType.Text;
    var serializer = new NpgsqlCopySerializer(_conn as NpgsqlConnection);
    NpgsqlCopyIn copyIn = new NpgsqlCopyIn((_cmd as NpgsqlCommand), (_conn as NpgsqlConnection), serializer.ToStream);
    try
    {
        copyIn.Start();
        foreach (DataRow dr in table.Rows)
        {
            for (int i = 0; i < table.Columns.Count; i++)
            {
                AddValueToSerializer(serializer, dr[i]);
            }
            serializer.EndRow();
            serializer.Flush();
        }
        copyIn.End();
        serializer.Close();
    }
    catch (Exception e)
    {
        try
        {
            copyIn.Cancel("Exception has occured!");
        }
        catch (NpgsqlException ex)
        {
            // A successful cancel surfaces as an NpgsqlException carrying our cancel
            // message; the copy was left UN-cancelled only when that message is absent.
            // (The original condition was inverted and reported success as failure.)
            if (!ex.BaseMessage.Contains("Exception has occured!"))
            {
                throw new Exception(string.Format("Copy was uncanceled. exception1: {0};exception2: {1}", e.Message, ex.Message));
            }
        }
        // Propagate the original failure instead of silently swallowing it.
        throw;
    }
}
/// <summary>
/// Bulk-inserts tuples into <paramref name="table"/> by rendering each row as a
/// tab-delimited COPY text line (NULL columns as \N) into an in-memory stream,
/// then replaying the whole buffer through COPY ... FROM STDIN.
/// Only usable inside an open transaction.
/// </summary>
public void BulkInsert(string table, IEnumerable<IPostgresTuple[]> data)
{
    if (!InTransaction)
    {
        throw new FrameworkException("BulkInsert can only be used within transaction");
    }
    using (var cms = Revenj.Utility.ChunkedMemoryStream.Create())
    {
        var writer = cms.GetWriter();
        var scratch = cms.SmallBuffer;
        foreach (var row in data)
        {
            // first column: no delimiter prefix
            var tuple = row[0];
            if (tuple == null)
            {
                writer.Write("\\N");
            }
            else
            {
                tuple.InsertRecord(writer, scratch, string.Empty, PostgresTuple.EscapeBulkCopy);
            }
            // remaining columns, each preceded by a tab
            for (int col = 1; col < row.Length; col++)
            {
                writer.Write('\t');
                tuple = row[col];
                if (tuple == null)
                {
                    writer.Write("\\N");
                }
                else
                {
                    tuple.InsertRecord(writer, scratch, string.Empty, PostgresTuple.EscapeBulkCopy);
                }
            }
            writer.Write('\n');
        }
        writer.Flush();
        cms.Position = 0;
        var copy = new NpgsqlCopyIn("COPY " + table + " FROM STDIN DELIMITER '\t'", Connection);
        copy.Start();
        cms.CopyTo(copy.CopyStream);
        copy.End();
    }
}
/// <summary>
/// Bulk-copies every row of <paramref name="items"/> into the table named by
/// <paramref name="command"/>.CommandText using pipe-delimited COPY ... FROM STDIN.
/// On failure the copy is cancelled (and the cancel verified) before the original
/// exception is rethrown.
/// </summary>
private void BulkCopy(System.Data.DataTable items, NpgsqlConnection connection, NpgsqlCommand command)
{
    var dataTableString = DataTableToString(items);
    var sql = string.Format("COPY {0} (\"{1}\") FROM STDIN WITH DELIMITER '|'", command.CommandText, dataTableString);
    command.CommandText = sql;
    var copy = new NpgsqlCopyIn(command, connection);
    try
    {
        copy.Start();
        foreach (System.Data.DataRow item in items.Rows)
        {
            var data = SerializeData(item.ItemArray);
            var raw = Encoding.UTF8.GetBytes(string.Concat(data, "\n"));
            copy.CopyStream.Write(raw, 0, raw.Length);
        }
    }
    catch (Exception e)
    {
        try
        {
            copy.Cancel("Undo copy");
        }
        catch (NpgsqlException e2)
        {
            // we should get an error in response to our cancel request:
            if (!("" + e2).Contains("Undo copy"))
            {
                // Report the cancel exception itself; the original interpolated the
                // NpgsqlCopyIn object ("copy"), which stringifies to a type name and
                // loses the actual failure detail.
                throw new PriusException("Failed to cancel copy: " + e2 + " upon failure: " + e);
            }
        }
        throw;
    }
    finally
    {
        copy.CopyStream?.Close();
        copy.End();
    }
}
/// <summary>
/// Test for https://github.com/npgsql/Npgsql/issues/221: the milliseconds field
/// is not properly copied in NpgsqlCopySerializer.cs in method AddDateTime.
/// </summary>
public void Bug221MillisecondsFieldNotCopied()
{
    var cmd = new NpgsqlCommand("COPY data (field_timestamp) FROM STDIN", Conn);
    var npgsqlCopySerializer = new NpgsqlCopySerializer(Conn);
    var npgsqlCopyIn = new NpgsqlCopyIn(cmd, Conn, npgsqlCopySerializer.ToStream);
    // a timestamp with a non-zero milliseconds component (.005)
    var testDate = DateTime.Parse("2002-02-02 09:00:23.005");

    npgsqlCopyIn.Start();
    npgsqlCopySerializer.AddDateTime(testDate);
    npgsqlCopySerializer.EndRow();
    npgsqlCopySerializer.Flush();
    npgsqlCopyIn.End();

    // Dispose the reader: the original left it open, which keeps the connection
    // busy and breaks any subsequent command issued on Conn.
    using (var dr = new NpgsqlCommand("select field_timestamp from data", Conn).ExecuteReader())
    {
        dr.Read();
        Assert.AreEqual(testDate, dr[0]);
    }
}
// Pushes the test file through the COPY stream by hand, 8 bytes at a time,
// then accumulates byte-count and checksum totals for later verification.
static public void CopyInByWriting()
{
    cs = new CountStream();
    cs.WrapStream = new FileStream("test_copy.cs", FileMode.Open, FileAccess.Read);
    cin = new NpgsqlCopyIn("COPY copy1 FROM STDIN DELIMITER '\b'", conn);
    cin.Start();
    if (!cin.IsActive)
    {
        throw new Exception("Copy started inactive");
    }
    var chunk = new byte[8];
    for (int bytesRead = cs.Read(chunk, 0, chunk.Length); bytesRead > 0; bytesRead = cs.Read(chunk, 0, chunk.Length))
    {
        cin.CopyStream.Write(chunk, 0, bytesRead);
    }
    cin.End();
    InLength += cs.BytesPassed;
    InSum += cs.CheckSum;
    cs.Close();
    Console.Out.WriteLine("Copy from writing ok");
}
// Serializer success test
// Feeds two rows into copy2 through NpgsqlCopySerializer; Close() flushes the
// serializer and finishes the copy.
static public void CopyInWithSerializer()
{
    var serializer = new NpgsqlCopySerializer(conn);
    String copySql = "COPY copy2(field_int4, field_int8, field_text, field_timestamp, field_bool) FROM STDIN";
    cin = new NpgsqlCopyIn(copySql, conn);
    cin.Start();
    if (!cin.IsActive)
    {
        throw new Exception("Copy started inactive");
    }
    // first row: all columns populated except field_int8 (NULL)
    serializer.AddInt32(-13);
    serializer.AddNull();
    serializer.AddString("First row");
    serializer.AddDateTime(new DateTime(2020, 12, 22, 23, 33, 45, 765));
    serializer.AddBool(true);
    serializer.EndRow();
    // second row: two NULLs, a string, then Close() without an explicit EndRow
    // (matching the original sequence — presumably Close() finalizes the row)
    serializer.AddNull();
    serializer.AddNull();
    serializer.AddString("Second row");
    serializer.Close();
    Console.Out.WriteLine("Copy through serializer ok");
}
/// <summary>
/// Exports each query's 200-slot boolean flag vector (built from its sparse index
/// list) through COPY ... FROM STDIN, flushing the serializer every FLUSH_ROWS rows.
/// </summary>
/// <returns>Always true.</returns>
public bool export()
{
    NpgsqlCommand cmd = new NpgsqlCommand(CreateInsertCmd(), connection);
    NpgsqlCopySerializer serializer = new NpgsqlCopySerializer(connection);
    NpgsqlCopyIn copyIn = new NpgsqlCopyIn(cmd, connection, serializer.ToStream);
    const int FLUSH_ROWS = 200000;
    copyIn.Start();
    var rowsWritten = 0;
    foreach (var queryList in _queryLists)
    {
        // expand the sparse index list into a dense 200-slot bool vector
        var flags = new bool[200];
        foreach (int index in queryList.Value)
        {
            flags[index] = true;
        }
        serializer.AddInt32(queryList.Key);
        for (int slot = 0; slot < 200; slot++)
        {
            serializer.AddBool(flags[slot]);
        }
        serializer.EndRow();
        // flush on rows 0, FLUSH_ROWS, 2*FLUSH_ROWS, ... (pre-increment semantics
        // of the original "linecounter++ % FLUSH_ROWS == 0")
        if (rowsWritten % FLUSH_ROWS == 0)
        {
            serializer.Flush();
        }
        rowsWritten++;
    }
    serializer.Flush();
    serializer.Close();
    copyIn.End();
    return true;
}
/// <summary>
/// Regression test for Npgsql issue 188: a string exactly filling the serializer's
/// default buffer must round-trip through COPY intact.
/// </summary>
public void Bug188BufferNpgsqlCopySerializer()
{
    var cmd = new NpgsqlCommand("COPY data (field_int4, field_text) FROM STDIN", Conn);
    var npgsqlCopySerializer = new NpgsqlCopySerializer(Conn);
    var npgsqlCopyIn = new NpgsqlCopyIn(cmd, Conn, npgsqlCopySerializer.ToStream);
    // pad the string to exactly the serializer's buffer size to hit the boundary
    string str = "Very long string".PadRight(NpgsqlCopySerializer.DEFAULT_BUFFER_SIZE, 'z');

    npgsqlCopyIn.Start();
    npgsqlCopySerializer.AddInt32(12345678);
    npgsqlCopySerializer.AddString(str);
    npgsqlCopySerializer.EndRow();
    npgsqlCopySerializer.Flush();
    npgsqlCopyIn.End();

    // Dispose the reader: the original left it open, which keeps the connection
    // busy and breaks any subsequent command issued on Conn.
    using (var dr = new NpgsqlCommand("select field_int4, field_text from data", Conn).ExecuteReader())
    {
        dr.Read();
        Assert.AreEqual(12345678, dr[0]);
        Assert.AreEqual(str, dr[1]);
    }
}
/// <summary>
/// Regression test for Npgsql issue 219: two concurrent COPY IN operations on
/// separate connections. Temporary tables are always dropped in the finally block.
/// </summary>
public void Bug219NpgsqlCopyInConcurrentUsage()
{
    // The original wrapped the body in "catch (Exception) { throw; }", which is a
    // no-op; try/finally alone provides the same cleanup guarantee.
    try
    {
        // Create temporary test tables
        ExecuteNonQuery(@"CREATE TABLE Bug219_table1 ( id integer, name character varying(100) ) WITH ( OIDS=FALSE );");
        ExecuteNonQuery(@"CREATE TABLE Bug219_table2 ( id integer, null1 integer, name character varying(100), null2 integer, description character varying(1000), null3 integer ) WITH ( OIDS=FALSE );");
        using (var connection1 = new NpgsqlConnection(ConnectionString))
        using (var connection2 = new NpgsqlConnection(ConnectionString))
        {
            connection1.Open();
            connection2.Open();
            var copy1 = new NpgsqlCopyIn("COPY Bug219_table1 FROM STDIN;", connection1);
            var copy2 = new NpgsqlCopyIn("COPY Bug219_table2 FROM STDIN;", connection2);
            copy1.Start();
            copy2.Start();
            NpgsqlCopySerializer cs1 = new NpgsqlCopySerializer(connection1);
            //NpgsqlCopySerializer cs2 = new NpgsqlCopySerializer(connection2);
            for (int index = 0; index < 10; index++)
            {
                cs1.AddInt32(index);
                cs1.AddString(string.Format("Index {0} ", index));
                cs1.EndRow();
                /*cs2.AddInt32(index);
                 * cs2.AddNull();
                 * cs2.AddString(string.Format("Index {0} ", index));
                 * cs2.AddNull();
                 * cs2.AddString("jjjjj");
                 * cs2.AddNull();
                 * cs2.EndRow();*/
            }
            cs1.Close(); //Exception
            //cs2.Close();
            copy1.End();
            copy2.End();
        }
    }
    finally
    {
        ExecuteNonQuery(@"DROP TABLE IF EXISTS Bug219_table1");
        ExecuteNonQuery(@"DROP TABLE IF EXISTS Bug219_table2");
    }
}
/// <summary>
/// Bulk copies a set of objects to the server using COPY ... FROM STDIN WITH CSV,
/// quoting each non-NULL value and escaping via _csvRegex. Raises a rows-copied
/// notification every NotifyAfter rows; an Abort from the handler cancels the copy.
/// </summary>
/// <param name="connection">The connection to use (must be an NpgsqlConnection).</param>
/// <param name="tableName">The name of the table (interpolated into the COPY statement).</param>
/// <param name="reader">The reader to read objects from.</param>
/// <param name="configure">A callback method to configure the bulk copy object.
/// NOTE(review): never invoked in this implementation — confirm whether it should be.</param>
/// <param name="options">Options for initializing the bulk copy object.</param>
/// <param name="transaction">An optional transaction to participate in.</param>
public override void BulkCopy(IDbConnection connection, string tableName, IDataReader reader, Action<InsightBulkCopy> configure, InsightBulkCopyOptions options, IDbTransaction transaction)
{
    if (reader == null)
    {
        throw new ArgumentNullException("reader");
    }
    NpgsqlCopyIn bulk = new NpgsqlCopyIn(String.Format(CultureInfo.InvariantCulture, "COPY {0} FROM STDIN WITH CSV", tableName), (NpgsqlConnection)connection);
    PostgreSQLInsightBulkCopy insightBulkCopy = new PostgreSQLInsightBulkCopy(bulk);
    try
    {
        bulk.Start();
        var stream = bulk.CopyStream;
        StreamWriter writer = new StreamWriter(stream);
        int row = 0;
        while (reader.Read())
        {
            for (int i = 0; i < reader.FieldCount; i++)
            {
                // delimiter between fields, not before the first one
                if (i > 0)
                {
                    writer.Write(CsvDelimiter);
                }
                object value = reader.GetValue(i);
                // DBNull is emitted as an empty (unquoted) field, which COPY CSV reads as NULL
                if (value != DBNull.Value)
                {
                    writer.Write(CsvQuote);
                    writer.Write(_csvRegex.Replace(value.ToString(), CsvReplacement));
                    writer.Write(CsvQuote);
                }
            }
            writer.WriteLine();
            row++;
            // fire progress notification every NotifyAfter rows; handler may abort
            if (insightBulkCopy.NotifyAfter != 0 && row % insightBulkCopy.NotifyAfter == 0)
            {
                InsightRowsCopiedEventArgs e = new InsightRowsCopiedEventArgs();
                e.RowsCopied = row;
                insightBulkCopy.OnRowsCopied(insightBulkCopy, e);
                if (e.Abort)
                {
                    bulk.Cancel("Cancelled");
                    return;
                }
            }
        }
        // must call flush before end
        // cannot call close on the stream before end
        writer.Flush();
        bulk.End();
    }
    catch (Exception e)
    {
        // NOTE(review): if Cancel itself throws here, the original exception is lost.
        bulk.Cancel(e.Message);
        throw;
    }
}
/// <summary>
/// Does the actual bulk inserts using COPY ... FROM STDIN WITH BINARY, hand-writing
/// the PostgreSQL binary copy wire format: the 19-byte header (signature + flags +
/// extension length), then per row a 16-bit field count followed by (length, data)
/// pairs in network byte order, and finally the 16-bit -1 trailer.
/// On failure the copy is cancelled with the exception message; cin.End() always runs.
/// </summary>
/// <param name="table">Rows to insert; a null or empty table is a no-op.</param>
/// <param name="table_name">Destination table name (interpolated into the COPY statement).</param>
/// <param name="batch_size">NOTE(review): unused in this implementation.</param>
private void BulkCopy(DataTable table, string table_name, int batch_size)
{
    if (table != null && table.Rows.Count > 0)
    {
        // the copy command.
        NpgsqlCommand command = new NpgsqlCommand(string.Format(
            "COPY {0} FROM STDIN WITH BINARY", table_name), _connection);
        // the copy in stream.
        // TODO: convert this to binary mode for speed and
        // to make sure the char ` can also be included in tags!
        NpgsqlCopyIn cin = new NpgsqlCopyIn(command, _connection);
        // copy line-by-line.
        cin.Start();
        try
        {
            System.IO.Stream target = cin.CopyStream;
            //Stream target = new FileInfo(@"C:\Users\ben.abelshausen\Desktop\node_osmsharp.copy").OpenWrite();
            // write header: 11-byte signature "PGCOPY\n\xFF\r\n\0",
            // then a 4-byte flags field and a 4-byte header-extension length, all zero.
            List<byte> header = new List<byte>();
            header.AddRange(System.Text.Encoding.ASCII.GetBytes("PGCOPY\n"));
            header.Add((byte)255);
            header.AddRange(System.Text.Encoding.ASCII.GetBytes("\r\n\0"));
            header.Add((byte)0); // start of Flags field
            header.Add((byte)0);
            header.Add((byte)0);
            header.Add((byte)0);
            header.Add((byte)0); // start of Flags field
            header.Add((byte)0);
            header.Add((byte)0);
            header.Add((byte)0);
            target.Write(header.ToArray(), 0, header.Count);
            for (int row_idx = 0; row_idx < table.Rows.Count; row_idx++)
            {
                // for each row generate the binary data.
                // write the 16-bit integer count of the number of fields
                // (ReverseEndianness also writes the bytes to target, big-endian)
                byte[] field_count_data = BitConverter.GetBytes((short)table.Columns.Count);
                this.ReverseEndianness(target, field_count_data);
                //target.Write(field_count_data, 0, field_count_data.Length);
                for (int column_idx = 0; column_idx < table.Columns.Count; column_idx++)
                {
                    // serialize the data.
                    byte[] field_data = null;
                    object value = table.Rows[row_idx][column_idx];
                    // "reverse" marks fixed-width numeric values that must be
                    // byte-swapped into network order before writing
                    bool reverse = false;
                    if (value == null || value == DBNull.Value)
                    {
                        // do nothing: just leave the field_data null.
                    }
                    else if (value is long)
                    {
                        // convert the long data into bytes postgres can understand.
                        field_data = BitConverter.GetBytes((long)value);
                        reverse = true;
                    }
                    else if (value is int)
                    {
                        // convert the int data into bytes postgres can understand.
                        field_data = BitConverter.GetBytes((int)value);
                        reverse = true;
                    }
                    else if (value is double)
                    {
                        // convert the double data into bytes postgres can understand.
                        field_data = BitConverter.GetBytes((double)value);
                        reverse = true;
                    }
                    else if (value is float)
                    {
                        // convert the float data into bytes postgres can understand.
                        field_data = BitConverter.GetBytes((float)value);
                        reverse = true;
                    }
                    else if (value is decimal)
                    {
                        // convert the decimal data into bytes postgres can understand.
                        // NOTE(review): decimal is narrowed to double here — precision loss.
                        field_data = BitConverter.GetBytes((double)value);
                        reverse = true;
                    }
                    else if (value is DateTime)
                    {
                        // convert the DateTime into microseconds since 2000-01-01.
                        // NOTE(review): the (long) cast binds to TotalSeconds BEFORE the
                        // multiply, so sub-second precision is truncated — confirm whether
                        // (long)(span.TotalSeconds * 1000000) was intended.
                        long microseconds = (long)((DateTime)value - (new DateTime(2000, 01, 01))).TotalSeconds * 1000000;
                        //field_data = System.Text.Encoding.ASCII.GetBytes(((DateTime)value).ToString(
                        //    System.Globalization.CultureInfo.InvariantCulture));
                        field_data = BitConverter.GetBytes(microseconds);
                        reverse = true;
                    }
                    else if (value is string)
                    {
                        // convert the string data into bytes postgres can understand.
                        field_data = _encoding.GetBytes(value as string);
                    }
                    else if (value is bool)
                    {
                        // convert the bool data into bytes postgres can understand.
                        field_data = new byte[1];
                        if ((bool)value)
                        {
                            field_data[0] = (byte)1;
                        }
                        else
                        {
                            field_data[0] = (byte)0;
                        }
                    }
                    else
                    {
                        // the type of the value is unsupported!
                        throw new InvalidDataException(string.Format("Data type not supported: {0}!", value.GetType()));
                    }
                    // write the length of the field.
                    int length = -1; // represents NULL.
                    if (field_data != null)
                    {
                        // the lenght is non-zero.
                        length = field_data.Length;
                    }
                    byte[] length_data = BitConverter.GetBytes(length);
                    this.ReverseEndianness(target, length_data);
                    // write the data.
                    if (field_data != null)
                    {
                        if (reverse)
                        {
                            // write the data in reverse.
                            this.ReverseEndianness(target, field_data);
                        }
                        else
                        {
                            // write the data in order.
                            target.Write(field_data, 0, field_data.Length);
                        }
                    }
                }
                if (row_idx % 100 == 0)
                {
                    // flush the data once in a while.
                    target.Flush();
                }
            }
            // write the file trailer: a 16-bit integer word containing -1
            // (0xFFFF is byte-order symmetric, so no reversal is needed)
            byte[] trailer = BitConverter.GetBytes((short)-1);
            target.Write(trailer, 0, trailer.Length);
            // flush the stream data and close.
            target.Flush();
            target.Close();
        }
        catch (Exception ex)
        {
            // NOTE(review): the exception is swallowed after cancelling; callers
            // cannot tell a failed bulk copy from a successful one.
            cin.Cancel(ex.Message);
        }
        finally
        {
            cin.End();
        }
        OsmSharp.Logging.Log.TraceEvent("OsmSharp.Data.PostgreSQL.Osm.Streams.PostgeSQLOsmStreamTarget",
            OsmSharp.Logging.TraceEventType.Information,
            "Inserted {0} records into {1}!", table.Rows.Count, table_name);
    }
}
/// <summary>
/// Imports the pre-generated binary work files (one per table in workDir) into the
/// database via COPY ... FROM STDIN. Each file is a sequence of records whose column
/// layout is described by getTableTypes: type code 0 = int32, 1 = bool. The serializer
/// is flushed every FLUSH_ROWS rows and once more at the end of each table.
/// </summary>
/// <returns>Always true.</returns>
private bool importData()
{
    tic("Importing data:", true, true);
    const int FLUSH_ROWS = 200000;
    String[] toImport = new String[] { sessionTableName, queryTableName, queryTermTableName, queryUrlTableName, clickTableName, urlTableName };
    foreach (String tableName in toImport)
    {
        tic(tableName, false, true);
        // column type codes for this table: 0 = int32, 1 = bool
        int[] types = getTableTypes(tableName);
        NpgsqlCommand cmd = new NpgsqlCommand(buildInsertCommand(tableName), connection);
        NpgsqlCopySerializer serializer = new NpgsqlCopySerializer(connection);
        NpgsqlCopyIn copyIn = new NpgsqlCopyIn(cmd, connection, serializer.ToStream);
        copyIn.Start();
        using (BufferedBinaryReader reader = new BufferedBinaryReader(workDir + tableName))
        {
            int lineCounter = 0;
            while (reader.PeekChar() > -1)
            {
                lineCounter++;
                for (int i = 0; i < types.Length; i++)
                {
                    if (types[i] == 0)
                    {
                        int value = reader.ReadInt32();
                        serializer.AddInt32(value);
                    }
                    if (types[i] == 1)
                    {
                        bool value = reader.ReadBool();
                        serializer.AddBool(value);
                    }
                }
                serializer.EndRow();
                // periodic flush; NOTE(review): the +1 offset makes this trigger on
                // row FLUSH_ROWS-1, 2*FLUSH_ROWS-1, ... — confirm intentional.
                if ((lineCounter + 1) % FLUSH_ROWS == 0)
                {
                    serializer.Flush();
                }
            }
            // progress output: row count for this table, left-padded to 15 chars
            Console.Write(String.Format("{0,-15}", String.Format("({0})", lineCounter)));
        }
        serializer.Flush();
        serializer.Close();
        copyIn.End();
        toc();
    }
    toc(true);
    return(true);
}