/// <summary>
/// Serializes the primary table and its connected (child) rows to a JSON file.
/// Emits one root object with source metadata and an "Items" array; each item
/// is a primary row followed by its connected objects.
/// </summary>
/// <param name="fileName">Path of the JSON file to create (overwritten if present).</param>
internal void Serialize(string fileName)
{
    /* We are going to perform a series of queries:
     * one for the primary category (which we assume is the first in the dataset)
     * and then one for every connection.
     *
     * A number of observations:
     * If we ever were to allow nested connections, we would need some recursion algorithm.
     * In the current setup, we only process the child relations of the primary table. */

    // Collect the sub-query text and connection metadata up front,
    // so we do not re-read ExtendedProperties during the data read.
    var subQueries = _primaryTable.ChildRelations
        .Cast<DataRelation>()
        .Select(s => new ChildTableQuery(
            // NOTE(review): no checks that these extended-property keys exist.
            s.ChildTable.ExtendedProperties[DataSetHelper.LinkTableSelectCommandTextExtProp].ToString(),
            JsonConvert.DeserializeObject<CommenceConnection>(
                s.ExtendedProperties[DataSetHelper.CommenceConnectionDescriptionExtProp].ToString())))
        .ToArray();

    using (var connection = new SQLiteConnection(_cs))
    {
        connection.Open();
        using (var transaction = connection.BeginTransaction())
        using (var command = new SQLiteCommand(connection))
        // One reusable command for all child queries; previously a new,
        // never-disposed SQLiteCommand was created for every primary row.
        using (var subCommand = new SQLiteCommand(connection))
        {
            subCommand.Transaction = transaction;
            command.CommandText = SQLiteWriter.GetSQLiteSelectQueryForTable(_primaryTable);
            // start reading data
            using (var reader = command.ExecuteReader())
            using (StreamWriter sw = new StreamWriter(fileName))
            using (Newtonsoft.Json.JsonWriter wr = new JsonTextWriter(sw))
            {
                wr.Formatting = Formatting.Indented;
                wr.WriteStartObject();
                wr.WritePropertyName("CommenceDataSource");
                wr.WriteValue(string.IsNullOrEmpty(_settings.CustomRootNode)
                    ? _primaryTable.TableName
                    : _settings.CustomRootNode);
                wr.WritePropertyName("CommenceCategory");
                wr.WriteValue(_ocp.Category);
                wr.WritePropertyName("CommenceDataSourceType");
                wr.WriteValue(_ocp.Type);
                wr.WritePropertyName("Items");
                wr.WriteStartArray();
                bool includeThids = ((ExportSettings)_settings).UserRequestedThids;
                while (reader.Read())
                {
                    wr.WriteStartObject();
                    WriteObjects(reader, wr, includeThids);
                    foreach (var sq in subQueries)
                    {
                        subCommand.CommandText = sq.CommandText;
                        // fragile: assumes the id is always the first column of the primary query
                        subCommand.Parameters.AddWithValue("@id", reader.GetInt64(0));
                        // Dispose the child reader before reusing the command;
                        // it was previously leaked for every (row, connection) pair.
                        using (var subreader = subCommand.ExecuteReader())
                        {
                            WriteConnectedObjects(sq, subreader, wr, includeThids);
                        }
                        subCommand.Reset(); // clears parameters, ready for next use
                    } // foreach subqueries
                    wr.WriteEndObject();
                } // while
                wr.WriteEndArray();
                wr.WriteEndObject();
            } // using reader / streamwriter / jsonwriter
            transaction.Commit();
        } // using command / subCommand / transaction
    } // using connection
} // method
// We could use async Read from SQLite and async XML writing, but there is no performance gain, I tested that.
// Going async actually introduces the problem of ExportComplete firing too early,
// but you will only notice that with huge exports.
/// <summary>
/// Serializes the primary table and its connected (child) rows to an XML file.
/// Emits a root element (category name or custom root), an "Items" element,
/// and one "Item" element per primary row with its connected values.
/// </summary>
/// <param name="fileName">Path of the XML file to create (overwritten if present).</param>
internal void Serialize(string fileName)
{
    /* We are going to perform a series of queries:
     * one for the primary category
     * and then one for every connection.
     *
     * A number of observations:
     * If we ever were to allow nested connections, we would need some recursion algorithm.
     * In the current setup, we only process the child relations of the primary table. */

    // Collect the sub-query text and connection metadata up front,
    // so we do not re-read ExtendedProperties during the data read.
    var subQueries = _primaryTable.ChildRelations
        .Cast<DataRelation>()
        .Select(s => new ChildTableQuery(
            // NOTE(review): no checks that these extended-property keys exist.
            s.ChildTable.ExtendedProperties[DataSetHelper.LinkTableSelectCommandTextExtProp].ToString(),
            JsonConvert.DeserializeObject<CommenceConnection>(
                s.ExtendedProperties[DataSetHelper.CommenceConnectionDescriptionExtProp].ToString())))
        .ToArray();

    using (var connection = new SQLiteConnection(_cs))
    {
        connection.Open();
        using (var transaction = connection.BeginTransaction())
        using (var command = new SQLiteCommand(connection))
        // One reusable command for all child queries; previously a new,
        // never-disposed SQLiteCommand was created for every primary row.
        using (var subCommand = new SQLiteCommand(connection))
        {
            subCommand.Transaction = transaction;
            command.CommandText = SQLiteWriter.GetSQLiteSelectQueryForTable(_primaryTable);
            // start reading data
            using (var reader = command.ExecuteReader())
            {
                XmlWriterSettings xmlSettings = new XmlWriterSettings
                {
                    Async = false,
                    // Closes the still-open root and "Items" elements when the writer is disposed.
                    WriteEndDocumentOnClose = true,
                    Indent = true,
                    Encoding = Encoding.UTF8 // this is what SQLite uses
                };
                using (System.Xml.XmlWriter writer = System.Xml.XmlWriter.Create(fileName, xmlSettings))
                {
                    writer.WriteStartDocument();
                    // Encode the name: a Commence category/custom root may not be a valid XML name.
                    writer.WriteStartElement(string.IsNullOrEmpty(_settings.CustomRootNode)
                        ? XmlConvert.EncodeLocalName(_primaryTable.TableName)
                        : XmlConvert.EncodeLocalName(_settings.CustomRootNode));
                    writer.WriteStartElement("Items");
                    bool includeThids = ((ExportSettings)_settings).UserRequestedThids;
                    while (reader.Read())
                    {
                        writer.WriteStartElement(null, "Item", null);
                        WriteNodes(reader, writer, includeThids);
                        // Next step is to get the connected values; we use a separate query for that.
                        // That is probably way too convoluted - we should probably stick to using a
                        // more intelligent reader. The problem is that we need to make sense of the
                        // lines that the reader returns. The XmlWriter is forward only, so botching
                        // together the nodes that belong together is a problem.
                        // We could just use a fully filled dataset? What about size limitations? Performance?
                        foreach (var q in subQueries)
                        {
                            subCommand.CommandText = q.CommandText;
                            // fragile: assumes the id is always the first column of the primary query
                            subCommand.Parameters.AddWithValue("@id", reader.GetInt64(0));
                            // Dispose the child reader before reusing the command;
                            // it was previously leaked for every (row, connection) pair.
                            using (var subreader = subCommand.ExecuteReader())
                            {
                                while (subreader.Read())
                                {
                                    if (_settings.NestConnectedItems)
                                    {
                                        WriteNestedNodes(subreader, writer, q.Connection, includeThids);
                                    }
                                    else
                                    {
                                        WriteNodes(subreader, writer, includeThids);
                                    }
                                } // while subreader.Read
                            }
                            subCommand.Reset(); // clears parameters, ready for next use
                        } // foreach subqueries
                        writer.WriteEndElement(); // Item
                    } // while
                } // xmlwriter
            } // reader
            transaction.Commit();
        } // command / subCommand / transaction
    } // connection
} // method