// Demonstrates a keyed upsert against the Chinook Artist table using a
// TableDirect SqlCeResultSet: Seek on the primary-key index, then either
// update the row in place or insert a new updatable record.
static void Main(string[] args)
{
    // Key and value for the upsert. Int16.MaxValue is assumed to be absent
    // from the table, so this run exercises the insert path; use an existing
    // ArtistId (e.g. 1 with value "AC/DC") to exercise the update path.
    // (The original code assigned the update arguments first and immediately
    // overwrote them — those dead stores are removed here.)
    int lookFor = Int16.MaxValue;
    string value = "joedotnet";

    using (SqlCeConnection conn = new SqlCeConnection(@"Data Source=C:\Users\xeej\Downloads\ChinookPart2\Chinook.sdf"))
    {
        conn.Open();
        using (SqlCeCommand cmd = new SqlCeCommand("Artist"))
        {
            cmd.Connection = conn;
            // TableDirect + IndexName enables Seek() over the PK index.
            cmd.CommandType = System.Data.CommandType.TableDirect;
            cmd.IndexName = "PK_Artist";

            SqlCeResultSet myResultSet = cmd.ExecuteResultSet(ResultSetOptions.Updatable | ResultSetOptions.Scrollable);

            // Seek positions the cursor before the matching row; Read() is
            // still required to actually fetch it.
            bool found = myResultSet.Seek(DbSeekOptions.FirstEqual, new object[] { lookFor });
            SqlCeUpdatableRecord myRec = null;
            if (found)
            {
                myResultSet.Read();
            }
            else
            {
                // No match: stage a fresh record for insertion.
                myRec = myResultSet.CreateRecord();
            }

            // Apply the same column values to whichever target applies.
            foreach (KeyValuePair<int, object> item in CommonMethodToFillRowData(value))
            {
                if (found)
                {
                    myResultSet.SetValue(item.Key, item.Value);
                }
                else
                {
                    myRec.SetValue(item.Key, item.Value);
                }
            }

            if (found)
            {
                myResultSet.Update();
            }
            else
            {
                myResultSet.Insert(myRec);
            }
        }
    }
}
// Wraps a SqlCeResultSet and positions it for an upsert: if the sought row
// exists the cursor is moved onto it (_found == true); otherwise a fresh
// updatable record is staged in _newRecord for a later Insert.
public SqlCeWrapper(SqlCeResultSet resultSet)
{
    _resultSet = resultSet;
    // NOTE(review): Seek() is invoked with no arguments here, but
    // SqlCeResultSet.Seek expects (DbSeekOptions, params object[] keys) —
    // presumably the seek options and key values were elided from this
    // snippet; confirm against the full source.
    _found = resultSet.Seek();
    if (_found)
    {
        // Seek only positions before the row; Read() fetches it.
        resultSet.Read();
    }
    else
    {
        // Row not present: prepare a new record for insertion.
        _newRecord = resultSet.CreateRecord();
    }
}
// Seeks the underlying result set to the first row matching `key`, after
// converting each key component from its native representation to the store
// representation.
protected override bool InternalSeek(object[] key)
{
    EnsureReader(null, true, true);

    // Convert every native key value into its store form before seeking.
    object[] storeKey = new object[key.Length];
    for (int i = 0; i < storeKey.Length; i++)
    {
        storeKey[i] = NativeToStoreValue(key[i]);
    }

    _resultSet.Seek(DbSeekOptions.FirstEqual, storeKey);

    // Although the documentation says Seek's return value indicates whether
    // the cursor landed on a row, that does not appear to hold in practice
    // (as of SQL CE 3.5 SP1), so this always reports false.
    return(false);
}
// Flushes the accumulated call-graph hit counts into the calls table,
// accumulating into existing (thread, parent, child) rows and creating rows
// for edges not yet stored.
private void FlushCalls(SqlCeResultSet resultSet)
{
    // The caller already holds the lock protecting m_calls.
    var hitsCol = resultSet.GetOrdinal("HitCount");
    var childCol = resultSet.GetOrdinal("ChildId");
    var parentCol = resultSet.GetOrdinal("ParentId");
    var threadCol = resultSet.GetOrdinal("ThreadId");

    foreach (var threadEntry in m_calls.Graph)
    {
        foreach (var parentEntry in threadEntry.Value)
        {
            foreach (var childEntry in parentEntry.Value)
            {
                var totalHits = childEntry.Value;
                var located = resultSet.Seek(DbSeekOptions.FirstEqual, threadEntry.Key, parentEntry.Key, childEntry.Key)
                              && resultSet.Read();
                if (located)
                {
                    // Existing row: fold the new hits into the stored count.
                    totalHits += (int)resultSet[hitsCol];
                    resultSet.SetInt32(hitsCol, totalHits);
                    resultSet.Update();
                }
                else
                {
                    // First sighting of this call edge: add a row.
                    CreateRecord(resultSet, threadEntry.Key, parentEntry.Key, childEntry.Key, totalHits);
                }
            }
            parentEntry.Value.Clear();
        }
    }
}
// Flushes the in-memory (function -> thread -> hit count) samples into the
// samples table, accumulating into existing rows where present.
private void FlushSamples(SqlCeResultSet resultSet)
{
    foreach (KeyValuePair<int, SortedList<int, int>> sampleKvp in m_samples)
    {
        if (sampleKvp.Value.Count == 0)
        {
            continue;
        }

        int threadCol = resultSet.GetOrdinal("ThreadId");
        int functionCol = resultSet.GetOrdinal("FunctionId");
        int hitsCol = resultSet.GetOrdinal("HitCount");

        foreach (KeyValuePair<int, int> threadKvp in sampleKvp.Value)
        {
            bool exists = resultSet.Seek(DbSeekOptions.FirstEqual, threadKvp.Key, sampleKvp.Key);
            if (exists)
            {
                // Row already present: accumulate into the stored hit count.
                resultSet.Read();
                int combined = (int)resultSet[hitsCol] + threadKvp.Value;
                resultSet.SetValue(hitsCol, combined);
                resultSet.Update();
            }
            else
            {
                // First sample for this (thread, function) pair: insert a row.
                SqlCeUpdatableRecord fresh = resultSet.CreateRecord();
                fresh[threadCol] = threadKvp.Key;
                fresh[functionCol] = sampleKvp.Key;
                fresh[hitsCol] = threadKvp.Value;
                resultSet.Insert(fresh, DbInsertOptions.PositionOnInsertedRow);
            }
        }

        sampleKvp.Value.Clear();
    }
}
// Flushes accumulated per-function timing samples into the timings table.
// Each sample either increments an existing [RangeMin, RangeMax] bucket that
// covers it, or inserts a single-point bucket and then merges the adjacent
// pair of buckets whose union spans the smallest range, keeping the bucket
// count per function bounded by kTimingBuckets.
private void FlushTimings(SqlCeResultSet resultSet)
{
    foreach(KeyValuePair<int, List<long>> timingKvp in m_timings)
    {
        if(timingKvp.Value.Count == 0)
            continue;

        int funcOrdinal = resultSet.GetOrdinal("FunctionId");
        int minOrdinal = resultSet.GetOrdinal("RangeMin");
        int maxOrdinal = resultSet.GetOrdinal("RangeMax");
        int hitsOrdinal = resultSet.GetOrdinal("HitCount");

        for(int t = 0; t < timingKvp.Value.Count; ++t)
        {
            bool foundBin = true;
            long time = timingKvp.Value[t];

            // Position on the last bucket at or before (FunctionId, time).
            if(!resultSet.Seek(DbSeekOptions.BeforeEqual, timingKvp.Key, time))
            {
                foundBin = false;
            }
            if(foundBin)
            {
                resultSet.Read();
                var id = resultSet.GetInt32(funcOrdinal);
                if(id != timingKvp.Key)
                {
                    // BeforeEqual can land on a different function's row;
                    // step forward once to reach this function's buckets.
                    if(!resultSet.Read())
                    {
                        foundBin = false;
                    }
                }
                if(foundBin)
                {
                    var min = resultSet.GetInt64(minOrdinal);
                    var max = resultSet.GetInt64(maxOrdinal);
                    // NOTE(review): `id` is not re-read after the extra Read()
                    // above, so this re-check compares the stale id and will
                    // reject whenever the first landed row belonged to another
                    // function — confirm whether FunctionId should be
                    // re-fetched from the current row here.
                    if(id != timingKvp.Key || time < min || time > max)
                        foundBin = false;
                }
            }
            if(foundBin)
            {
                //we've got a usable bin, increment and move on
                var hits = resultSet.GetInt32(hitsOrdinal);
                resultSet.SetInt32(hitsOrdinal, hits + 1);
                resultSet.Update();
                continue;
            }

            //didn't find a bin, create a new one for this entry
            var row = resultSet.CreateRecord();
            row[funcOrdinal] = timingKvp.Key;
            row[minOrdinal] = time;
            row[maxOrdinal] = time;
            row[hitsOrdinal] = 1;
            resultSet.Insert(row, DbInsertOptions.KeepCurrentPosition);

            //we need to bin-merge
            //start by seeking to the first record for this function
            // NOTE(review): the seek key is the float literal 0.0f while
            // RangeMin is read as Int64 elsewhere — confirm the index column
            // type tolerates the float key.
            if(!resultSet.Seek(DbSeekOptions.BeforeEqual, timingKvp.Key, 0.0f))
                resultSet.ReadFirst();
            else
                resultSet.Read();
            var mergeId = resultSet.GetInt32(funcOrdinal);
            if(mergeId != timingKvp.Key)
                resultSet.Read();
            mergeId = resultSet.GetInt32(funcOrdinal);
            //we know at least one exists, cause we just inserted one
            Debug.Assert(mergeId == timingKvp.Key);

            //Search for the merge that produces the smallest merged bucket
            long lastMin = resultSet.GetInt64(minOrdinal);
            int lastHits = resultSet.GetInt32(hitsOrdinal);
            bool shouldMerge = resultSet.Read();
            //these store all the data about the best merge so far
            long smallestRange = long.MaxValue;
            long bestMin = 0;
            long bestMax = 0;
            int mergedHits = 0;
            for(int b = 0; b < kTimingBuckets && shouldMerge; ++b)
            {
                // Candidate: merge the previous bucket (starting at lastMin)
                // with the current one (ending at max).
                long max = resultSet.GetInt64(maxOrdinal);
                long range = max - lastMin;
                if(range < smallestRange)
                {
                    smallestRange = range;
                    bestMin = lastMin;
                    bestMax = max;
                    mergedHits = lastHits + resultSet.GetInt32(hitsOrdinal);
                }
                lastMin = resultSet.GetInt64(minOrdinal);
                lastHits = resultSet.GetInt32(hitsOrdinal);
                //if this read fails, we have insufficient buckets to bother merging
                shouldMerge = resultSet.Read();
            }
            if(shouldMerge)
            {
                //seek to the first (lower) bin
                resultSet.Seek(DbSeekOptions.FirstEqual, timingKvp.Key, bestMin);
                resultSet.Read();
                //expand this bin to include the next one
                resultSet.SetInt64(maxOrdinal, bestMax);
                resultSet.SetInt32(hitsOrdinal, mergedHits);
                //go to the now redundant bin
                resultSet.Update();
                resultSet.Read();
                //delete the bin
                resultSet.Delete();
            }
        }
#if FALSE
        //DEBUG ONLY HACK: display buckets
        if(!resultSet.Seek(DbSeekOptions.BeforeEqual, timingKvp.Key, 0.0f))
            resultSet.ReadFirst();
        else
            resultSet.Read();
        var tempId = resultSet.GetInt32(funcOrdinal);
        if(tempId != timingKvp.Key)
            resultSet.Read();
        Console.WriteLine("Buckets for function {0}:", timingKvp.Key);
        for(int b = 0; b < kTimingBuckets; ++b)
        {
            long min = resultSet.GetInt64(minOrdinal);
            long max = resultSet.GetInt64(maxOrdinal);
            int hits = resultSet.GetInt32(hitsOrdinal);
            Console.WriteLine("[{0}, {1}]: {2}", min, max, hits);
            resultSet.Read();
        }
#endif
    }
}
// Writes the accumulated sample counts out to the samples table: rows that
// already exist for a (thread, function) pair get their hit count increased;
// missing rows are inserted.
private void FlushSamples(SqlCeResultSet resultSet)
{
    foreach (var functionSamples in m_samples)
    {
        if (functionSamples.Value.Count == 0)
        {
            continue;
        }

        int threadIdx = resultSet.GetOrdinal("ThreadId");
        int functionIdx = resultSet.GetOrdinal("FunctionId");
        int hitsIdx = resultSet.GetOrdinal("HitCount");

        foreach (var perThread in functionSamples.Value)
        {
            if (!resultSet.Seek(DbSeekOptions.FirstEqual, perThread.Key, functionSamples.Key))
            {
                // No row for this pair yet — insert one.
                SqlCeUpdatableRecord fresh = resultSet.CreateRecord();
                fresh[threadIdx] = perThread.Key;
                fresh[functionIdx] = functionSamples.Key;
                fresh[hitsIdx] = perThread.Value;
                resultSet.Insert(fresh, DbInsertOptions.PositionOnInsertedRow);
            }
            else
            {
                // Accumulate into the existing row's hit count.
                resultSet.Read();
                resultSet.SetValue(hitsIdx, (int)resultSet[hitsIdx] + perThread.Value);
                resultSet.Update();
            }
        }

        functionSamples.Value.Clear();
    }
}
// Persists the call-graph hit counts: each (thread, parent, child) edge either
// updates its existing row's HitCount or gets a brand new row.
private void FlushCalls(SqlCeResultSet resultSet)
{
    // The caller has already acquired the lock guarding m_calls.
    int hitCountCol = resultSet.GetOrdinal("HitCount");
    int childCol = resultSet.GetOrdinal("ChildId");
    int parentCol = resultSet.GetOrdinal("ParentId");
    int threadCol = resultSet.GetOrdinal("ThreadId");

    foreach (KeyValuePair<int, SortedDictionary<int, SortedList<int, int>>> byThread in m_calls.Graph)
    {
        foreach (KeyValuePair<int, SortedList<int, int>> byParent in byThread.Value)
        {
            foreach (KeyValuePair<int, int> byChild in byParent.Value)
            {
                int accumulated = byChild.Value;
                bool seeked = resultSet.Seek(DbSeekOptions.FirstEqual, byThread.Key, byParent.Key, byChild.Key);
                if (seeked && resultSet.Read())
                {
                    // Row exists: add the new hits to the stored total.
                    accumulated += (int)resultSet[hitCountCol];
                    resultSet.SetInt32(hitCountCol, accumulated);
                    resultSet.Update();
                }
                else
                {
                    // Edge not yet in the database: create its row.
                    CreateRecord(resultSet, byThread.Key, byParent.Key, byChild.Key, accumulated);
                }
            }
            byParent.Value.Clear();
        }
    }
}
// Flushes accumulated per-function timing samples into the timings table.
// Each sample either increments an existing [RangeMin, RangeMax] bucket that
// covers it, or inserts a single-point bucket and then merges the adjacent
// pair of buckets whose union spans the smallest range, keeping the bucket
// count per function bounded by kTimingBuckets.
private void FlushTimings(SqlCeResultSet resultSet)
{
    foreach (KeyValuePair<int, List<long>> timingKvp in m_timings)
    {
        if (timingKvp.Value.Count == 0)
        {
            continue;
        }

        int funcOrdinal = resultSet.GetOrdinal("FunctionId");
        int minOrdinal = resultSet.GetOrdinal("RangeMin");
        int maxOrdinal = resultSet.GetOrdinal("RangeMax");
        int hitsOrdinal = resultSet.GetOrdinal("HitCount");

        for (int t = 0; t < timingKvp.Value.Count; ++t)
        {
            bool foundBin = true;
            long time = timingKvp.Value[t];

            // Position on the last bucket at or before (FunctionId, time).
            if (!resultSet.Seek(DbSeekOptions.BeforeEqual, timingKvp.Key, time))
            {
                foundBin = false;
            }
            if (foundBin)
            {
                resultSet.Read();
                var id = resultSet.GetInt32(funcOrdinal);
                if (id != timingKvp.Key)
                {
                    // BeforeEqual can land on a different function's row;
                    // step forward once to reach this function's buckets.
                    if (!resultSet.Read())
                    {
                        foundBin = false;
                    }
                }
                if (foundBin)
                {
                    var min = resultSet.GetInt64(minOrdinal);
                    var max = resultSet.GetInt64(maxOrdinal);
                    // NOTE(review): `id` is not re-read after the extra Read()
                    // above, so this re-check compares the stale id and will
                    // reject whenever the first landed row belonged to another
                    // function — confirm whether FunctionId should be
                    // re-fetched from the current row here.
                    if (id != timingKvp.Key || time < min || time > max)
                    {
                        foundBin = false;
                    }
                }
            }
            if (foundBin)
            {
                //we've got a usable bin, increment and move on
                var hits = resultSet.GetInt32(hitsOrdinal);
                resultSet.SetInt32(hitsOrdinal, hits + 1);
                resultSet.Update();
                continue;
            }

            //didn't find a bin, create a new one for this entry
            var row = resultSet.CreateRecord();
            row[funcOrdinal] = timingKvp.Key;
            row[minOrdinal] = time;
            row[maxOrdinal] = time;
            row[hitsOrdinal] = 1;
            resultSet.Insert(row, DbInsertOptions.KeepCurrentPosition);

            //we need to bin-merge
            //start by seeking to the first record for this function
            // NOTE(review): the seek key is the float literal 0.0f while
            // RangeMin is read as Int64 elsewhere — confirm the index column
            // type tolerates the float key.
            if (!resultSet.Seek(DbSeekOptions.BeforeEqual, timingKvp.Key, 0.0f))
            {
                resultSet.ReadFirst();
            }
            else
            {
                resultSet.Read();
            }
            var mergeId = resultSet.GetInt32(funcOrdinal);
            if (mergeId != timingKvp.Key)
            {
                resultSet.Read();
            }
            mergeId = resultSet.GetInt32(funcOrdinal);
            //we know at least one exists, cause we just inserted one
            Debug.Assert(mergeId == timingKvp.Key);

            //Search for the merge that produces the smallest merged bucket
            long lastMin = resultSet.GetInt64(minOrdinal);
            int lastHits = resultSet.GetInt32(hitsOrdinal);
            bool shouldMerge = resultSet.Read();
            //these store all the data about the best merge so far
            long smallestRange = long.MaxValue;
            long bestMin = 0;
            long bestMax = 0;
            int mergedHits = 0;
            for (int b = 0; b < kTimingBuckets && shouldMerge; ++b)
            {
                // Candidate: merge the previous bucket (starting at lastMin)
                // with the current one (ending at max).
                long max = resultSet.GetInt64(maxOrdinal);
                long range = max - lastMin;
                if (range < smallestRange)
                {
                    smallestRange = range;
                    bestMin = lastMin;
                    bestMax = max;
                    mergedHits = lastHits + resultSet.GetInt32(hitsOrdinal);
                }
                lastMin = resultSet.GetInt64(minOrdinal);
                lastHits = resultSet.GetInt32(hitsOrdinal);
                //if this read fails, we have insufficient buckets to bother merging
                shouldMerge = resultSet.Read();
            }
            if (shouldMerge)
            {
                //seek to the first (lower) bin
                resultSet.Seek(DbSeekOptions.FirstEqual, timingKvp.Key, bestMin);
                resultSet.Read();
                //expand this bin to include the next one
                resultSet.SetInt64(maxOrdinal, bestMax);
                resultSet.SetInt32(hitsOrdinal, mergedHits);
                //go to the now redundant bin
                resultSet.Update();
                resultSet.Read();
                //delete the bin
                resultSet.Delete();
            }
        }
#if FALSE
        //DEBUG ONLY HACK: display buckets
        if (!resultSet.Seek(DbSeekOptions.BeforeEqual, timingKvp.Key, 0.0f))
        {
            resultSet.ReadFirst();
        }
        else
        {
            resultSet.Read();
        }
        var tempId = resultSet.GetInt32(funcOrdinal);
        if (tempId != timingKvp.Key)
        {
            resultSet.Read();
        }
        Console.WriteLine("Buckets for function {0}:", timingKvp.Key);
        for (int b = 0; b < kTimingBuckets; ++b)
        {
            long min = resultSet.GetInt64(minOrdinal);
            long max = resultSet.GetInt64(maxOrdinal);
            int hits = resultSet.GetInt32(hitsOrdinal);
            Console.WriteLine("[{0}, {1}]: {2}", min, max, hits);
            resultSet.Read();
        }
#endif
    }
}
// Integrates data from a serialized dataset into the database. The serialized
// stream format is: a header line carrying the table count (value starts at
// offset 12) and one carrying the total row count (offset 15); then, per
// table: a name line (offset 7), a row-count line (offset 10), a
// '|'-separated field-name line, a '|'-separated field-type line, and one
// '|'-separated value line per row. Table and field names must match those
// created in the database.
//
// sSerializedData    - serialized dataset text to import.
// bUpdateCurrentRows - when true, rows already present (located via the
//                      primary-key index described by m_dvPK) are updated in
//                      place; otherwise every row is inserted.
// Estado             - progress/cancellation sink; the import stops early
//                      when Estado.Cancelado becomes true.
// Returns true; failures surface as exceptions.
public bool IntegrarDatos(string sSerializedData, bool bUpdateCurrentRows, IEstadoCarga Estado)
{
    StringReader sr = new StringReader(sSerializedData);
    string sLine = null;
    string[] sFields = null;
    string[] sFieldsTypes = null;
    string[] sValues = null;
    SqlCeResultSet rs = null;
    SqlCeUpdatableRecord record = null;
    int I = 0;
    int J = 0;
    int nIndex = 0;
    int nTableCount = 0;
    int nRowCount = 0;
    int nTotalRowCount = 0;
    int nRows = 0;
    int nTotalRows = 0;
    // NOTE(review): dtNucleo is never assigned a non-null value anywhere in
    // this method, so every `dtNucleo != null` branch below is currently
    // dead — presumably leftover support for in-memory "nucleo" tables;
    // confirm before deleting those branches.
    DataTable dtNucleo = null;
    DataRow row = null;
    object FieldValue = null;
    try
    {
        // Header: number of serialized tables and total number of rows.
        sLine = sr.ReadLine();
        nTableCount = System.Convert.ToInt32(sLine.Substring(12));
        sLine = sr.ReadLine();
        nTotalRowCount = System.Convert.ToInt32(sLine.Substring(15));
        nTotalRows = 0;
        this.OpenConnection();
        while (!Estado.Cancelado)
        {
            // Per-table header: table name and its row count.
            string sTableName = null;
            sLine = sr.ReadLine();
            if (sLine == null)
            {
                break;
            }
            sTableName = sLine.Substring(7);
            sLine = sr.ReadLine();
            nRowCount = System.Convert.ToInt32(sLine.Substring(10));
            if (nRowCount > 0)
            {
                nRows = 0;
                Estado.IniciarTabla(sTableName);
                dtNucleo = null;
                if (bUpdateCurrentRows)
                {
                    // Restrict the primary-key schema view to this table so
                    // the key columns / constraint name below refer to it.
                    m_dvPK.RowFilter = "TABLE_NAME = '" + sTableName + "'";
                }
                else
                {
                    // Insert-only mode: drop any cached in-memory rows.
                    if (dtNucleo != null)
                    {
                        dtNucleo.Rows.Clear();
                    }
                }
                // Open an updatable result set over the table; update mode
                // also needs the PK index (for Seek) and a scrollable,
                // sensitive cursor.
                SqlCeCommand cmd = new SqlCeCommand();
                cmd.Connection = (SqlCeConnection)this.Connection;
                cmd.CommandType = CommandType.TableDirect;
                cmd.CommandText = sTableName;
                if (bUpdateCurrentRows)
                {
                    cmd.IndexName = System.Convert.ToString(m_dvPK[0]["CONSTRAINT_NAME"]);
                    rs = cmd.ExecuteResultSet(ResultSetOptions.Updatable | ResultSetOptions.Sensitive | ResultSetOptions.Scrollable);
                }
                else
                {
                    rs = cmd.ExecuteResultSet(ResultSetOptions.Updatable);
                }
                // Column names, then column types.
                sLine = sr.ReadLine();
                sFields = sLine.Split('|');
                sLine = sr.ReadLine();
                sFieldsTypes = sLine.Split('|');
                // Process every serialized row of this table.
                sLine = sr.ReadLine();
                bool bInsertRecord = false;
                // FIX: was the non-short-circuit '&' operator.
                while ((sLine != null) && (!Estado.Cancelado))
                {
                    if (sLine.Trim() == string.Empty)
                    {
                        break;
                    }
                    // Values carried by this record.
                    sValues = sLine.Split('|');
                    bInsertRecord = true;
                    if (bUpdateCurrentRows)
                    {
                        // Build the primary-key value array for this record.
                        // NOTE(review): GetUpperBound(0) == Length - 1, so all
                        // the field loops below skip the last element of
                        // sFields — presumably a trailing '|' yields an empty
                        // final field; confirm against the serializer.
                        object[] RecordKey = new object[m_dvPK.Count];
                        for (I = 0; I < m_dvPK.Count; I++)
                        {
                            for (J = 0; J < sFields.GetUpperBound(0); J++)
                            {
                                if (System.Convert.ToString(m_dvPK[I]["COLUMN_NAME"]).ToUpper() == sFields[J])
                                {
                                    RecordKey[I] = GetColumnValue(sFieldsTypes[J], sValues[J]);
                                }
                            }
                        }
                        // Look for the current row; update it if found,
                        // otherwise fall through to the insert path.
                        if (rs.Seek(DbSeekOptions.FirstEqual, RecordKey))
                        {
                            bInsertRecord = false;
                            rs.Read();
                            if (dtNucleo != null)
                            {
                                row = dtNucleo.Rows.Find(RecordKey);
                            }
                            // Update each column in the database row, and in
                            // the in-memory row when one is tracked.
                            if (dtNucleo != null && row != null)
                            {
                                for (I = 0; I < sFields.GetUpperBound(0); I++)
                                {
                                    try
                                    {
                                        nIndex = rs.GetOrdinal(sFields[I]);
                                        FieldValue = GetColumnValue(rs.GetFieldType(nIndex).ToString(), sValues[I]);
                                        rs.SetValue(nIndex, FieldValue);
                                        nIndex = row.Table.Columns.IndexOf(sFields[I]);
                                        if (nIndex >= 0)
                                        {
                                            row[nIndex] = FieldValue;
                                        }
                                    }
                                    catch (Exception ex)
                                    {
                                        // FIX: preserve the original exception as InnerException.
                                        throw new InvalidOperationException("Field: " + sFields[I] + "\r\n" + "Type: " + rs.GetFieldType(nIndex).ToString() + "\r\n" + "Value: " + sValues[I] + "\r\n" + ex.Message, ex);
                                    }
                                }
                            }
                            else
                            {
                                for (I = 0; I < sFields.GetUpperBound(0); I++)
                                {
                                    try
                                    {
                                        nIndex = rs.GetOrdinal(sFields[I]);
                                        FieldValue = GetColumnValue(rs.GetFieldType(nIndex).ToString(), sValues[I]);
                                        rs.SetValue(nIndex, FieldValue);
                                    }
                                    catch (Exception ex)
                                    {
                                        throw new InvalidOperationException("Field: " + sFields[I] + "\r\n" + "Type: " + rs.GetFieldType(nIndex).ToString() + "\r\n" + "Value: " + sValues[I] + "\r\n" + ex.Message, ex);
                                    }
                                }
                            }
                            rs.Update();
                        }
                    }
                    if (bInsertRecord)
                    {
                        // Create the new record (and a matching in-memory row
                        // when a nucleo table is tracked).
                        record = rs.CreateRecord();
                        if (dtNucleo != null)
                        {
                            row = dtNucleo.NewRow();
                        }
                        else
                        {
                            row = null;
                        }
                        if (dtNucleo != null && row != null)
                        {
                            for (I = 0; I < sFields.GetUpperBound(0); I++)
                            {
                                try
                                {
                                    nIndex = rs.GetOrdinal(sFields[I]);
                                    FieldValue = GetColumnValue(rs.GetFieldType(nIndex).ToString(), sValues[I]);
                                    record.SetValue(nIndex, FieldValue);
                                    nIndex = row.Table.Columns.IndexOf(sFields[I]);
                                    if (nIndex >= 0)
                                    {
                                        row[nIndex] = FieldValue;
                                    }
                                }
                                catch (Exception ex)
                                {
                                    throw new InvalidOperationException("Field: " + sFields[I] + "\r\n" + "Type: " + rs.GetFieldType(nIndex).ToString() + "\r\n" + "Value: " + sValues[I] + "\r\n" + ex.Message, ex);
                                }
                            }
                        }
                        else
                        {
                            for (I = 0; I < sFields.GetUpperBound(0); I++)
                            {
                                try
                                {
                                    nIndex = rs.GetOrdinal(sFields[I]);
                                    FieldValue = GetColumnValue(rs.GetFieldType(nIndex).ToString(), sValues[I]);
                                    record.SetValue(nIndex, FieldValue);
                                }
                                catch (Exception ex)
                                {
                                    throw new InvalidOperationException("Field: " + sFields[I] + "\r\n" + "Type: " + rs.GetFieldType(nIndex).ToString() + "\r\n" + "Value: " + sValues[I] + "\r\n" + ex.Message, ex);
                                }
                            }
                        }
                        // Persist the new record.
                        try
                        {
                            rs.Insert(record, DbInsertOptions.KeepCurrentPosition);
                            if (dtNucleo != null && row != null)
                            {
                                dtNucleo.Rows.Add(row);
                                row.AcceptChanges();
                            }
                        }
                        catch
                        {
                            // Materialize the record values as a debugging aid
                            // before propagating.
                            object[] values = new object[rs.FieldCount + 1];
                            record.GetValues(values);
                            // FIX: 'throw ex;' reset the stack trace.
                            throw;
                        }
                    }
                    // Report progress every 100 rows and at table completion.
                    nRows += 1;
                    nTotalRows += 1;
                    if ((nRows % 100) == 0 || nRows == nRowCount)
                    {
                        Estado.ProgresoTabla = System.Convert.ToInt32((nRows * 100 / nRowCount));
                        Estado.ProgresoTotal = System.Convert.ToInt32(nTotalRows * 100 / nTotalRowCount);
                    }
                    // Read the next record.
                    sLine = sr.ReadLine();
                }
                rs.Close();
            }
        }
    }
    // FIX: the original 'catch (Exception ex) { throw ex; }' only destroyed
    // the stack trace; exceptions now propagate unmodified (finally still runs).
    finally
    {
        if (rs != null)
        {
            if (!rs.IsClosed)
            {
                rs.Close();
                rs = null;
            }
        }
        this.CloseConnection();
        sr.Close();
    }
    return(true);
}