// TODO: Create Command interface
/// <summary>
/// Executes an UPDATE statement. Parses the statement into CommandParts,
/// resolves the target table, and delegates the row matching and in-place
/// file writes to WhereProcessor.ProcessRows.
/// </summary>
/// <param name="strSql">The raw UPDATE statement.</param>
/// <returns>true once the statement has been processed without throwing.</returns>
public bool executeStatement(string strSql)
{
    bool wasSuccessful = false;

    // TODO: Think how to track multiple tables/TableContexts
    // Note that the constructor will set up the table named
    // in the SELECT statement in _table.
    CommandParts updateParts = new CommandParts(_database, _table, strSql, CommandParts.COMMAND_TYPES.UPDATE);

    // FIX: This syntax check previously lived inside the bDebug block below,
    // so an illegal INNER JOIN was only rejected when debug logging was on.
    // It must run unconditionally.
    if (!string.IsNullOrEmpty(updateParts.strInnerJoinKludge))
    {
        throw new Exception("Syntax error: INNER JOIN in an UPDATE statement is not supported: " + strSql);
    }

    if (MainClass.bDebug)
    {
        Console.WriteLine("SELECT: " + updateParts.strSelect);
        Console.WriteLine("FROM: " + updateParts.strFrom);
        Console.WriteLine("WHERE: " + updateParts.strWhere); // Note that WHEREs aren't applied to inner joined tables right now.
        Console.WriteLine("ORDER BY: " + updateParts.strOrderBy);
    }

    _table = _database.getTableByName(updateParts.strTableName);

    // The UPDATE work happens entirely inside ProcessRows; the DataTable is
    // only needed to satisfy the shared signature, hence the throwaway.
    DataTable dtThrowAway = new DataTable();
    WhereProcessor.ProcessRows(ref dtThrowAway, _table, updateParts);

    // FIX: wasSuccessful was never assigned true, so this method always
    // reported failure even when the UPDATE completed.
    wasSuccessful = true;
    return wasSuccessful;
}
/// <summary>
/// Scans the table's backing file row by row, applies the statement's WHERE
/// conditions, and executes the statement's action (SELECT / UPDATE / DELETE)
/// against each matching row.
/// </summary>
/// <param name="dtWithCols">For SELECTs, receives one DataRow per matching row
/// (passed by ref; nothing is returned). Ignored for UPDATE/DELETE.</param>
/// <param name="table">Table context whose backing file is scanned.</param>
/// <param name="commandParts">Parsed statement: command type, WHERE clause,
/// selected columns, update values, etc.</param>
public static void ProcessRows(
    ref DataTable dtWithCols,
    TableContext table,
    CommandParts commandParts
)
{
    string strWhere = commandParts.strWhere;
    List<Comparison> lstWhereConditions = _CreateWhereConditions(strWhere, table);

    // TODO: Really need to design a legitimate table locking system.
    int delayFactor = 1;

    // FIX: The IOException handler doubled delayFactor and slept "to try
    // again", but there was no loop around the attempt, so a locked table
    // file caused the statement to be silently skipped after one sleep.
    // Wrap the whole attempt in a retry loop so the backoff actually retries.
    // NOTE(review): a mid-scan IOException retried from the top could
    // re-apply composite UPDATE modifiers (e.g. `col1 = col1 + 4`) to rows
    // already written, and re-add SELECTed rows to dtWithCols; in practice
    // the expected failure is File.Open on a locked file, before any work.
    while (true)
    {
        try
        {
            using (BinaryReader b = new BinaryReader(File.Open(table.strTableFileLoc, FileMode.Open)))
            {
                int intRowCount = table.intFileLength / table.intRowLength;
                // Skip the first two rows of the file before the data rows.
                b.BaseStream.Seek(2 * table.intRowLength, SeekOrigin.Begin);
                // TODO: Code more defensively in case it's somehow not the right/minimum length
                for (int i = 2; i < intRowCount; i++)
                {
                    byte[] abytRow = b.ReadBytes(table.intRowLength);
                    bool bMatchingRow = true;

                    // Check and make sure this is an active row, and has
                    // the standard row lead byte, 0x11. If not, the row
                    // should not be read.
                    switch (abytRow[0])
                    {
                        case 0x88:
                            // DELETED -- never matches.
                            bMatchingRow = false;
                            break;

                        case 0x11:
                            // ACTIVE
                            // Find if the WHERE clause says to exclude this row.
                            foreach (Comparison comparison in lstWhereConditions)
                            {
                                // For now, we're (somewhat clumsily) processing INs as lots of small ORs.
                                // And no, we're not actually supporting the OR statement in a regular WHERE yet.
                                if (comparison is CompoundComparison)
                                {
                                    // A CompoundComparison matches if ANY inner comparison matches (OR semantics).
                                    bool bInKeeper = false;
                                    // Could use a lot more indexed logic here, but that'll need to be
                                    // an extension to this package to keep the logic simple.
                                    foreach (Comparison compInner in ((CompoundComparison)comparison).lstComparisons)
                                    {
                                        if (_ComparisonEngine(compInner, abytRow))
                                        {
                                            bInKeeper = true;
                                            break;
                                        }
                                    }
                                    bMatchingRow = bMatchingRow && bInKeeper;
                                }
                                else
                                {
                                    // Plain comparisons are ANDed together.
                                    bMatchingRow = bMatchingRow && _ComparisonEngine(comparison, abytRow);
                                }
                            }
                            break;

                        default:
                            throw new Exception("Unexpected row state in SELECT: " + abytRow[0]);
                    }

                    if (bMatchingRow)
                    {
                        switch (commandParts.commandType)
                        {
                            case CommandParts.COMMAND_TYPES.SELECT:
                                #region SELECT
                                // Copy of the fuzzy-name map; entries are removed as they're
                                // consumed, so it resets with each row.
                                Dictionary<string, string> dictFuzzyToColName = new Dictionary<string, string>(commandParts.dictFuzzyToColNameMappings);
                                DataRow row = dtWithCols.NewRow();
                                foreach (Column mCol in commandParts.acolInSelect)
                                {
                                    byte[] abytCol = new byte[mCol.intColLength];
                                    Array.Copy(abytRow, mCol.intColStart, abytCol, 0, mCol.intColLength);
                                    // now translate/cast the value to the column in the row.
                                    // foreach b/c we're supporting multiple calls to the same col in a SELECT now.
                                    foreach (DataColumn dc in dtWithCols.Columns)
                                    {
                                        // See if we should use this column's (mCol's) value with this DataColumn.
                                        if (dictFuzzyToColName.ContainsValue(mCol.strColName) || mCol.strColName.Equals(dc.ColumnName))
                                        {
                                            // If so, see if there's a fuzzy name mapped for this column.
                                            string strColName = GetFuzzyNameIfExists(mCol.strColName, dictFuzzyToColName);
                                            row[strColName] = Router.routeMe(mCol).toNative(abytCol);
                                            // If we had a fuzzy name, remove from the dictionary so we don't dupe it.
                                            if (dictFuzzyToColName.ContainsKey(strColName))
                                            {
                                                dictFuzzyToColName.Remove(strColName);
                                            }
                                        }
                                    }
                                }
                                dtWithCols.Rows.Add(row);
                                #endregion SELECT
                                break;

                            case CommandParts.COMMAND_TYPES.UPDATE:
                                #region UPDATE
                                // kludge for fuzzy names: launder the SET column names back
                                // to their raw (on-disk) names before matching columns.
                                Dictionary<string, string> dictLaunderedUpdateVals = new Dictionary<string, string>();
                                foreach (string key in commandParts.dictUpdateColVals.Keys)
                                {
                                    dictLaunderedUpdateVals.Add(table.getRawColName(key), commandParts.dictUpdateColVals[key]);
                                }

                                foreach (Column mCol in table.getColumns())
                                {
                                    Column colToPullValueFrom = null;
                                    if (dictLaunderedUpdateVals.ContainsKey(mCol.strColName))
                                    {
                                        // Column needs updating; take values from update.
                                        byte[] abytNewColVal = null; // Will hold "raw" value. Might not be the full column length.
                                        // TODO: Performance here should be crappy. Create a mapping of col names & Cols for
                                        // in-statement column value transfers. ie, "UPDATE table1 SET col1 = col2 WHERE col1 = 'update me';"
                                        string valueAsString = dictLaunderedUpdateVals[mCol.strColName];

                                        // NOTE: Slowly refactoring to ConstructValue as operation
                                        // functions are added to the serializers; only IntSerializer
                                        // is ready, so only INT goes through the composite path.
                                        COLUMN_TYPES[] validValueModiferTypes = new COLUMN_TYPES[] { COLUMN_TYPES.INT };
                                        if (validValueModiferTypes.Contains(mCol.colType))
                                        {
                                            // New method that allows composition update clauses (eg, `col1 + 4 - col2`)
                                            abytNewColVal = CompositeColumnValueModifier.ConstructValue(mCol, valueAsString, abytRow, table);
                                        }
                                        else
                                        {
                                            // Old method to update value (no composite clauses).
                                            // First see if the "value" is actually another column of this row.
                                            colToPullValueFrom = table.getColumnByName(valueAsString);
                                            if (null != colToPullValueFrom)
                                            {
                                                if (mCol.intColLength < colToPullValueFrom.intColLength || !CompositeColumnValueModifier.ColsAreCompatible(mCol, colToPullValueFrom))
                                                {
                                                    throw new Exception("UPDATE attempted to update with a value that was potentially too large or with columns of incompatible types.");
                                                }
                                                abytNewColVal = new byte[colToPullValueFrom.intColLength];
                                                Array.Copy(abytRow, colToPullValueFrom.intColStart, abytNewColVal, 0, colToPullValueFrom.intColLength);
                                            }
                                            else
                                            {
                                                // Not a column name; serialize the literal value.
                                                BaseSerializer serializer = Router.routeMe(mCol);
                                                abytNewColVal = serializer.toByteArray(dictLaunderedUpdateVals[mCol.strColName]);
                                            }
                                        }

                                        // Double check that the serializer at least gave a value
                                        // of the right length so that everything doesn't go to heck.
                                        // (FIX: a second, byte-identical length check that followed
                                        // this one was dead code and has been removed.)
                                        if (abytNewColVal.Length != mCol.intColLength)
                                        {
                                            throw new Exception("Improperly lengthed field from serializer (UPDATE): " + mCol.strColName);
                                        }

                                        Buffer.BlockCopy(abytNewColVal, 0, abytRow, mCol.intColStart, abytNewColVal.Length);
                                    }
                                    // else don't touch what's in the row; it's not an updated column.
                                }
                                // Rewind to the first byte of this row and write it back in place.
                                b.BaseStream.Seek(-1 * table.intRowLength, SeekOrigin.Current);
                                b.BaseStream.Write(abytRow, 0, abytRow.Length);
                                #endregion UPDATE
                                break;

                            case CommandParts.COMMAND_TYPES.DELETE:
                                byte[] abytErase = new byte[table.intRowLength];
                                // at least to test, I'm going to write it all over with 0x88s
                                // (0x88 is the DELETED row-lead byte checked above).
                                for (int j = 0; j < table.intRowLength; j++)
                                {
                                    abytErase[j] = 0x88;
                                }
                                // move pointer back to the first byte of this row.
                                b.BaseStream.Seek(-1 * table.intRowLength, SeekOrigin.Current);
                                b.BaseStream.Write(abytErase, 0, abytErase.Length);
                                break;

                            default:
                                throw new Exception("Unhandled command type in WhereProcessor: " + commandParts.commandType);
                        }
                    }
                } // eo for loop i < intRowCount
            } // eo using statement for the binary reader.
            break; // Completed without an IOException; done retrying.
        }
        catch (IOException)
        {
            // Exponential backoff while the table file is locked by another statement.
            delayFactor = delayFactor * 2;
            if (delayFactor > (3 * 60 * 1000))
            {
                throw new Exception("Statement timeout: " + commandParts.strOriginal);
            }
            Thread.Sleep(delayFactor * 200);
            //org.rufwork.mooresDb.SqlDbSharpLogger.LogMessage(table.strTableName + ".mdbf is locked. Waiting " + delayFactor + " millis to try again.", "WhereProcessor.ProcessRows");
        }
    }
    // nothing to return -- dt was passed by ref.
}
// TODO: Create Command interface
/// <summary>
/// Executes a SELECT statement: builds the result DataTable, delegates row
/// matching to WhereProcessor, then post-processes INNER JOINs, ORDER BY
/// (including fuzzy column names), and AS column renames.
/// </summary>
/// <param name="strSql">The raw SELECT statement.</param>
/// <returns>The populated, ordered, renamed result DataTable.</returns>
public DataTable executeStatement(string strSql)
{
    DataTable dtReturn = new DataTable();

    // TODO: Think how to track multiple tables/TableContexts
    // Note that the constructor will set up the table named
    // in the SELECT statement in _table.
    CommandParts selectParts = new CommandParts(_database, _table, strSql, CommandParts.COMMAND_TYPES.SELECT);

    if (MainClass.bDebug)
    {
        string strDebug = "SELECT: " + selectParts.strSelect + "\n";
        strDebug += "FROM: " + selectParts.strFrom + "\n";
        if (!string.IsNullOrEmpty(selectParts.strInnerJoinKludge))
        {
            strDebug += "INNER JOIN: " + selectParts.strInnerJoinKludge + "\n";
        }
        strDebug += "WHERE: " + selectParts.strWhere + "\n"; // Note that WHEREs aren't applied to inner joined tables right now.
        strDebug += "ORDER BY: " + selectParts.strOrderBy + "\n";
        SqlDbSharpLogger.LogMessage(strDebug, "SelectCommand executeStatement");
    }

    _table = _database.getTableByName(selectParts.strTableName);

    // qAllTables collects every table touched by this statement; used below
    // for fuzzy ORDER BY column resolution.
    Queue<TableContext> qAllTables = new Queue<TableContext>();
    qAllTables.Enqueue(_table);

    // Build the empty result schema, then let WhereProcessor fill the rows.
    dtReturn = _initDataTable(selectParts);
    WhereProcessor.ProcessRows(ref dtReturn, _table, selectParts);

    //=====================================================================
    // POST-PROCESS INNER JOINS
    // (Joins are only in selects, so this isn't part of WhereProcessing.)
    //
    // To take account of joins, we basically need to create a SelectParts
    // per inner join. So we need to create a WHERE from the table we
    // just selected and then send those values down to a new _selectRows.
    //=====================================================================
    if (selectParts.strInnerJoinKludge.Length > 0)
    {
        if (selectParts.qInnerJoinFields.Count < 1)
        {
            selectParts.qInnerJoinFields.EnqueueIfNotContains("*"); // Kludge for "SELECT * FROM Table1 INNER JOIN..." or "SELECT test, * From...", etc
        }

        // TODO: Why aren't we just throwing in the whole selectParts again?
        dtReturn = _processInnerJoin(qAllTables, dtReturn, selectParts.strInnerJoinKludge, selectParts.strTableName, selectParts.strOrderBy, selectParts.qInnerJoinFields);

        // Now we need to make sure the order of the DataColumns reflects what we had
        // in the original SQL. At least initially, column order wasn't guaranteed in
        // _processInnerJoin, as it would add columns first for the "main" table and then
        // for each "inner SELECT".
        string strFromSelect = string.Join(", ", selectParts.qstrAllColumnNames.ToArray());
        string strInTable = string.Join(", ", dtReturn.Columns.Cast<DataColumn>().Select(c => c.ColumnName).ToArray());
        MainClass.logIt(string.Format(@"Select fields: {0} Fields pushed into dtReturn: {1}", strFromSelect, strInTable));

        try
        {
            // Reorder dtReturn's columns to match the SELECT list's order.
            string[] astrFromSelect = selectParts.qstrAllColumnNames.ToArray();
            for (int i = 0; i < astrFromSelect.Length; i++)
            {
                dtReturn.Columns[astrFromSelect[i]].SetOrdinal(i);
            }

            // TODO: There are better ways to do this.
            // TODO: Figure out if this handles all fuzzy name translations
            // earlier in the SELECT process.
            // Columns that existed only to satisfy the JOIN (not SELECTed)
            // are stripped from the result.
            if (selectParts.lstrJoinONLYFields.Count() > 0)
            {
                foreach (string colName in selectParts.lstrJoinONLYFields)
                {
                    dtReturn.Columns.Remove(colName);
                }
            }
        }
        catch (Exception e)
        {
            throw new SyntaxException("Problem reordering columns in inner join -- " + e.ToString());
        }
    }
    //=====================================================================
    // EO POST-PROCESS INNER JOINS
    //=====================================================================

    // strOrderBy has had all whitespace shortened to one space, so we can get away with the hardcoded 9.
    if (null != selectParts.strOrderBy && selectParts.strOrderBy.Length > 9)
    {
        // ORDER BY needs to make sure it's not sorting on a fuzzy named column
        // that may not have been explicitly selected in the SELECT.
        string[] astrOrderByFields = selectParts.strOrderBy.Substring(9).Split(','); // Substring(9) to get rid of "ORDER BY " <<< But, ultimately, why not tokenize here too?
        string strCleanedOrderBy = string.Empty;

        foreach (string orderByClause in astrOrderByFields)
        {
            bool ascNotDesc = true;
            string strOrderByClause = orderByClause.Trim();
            // NOTE(review): strField starts as the same trimmed clause as
            // strOrderByClause; it is narrowed to just the field name below
            // when the clause also carries an ASC/DESC token.
            string strField = orderByClause.Trim();
            if (strField.Split().Length > 1)
            {
                strField = strOrderByClause.Substring(0, strOrderByClause.IndexOf(' ')).Trim();
                string strAscDesc = strOrderByClause.Substring(strOrderByClause.IndexOf(' ')).Trim();
                // Anything that doesn't say DESC sorts ascending.
                ascNotDesc = (-1 == strAscDesc.IndexOf("DESC", StringComparison.CurrentCultureIgnoreCase));
            }
            strOrderByClause += ","; // This is the default value if there's no fuzziness, and it needs the comma put back.

            // TODO: Integrate fields prefixed by specific table names.
            if (!dtReturn.Columns.Contains(strField))
            {
                // Check for fuzziness: the field isn't a result column under
                // this name, so ask each table for its raw column name.
                foreach (TableContext table in qAllTables)
                {
                    if (!table.containsColumn(strField, false) && table.containsColumn(strField, true))
                    {
                        strOrderByClause = table.getRawColName(strField) + (ascNotDesc ? " ASC" : " DESC") + ",";
                        break;
                    }
                }
            }
            strCleanedOrderBy += " " + strOrderByClause;
        }

        // Sort via the DataView, then snapshot the sorted view back into dtReturn.
        dtReturn.DefaultView.Sort = strCleanedOrderBy.Trim(',');
        dtReturn = dtReturn.DefaultView.ToTable();
    }

    // Apply "AS" aliases last, after sorting/reordering used the raw names.
    if (selectParts.dictRawNamesToASNames.Count > 0)
    {
        try
        {
            foreach (KeyValuePair<string, string> kvp in selectParts.dictRawNamesToASNames)
            {
                dtReturn.Columns[kvp.Key].ColumnName = kvp.Value;
            }
        }
        catch (Exception e)
        {
            throw new SyntaxException("Illegal AS usage: " + e.ToString());
        }
    }

    return dtReturn;
}
// TODO: Remove concept of mainTable, and pass in an IEnumerable of all table
// contexts so that we can figure out which owns each SELECTed column.
/// <summary>
/// Builds an empty DataTable whose columns mirror the columns (and "fuzzy"
/// SELECT aliases) requested in selectParts, with .NET column types mapped
/// from the SqlDbSharp COLUMN_TYPES. Rows are filled in later by WhereProcessor.
/// </summary>
/// <param name="selectParts">The parsed SELECT statement.</param>
/// <returns>An empty DataTable with one DataColumn per SELECTed column.</returns>
private DataTable _initDataTable(CommandParts selectParts)
{
    DataTable dtReturn = new DataTable();
    dtReturn.TableName = selectParts.strTableName; // TODO: This borks on JOINs, right? That is, you need to call this something else "X JOIN Y" or similar.

    // So that I can have columns appear more than once in a single table,
    // I'm going to make a dupe of dictColToSelectMapping. We'd have to go a
    // touch more complicated to keep the order from the original SELECT accurate.
    Dictionary<string, string> dictColMappingCopy = new Dictionary<string, string>(selectParts.dictFuzzyToColNameMappings);

    // info on creating a datatable by hand here:
    // http://msdn.microsoft.com/en-us/library/system.data.datacolumn.datatype.aspx
    // TODO: Distinct() is probably/should be overkill.
    foreach (Column colTemp in selectParts.acolInSelect.Distinct())
    {
        // "Translate" the SqlDBSharp column name to the name used in the SELECT statement.
        string strColNameForDT = WhereProcessor.GetFuzzyNameIfExists(colTemp.strColName, dictColMappingCopy);
        if (dictColMappingCopy.ContainsKey(strColNameForDT)) // these col names are from the SELECT statement, so they could be "fuzzy"
        {
            dictColMappingCopy.Remove(strColNameForDT); // This is the kludge that allows us to have the same col with different names.
        }

        DataColumn colForDt = new DataColumn(strColNameForDT);

        // TODO: Be more deliberate about these mappings.
        // Right now, I'm not really worried about casting correctly, etc.
        // FIX(idiom): typeof(...) instead of Type.GetType("System...") string
        // lookups -- identical Type objects, but checked at compile time.
        switch (colTemp.colType)
        {
            case COLUMN_TYPES.SINGLE_CHAR:
            case COLUMN_TYPES.CHAR:
                colForDt.DataType = typeof(string);
                colForDt.MaxLength = colTemp.intColLength; // MaxLength is only useful for string columns, strangely enough.
                break;

            case COLUMN_TYPES.AUTOINCREMENT:
            case COLUMN_TYPES.TINYINT:
            case COLUMN_TYPES.BIT: // TODO: This will "work", but non 0/1 values can be inserted, obviously. So it's a kludge for now.
            case COLUMN_TYPES.INT:
                colForDt.DataType = typeof(int);
                break;

            case COLUMN_TYPES.FLOAT:
            case COLUMN_TYPES.DECIMAL:
                colForDt.DataType = typeof(decimal);
                break;

            case COLUMN_TYPES.DATETIME:
                colForDt.DataType = typeof(DateTime);
                break;

            default:
                throw new Exception("Unhandled column type in Select Command: " + colTemp.colType);
        }

        dtReturn.Columns.Add(colForDt);
    }

    return dtReturn;
}
/// <summary>
/// Scans the table's backing file row by row, applies the statement's WHERE
/// conditions, and executes the statement's action (SELECT / UPDATE / DELETE)
/// against each matching row.
/// </summary>
/// <param name="dtWithCols">For SELECTs, receives one DataRow per matching row
/// (passed by ref; nothing is returned). Ignored for UPDATE/DELETE.</param>
/// <param name="table">Table context whose backing file is scanned.</param>
/// <param name="commandParts">Parsed statement: command type, WHERE clause,
/// selected columns, update values, etc.</param>
public static void ProcessRows(
    ref DataTable dtWithCols,
    TableContext table,
    CommandParts commandParts
)
{
    string strWhere = commandParts.strWhere;
    List<Comparison> lstWhereConditions = _CreateWhereConditions(strWhere, table);

    // TODO: Really need to design a legitimate table locking system.
    int delayFactor = 1;

    // FIX: The IOException handler doubled delayFactor and slept "to try
    // again", but there was no loop around the attempt, so a locked table
    // file caused the statement to be silently skipped after one sleep.
    // Wrap the whole attempt in a retry loop so the backoff actually retries.
    // NOTE(review): a mid-scan IOException retried from the top could
    // re-apply composite UPDATE modifiers (e.g. `col1 = col1 + 4`) to rows
    // already written, and re-add SELECTed rows to dtWithCols; in practice
    // the expected failure is File.Open on a locked file, before any work.
    while (true)
    {
        try
        {
            using (BinaryReader b = new BinaryReader(File.Open(table.strTableFileLoc, FileMode.Open)))
            {
                int intRowCount = table.intFileLength / table.intRowLength;
                // Skip the first two rows of the file before the data rows.
                b.BaseStream.Seek(2 * table.intRowLength, SeekOrigin.Begin);
                // TODO: Code more defensively in case it's somehow not the right/minimum length
                for (int i = 2; i < intRowCount; i++)
                {
                    byte[] abytRow = b.ReadBytes(table.intRowLength);
                    bool bMatchingRow = true;

                    // Check and make sure this is an active row, and has
                    // the standard row lead byte, 0x11. If not, the row
                    // should not be read.
                    switch (abytRow[0])
                    {
                        case 0x88:
                            // DELETED -- never matches.
                            bMatchingRow = false;
                            break;

                        case 0x11:
                            // ACTIVE
                            // Find if the WHERE clause says to exclude this row.
                            foreach (Comparison comparison in lstWhereConditions)
                            {
                                // For now, we're (somewhat clumsily) processing INs as lots of small ORs.
                                // And no, we're not actually supporting the OR statement in a regular WHERE yet.
                                if (comparison is CompoundComparison)
                                {
                                    // A CompoundComparison matches if ANY inner comparison matches (OR semantics).
                                    bool bInKeeper = false;
                                    // Could use a lot more indexed logic here, but that'll need to be
                                    // an extension to this package to keep the logic simple.
                                    foreach (Comparison compInner in ((CompoundComparison)comparison).lstComparisons)
                                    {
                                        if (_ComparisonEngine(compInner, abytRow))
                                        {
                                            bInKeeper = true;
                                            break;
                                        }
                                    }
                                    bMatchingRow = bMatchingRow && bInKeeper;
                                }
                                else
                                {
                                    // Plain comparisons are ANDed together.
                                    bMatchingRow = bMatchingRow && _ComparisonEngine(comparison, abytRow);
                                }
                            }
                            break;

                        default:
                            throw new Exception("Unexpected row state in SELECT: " + abytRow[0]);
                    }

                    if (bMatchingRow)
                    {
                        switch (commandParts.commandType)
                        {
                            case CommandParts.COMMAND_TYPES.SELECT:
                                #region SELECT
                                // Copy of the fuzzy-name map; entries are removed as they're
                                // consumed, so it resets with each row.
                                Dictionary<string, string> dictFuzzyToColName = new Dictionary<string, string>(commandParts.dictFuzzyToColNameMappings);
                                DataRow row = dtWithCols.NewRow();
                                foreach (Column mCol in commandParts.acolInSelect)
                                {
                                    byte[] abytCol = new byte[mCol.intColLength];
                                    Array.Copy(abytRow, mCol.intColStart, abytCol, 0, mCol.intColLength);
                                    // now translate/cast the value to the column in the row.
                                    // foreach b/c we're supporting multiple calls to the same col in a SELECT now.
                                    foreach (DataColumn dc in dtWithCols.Columns)
                                    {
                                        // See if we should use this column's (mCol's) value with this DataColumn.
                                        if (dictFuzzyToColName.ContainsValue(mCol.strColName) || mCol.strColName.Equals(dc.ColumnName))
                                        {
                                            // If so, see if there's a fuzzy name mapped for this column.
                                            string strColName = GetFuzzyNameIfExists(mCol.strColName, dictFuzzyToColName);
                                            row[strColName] = Router.routeMe(mCol).toNative(abytCol);
                                            // If we had a fuzzy name, remove from the dictionary so we don't dupe it.
                                            if (dictFuzzyToColName.ContainsKey(strColName))
                                            {
                                                dictFuzzyToColName.Remove(strColName);
                                            }
                                        }
                                    }
                                }
                                dtWithCols.Rows.Add(row);
                                #endregion SELECT
                                break;

                            case CommandParts.COMMAND_TYPES.UPDATE:
                                #region UPDATE
                                // kludge for fuzzy names: launder the SET column names back
                                // to their raw (on-disk) names before matching columns.
                                Dictionary<string, string> dictLaunderedUpdateVals = new Dictionary<string, string>();
                                foreach (string key in commandParts.dictUpdateColVals.Keys)
                                {
                                    dictLaunderedUpdateVals.Add(table.getRawColName(key), commandParts.dictUpdateColVals[key]);
                                }

                                foreach (Column mCol in table.getColumns())
                                {
                                    Column colToPullValueFrom = null;
                                    if (dictLaunderedUpdateVals.ContainsKey(mCol.strColName))
                                    {
                                        // Column needs updating; take values from update.
                                        byte[] abytNewColVal = null; // Will hold "raw" value. Might not be the full column length.
                                        // TODO: Performance here should be crappy. Create a mapping of col names & Cols for
                                        // in-statement column value transfers. ie, "UPDATE table1 SET col1 = col2 WHERE col1 = 'update me';"
                                        string valueAsString = dictLaunderedUpdateVals[mCol.strColName];

                                        // NOTE: Slowly refactoring to ConstructValue as operation
                                        // functions are added to the serializers; only IntSerializer
                                        // is ready, so only INT goes through the composite path.
                                        COLUMN_TYPES[] validValueModiferTypes = new COLUMN_TYPES[] { COLUMN_TYPES.INT };
                                        if (validValueModiferTypes.Contains(mCol.colType))
                                        {
                                            // New method that allows composition update clauses (eg, `col1 + 4 - col2`)
                                            abytNewColVal = CompositeColumnValueModifier.ConstructValue(mCol, valueAsString, abytRow, table);
                                        }
                                        else
                                        {
                                            // Old method to update value (no composite clauses).
                                            // First see if the "value" is actually another column of this row.
                                            colToPullValueFrom = table.getColumnByName(valueAsString);
                                            if (null != colToPullValueFrom)
                                            {
                                                if (mCol.intColLength < colToPullValueFrom.intColLength || !CompositeColumnValueModifier.ColsAreCompatible(mCol, colToPullValueFrom))
                                                {
                                                    throw new Exception("UPDATE attempted to update with a value that was potentially too large or with columns of incompatible types.");
                                                }
                                                abytNewColVal = new byte[colToPullValueFrom.intColLength];
                                                Array.Copy(abytRow, colToPullValueFrom.intColStart, abytNewColVal, 0, colToPullValueFrom.intColLength);
                                            }
                                            else
                                            {
                                                // Not a column name; serialize the literal value.
                                                BaseSerializer serializer = Router.routeMe(mCol);
                                                abytNewColVal = serializer.toByteArray(dictLaunderedUpdateVals[mCol.strColName]);
                                            }
                                        }

                                        // Double check that the serializer at least gave a value
                                        // of the right length so that everything doesn't go to heck.
                                        // (FIX: a second, byte-identical length check that followed
                                        // this one was dead code and has been removed.)
                                        if (abytNewColVal.Length != mCol.intColLength)
                                        {
                                            throw new Exception("Improperly lengthed field from serializer (UPDATE): " + mCol.strColName);
                                        }

                                        Buffer.BlockCopy(abytNewColVal, 0, abytRow, mCol.intColStart, abytNewColVal.Length);
                                    }
                                    // else don't touch what's in the row; it's not an updated column.
                                }
                                // Rewind to the first byte of this row and write it back in place.
                                b.BaseStream.Seek(-1 * table.intRowLength, SeekOrigin.Current);
                                b.BaseStream.Write(abytRow, 0, abytRow.Length);
                                #endregion UPDATE
                                break;

                            case CommandParts.COMMAND_TYPES.DELETE:
                                byte[] abytErase = new byte[table.intRowLength];
                                // at least to test, I'm going to write it all over with 0x88s
                                // (0x88 is the DELETED row-lead byte checked above).
                                for (int j = 0; j < table.intRowLength; j++)
                                {
                                    abytErase[j] = 0x88;
                                }
                                // move pointer back to the first byte of this row.
                                b.BaseStream.Seek(-1 * table.intRowLength, SeekOrigin.Current);
                                b.BaseStream.Write(abytErase, 0, abytErase.Length);
                                break;

                            default:
                                throw new Exception("Unhandled command type in WhereProcessor: " + commandParts.commandType);
                        }
                    }
                } // eo for loop i < intRowCount
            } // eo using statement for the binary reader.
            break; // Completed without an IOException; done retrying.
        }
        catch (IOException)
        {
            // Exponential backoff while the table file is locked by another statement.
            delayFactor = delayFactor * 2;
            if (delayFactor > (3 * 60 * 1000))
            {
                throw new Exception("Statement timeout: " + commandParts.strOriginal);
            }
            Thread.Sleep(delayFactor * 200);
            //org.rufwork.mooresDb.SqlDbSharpLogger.LogMessage(table.strTableName + ".mdbf is locked. Waiting " + delayFactor + " millis to try again.", "WhereProcessor.ProcessRows");
        }
    }
    // nothing to return -- dt was passed by ref.
}