internal void btreeParseCellPtr(byte[] cell, int cellID, ref CellInfo info)
{
    var nPayload = (uint)0; // Number of bytes of cell payload
    Debug.Assert(MutexEx.Held(Shared.Mutex));
    if (info.Cells != cell)
        info.Cells = cell;
    info.CellID = cellID;
    Debug.Assert(Leaf == 0 || Leaf == 1);
    var n = (ushort)ChildPtrSize; // Number of bytes in cell content header
    Debug.Assert(n == 4 - 4 * Leaf);
    if (HasIntKey)
    {
        if (HasData != 0)
            n += (ushort)ConvertEx.GetVariant4(cell, (uint)(cellID + n), out nPayload);
        else
            nPayload = 0;
        n += (ushort)ConvertEx.GetVariant9L(cell, (uint)(cellID + n), out info.nKey);
        info.nData = nPayload;
    }
    else
    {
        info.nData = 0;
        n += (ushort)ConvertEx.GetVariant4(cell, (uint)(cellID + n), out nPayload);
        info.nKey = nPayload;
    }
    info.nPayload = nPayload;
    info.nHeader = n;
    if (Check.LIKELY(nPayload <= this.MaxLocal))
    {
        // This is the (easy) common case where the entire payload fits on the local page. No overflow is required.
        if ((info.nSize = (ushort)(n + nPayload)) < 4)
            info.nSize = 4;
        info.nLocal = (ushort)nPayload;
        info.iOverflow = 0;
    }
    else
    {
        // If the payload will not fit completely on the local page, we have to decide how much to store locally and
        // how much to spill onto overflow pages. The strategy is to minimize the amount of unused space on overflow
        // pages while keeping the amount of local storage between minLocal and maxLocal.
        // Warning: changing the way overflow payload is distributed in any way will result in an incompatible file format.
        var minLocal = (int)MinLocal; // Minimum amount of payload held locally
        var maxLocal = (int)MaxLocal; // Maximum amount of payload held locally
        var surplus = (int)(minLocal + (nPayload - minLocal) % (Shared.UsableSize - 4)); // Overflow payload available for local storage
        info.nLocal = (surplus <= maxLocal ? (ushort)surplus : (ushort)minLocal);
        info.iOverflow = (ushort)(info.nLocal + n);
        info.nSize = (ushort)(info.iOverflow + 4);
    }
}
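// To make the overflow split above concrete, here is a minimal illustration-only sketch of the same calculation,
// kept separate from the port's types. The method name and the example figures for minLocal, maxLocal and
// usableSize are assumptions chosen for the example, not values taken from this port: the point is only that the
// surplus formula keeps the local portion between minLocal and maxLocal while filling overflow pages as evenly
// as possible.
private static int SketchLocalPayload(int nPayload, int minLocal, int maxLocal, int usableSize)
{
    // Same arithmetic as the else-branch of btreeParseCellPtr above.
    var surplus = minLocal + (nPayload - minLocal) % (usableSize - 4);
    return surplus <= maxLocal ? surplus : minLocal;
}
// Example (hypothetical figures): a 1500-byte payload with minLocal = 23, maxLocal = 217 and a 1024-byte usable
// page gives surplus = 23 + (1500 - 23) % 1020 = 480, which exceeds maxLocal, so only minLocal = 23 bytes stay on
// the b-tree page and the remainder spills to overflow pages.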
internal ushort cellSizePtr(byte[] pCell)
{
    int _pIter = this.ChildPtrSize;
    uint nSize = 0;
#if DEBUG
    // The value returned by this function should always be the same as the (CellInfo.nSize) value found by doing
    // a full parse of the cell. If SQLITE_DEBUG is defined, a Debug.Assert() at the bottom of this function
    // verifies that this invariant is not violated.
    var debuginfo = new CellInfo();
    btreeParseCellPtr(pCell, ref debuginfo);
#else
    var debuginfo = new CellInfo();
#endif
    if (this.HasIntKey)
    {
        if (this.HasData != 0)
            _pIter += ConvertEx.GetVariant4(pCell, out nSize);
        else
            nSize = 0;
        // pIter now points at the 64-bit integer key value, a variable length integer. The following block moves
        // pIter to point at the first byte past the end of the key value.
        var pEnd = _pIter + 9;
        while (((pCell[_pIter++]) & 0x80) != 0 && _pIter < pEnd) { }
    }
    else
    {
        _pIter += ConvertEx.GetVariant4(pCell, (uint)_pIter, out nSize);
    }
    if (nSize > this.MaxLocal)
    {
        var minLocal = (int)this.MinLocal;
        nSize = (ushort)(minLocal + (nSize - minLocal) % (this.Shared.UsableSize - 4));
        if (nSize > this.MaxLocal)
            nSize = (ushort)minLocal;
        nSize += 4;
    }
    nSize += (uint)_pIter;
    // The minimum size of any cell is 4 bytes.
    if (nSize < 4)
        nSize = 4;
    Debug.Assert(nSize == debuginfo.nSize);
    return (ushort)nSize;
}
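// Both cellSizePtr and btreeParseCellPtr lean on SQLite's variable-length integer format through
// ConvertEx.GetVariant4/GetVariant9L, and the skip loop above depends on the same rule. As a reference, here is an
// illustration-only decoder (SketchGetVarint is an assumed name, not the ConvertEx implementation): a varint is one
// to nine bytes, big-endian, seven payload bits per byte with the high bit meaning "more bytes follow"; a ninth
// byte, if reached, contributes all eight of its bits. That is why the loop above tests (b & 0x80) and bounds the
// scan at nine bytes.
private static int SketchGetVarint(byte[] buf, int offset, out ulong value)
{
    value = 0;
    for (var i = 0; i < 8; i++)
    {
        var b = buf[offset + i];
        value = (value << 7) | (uint)(b & 0x7f);
        if ((b & 0x80) == 0)
            return i + 1;                    // high bit clear: last byte of the varint
    }
    value = (value << 8) | buf[offset + 8];  // ninth byte contributes all eight bits
    return 9;
}
// Example: { 0x82, 0x2C } decodes to 300 in two bytes - 0x82 carries the high two bits plus the continuation flag,
// 0x2C the low seven bits.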
// was:sqlite3BtreeMovetoUnpacked
public RC MoveToUnpacked(Btree.UnpackedRecord idxKey, long intKey, bool biasRight, ref int pRes)
{
    Debug.Assert(HoldsMutex());
    Debug.Assert(MutexEx.Held(Tree.DB.Mutex));
    Debug.Assert((idxKey == null) == (KeyInfo == null));
    // If the cursor is already positioned at the point we are trying to move to, then just return without doing any work
    if (State == CursorState.VALID && ValidNKey && Pages[0].HasIntKey)
    {
        if (Info.nKey == intKey)
        {
            pRes = 0;
            return RC.OK;
        }
        if (AtLast && Info.nKey < intKey)
        {
            pRes = -1;
            return RC.OK;
        }
    }
    var rc = MoveToRoot();
    if (rc != RC.OK)
        return rc;
    Debug.Assert(Pages[PageID] != null);
    Debug.Assert(Pages[PageID].HasInit);
    Debug.Assert(Pages[PageID].Cells > 0 || State == CursorState.INVALID);
    if (State == CursorState.INVALID)
    {
        pRes = -1;
        Debug.Assert(Pages[PageID].Cells == 0);
        return RC.OK;
    }
    Debug.Assert(Pages[0].HasIntKey || idxKey != null);
    for (; ; )
    {
        var page = Pages[PageID];
        // pPage.nCell must be greater than zero. If this is the root-page the cursor would have been INVALID above
        // and this for(;;) loop not run. If this is not the root-page, then the moveToChild() routine would have
        // already detected db corruption. Similarly, pPage must be the right kind (index or table) of b-tree page.
        // Otherwise a moveToChild() or moveToRoot() call would have detected corruption.
        Debug.Assert(page.Cells > 0);
        Debug.Assert(page.HasIntKey == (idxKey == null));
        var lwr = 0;
        var upr = page.Cells - 1;
        int idx;
        PagesIndexs[PageID] = (ushort)(biasRight ? (idx = upr) : (idx = (upr + lwr) / 2));
        int c;
        for (; ; )
        {
            Debug.Assert(idx == PagesIndexs[PageID]);
            Info.nSize = 0;
            var cell = page.FindCell(idx) + page.ChildPtrSize; // Pointer to current cell in pPage
            if (page.HasIntKey)
            {
                var nCellKey = 0L;
                if (page.HasData != 0)
                {
                    uint dummy0;
                    cell += ConvertEx.GetVariant4(page.Data, (uint)cell, out dummy0);
                }
                ConvertEx.GetVariant9L(page.Data, (uint)cell, out nCellKey);
                if (nCellKey == intKey)
                {
                    c = 0;
                }
                else if (nCellKey < intKey)
                {
                    c = -1;
                }
                else
                {
                    Debug.Assert(nCellKey > intKey);
                    c = 1;
                }
                ValidNKey = true;
                Info.nKey = nCellKey;
            }
            else
            {
                // The maximum supported page-size is 65536 bytes. This means that the maximum number of record bytes
                // stored on an index B-Tree page is less than 16384 bytes and may be stored as a 2-byte varint. This
                // information is used to attempt to avoid parsing the entire cell by checking for the cases where the
                // record is stored entirely within the b-tree page by inspecting the first 2 bytes of the cell.
                var nCell = (int)page.Data[cell + 0];
                if (0 == (nCell & 0x80) && nCell <= page.MaxLocal)
                {
                    // This branch runs if the record-size field of the cell is a single byte varint and the record
                    // fits entirely on the main b-tree page.
                    c = Btree._vdbe.sqlite3VdbeRecordCompare(nCell, page.Data, cell + 1, idxKey);
                }
                else if (0 == (page.Data[cell + 1] & 0x80) && (nCell = ((nCell & 0x7f) << 7) + page.Data[cell + 1]) <= page.MaxLocal)
                {
                    // The record-size field is a 2 byte varint and the record fits entirely on the main b-tree page.
                    c = Btree._vdbe.sqlite3VdbeRecordCompare(nCell, page.Data, cell + 2, idxKey);
                }
                else
                {
                    // The record flows over onto one or more overflow pages. In this case the whole cell needs to be
                    // parsed, a buffer allocated and accessPayload() used to retrieve the record into the buffer
                    // before VdbeRecordCompare() can be called.
                    var pCellBody = new byte[page.Data.Length - cell + page.ChildPtrSize];
                    Buffer.BlockCopy(page.Data, cell - page.ChildPtrSize, pCellBody, 0, pCellBody.Length);
                    page.btreeParseCellPtr(pCellBody, ref Info);
                    nCell = (int)Info.nKey;
                    var pCellKey = MallocEx.sqlite3Malloc(nCell);
                    rc = AccessPayload(0, (uint)nCell, pCellKey, false);
                    if (rc != RC.OK)
                    {
                        pCellKey = null;
                        goto moveto_finish;
                    }
                    c = Btree._vdbe.sqlite3VdbeRecordCompare(nCell, pCellKey, idxKey);
                    pCellKey = null;
                }
            }
            if (c == 0)
            {
                if (page.HasIntKey && 0 == page.Leaf)
                {
                    lwr = idx;
                    upr = lwr - 1;
                    break;
                }
                else
                {
                    pRes = 0;
                    rc = RC.OK;
                    goto moveto_finish;
                }
            }
            if (c < 0)
                lwr = idx + 1;
            else
                upr = idx - 1;
            if (lwr > upr)
                break;
            PagesIndexs[PageID] = (ushort)(idx = (lwr + upr) / 2);
        }
        Debug.Assert(lwr == upr + 1);
        Debug.Assert(page.HasInit);
        Pgno chldPg;
        if (page.Leaf != 0)
            chldPg = 0;
        else if (lwr >= page.Cells)
            chldPg = ConvertEx.Get4(page.Data, page.HeaderOffset + 8);
        else
            chldPg = ConvertEx.Get4(page.Data, page.FindCell(lwr));
        if (chldPg == 0)
        {
            Debug.Assert(PagesIndexs[PageID] < Pages[PageID].Cells);
            pRes = c;
            rc = RC.OK;
            goto moveto_finish;
        }
        PagesIndexs[PageID] = (ushort)lwr;
        Info.nSize = 0;
        ValidNKey = false;
        rc = MoveToChild(chldPg);
        if (rc != RC.OK)
            goto moveto_finish;
    }
moveto_finish:
    return rc;
}
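// The per-page search inside MoveToUnpacked is an ordinary binary search whose exit state drives the descent: on a
// miss the loop exits with lwr naming the slot whose child pointer is followed on interior pages (or the right-most
// pointer when lwr is past the last cell), while on a leaf the sign of the last comparison is reported through pRes.
// The illustration-only sketch below mirrors that lwr/upr/idx bookkeeping, including the biasRight start at the last
// cell; SketchPageSearch is an assumed name and the long[] stands in for a page's integer keys, so it touches none
// of the port's b-tree types.
private static int SketchPageSearch(long[] keys, long intKey, bool biasRight, out int res)
{
    int lwr = 0, upr = keys.Length - 1;
    var idx = biasRight ? upr : (upr + lwr) / 2;     // biasRight begins at the last cell
    var c = -1;
    while (lwr <= upr)
    {
        c = keys[idx].CompareTo(intKey);             // <0: cell key smaller, 0: equal, >0: larger
        if (c == 0) { res = 0; return idx; }
        if (c < 0) lwr = idx + 1; else upr = idx - 1;
        idx = (lwr + upr) / 2;
    }
    res = c;           // sign of the last comparison, as pRes reports it on a leaf
    return lwr;        // on an interior page, the cell whose child pointer would be taken
}
// Example: over keys { 3, 7, 12, 40 }, searching for 12 returns index 2 with res == 0, while searching for 9
// returns 2 with res > 0 (the last probed key, 12, compared larger than 9).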