// Reset every field of this page header back to its unused state.
public void Clear()
{
    // Detach data buffers and owning objects.
    pData = null;
    pExtra = null;
    pPager = null;
    pCache = null;
    pPgHdr1 = null;

    // Unlink from the dirty-page list.
    pDirty = null;
    pDirtyNext = null;
    pDirtyPrev = null;

    // Clear scalar state.
    pgno = 0;
    flags = 0;
    nRef = 0;
#if SQLITE_CHECK_PAGES
    pageHash = 0;
#endif
}
// Return this page header to its default (cleared) state, dropping all
// buffer, pager, cache and dirty-list references and zeroing counters.
public void Clear()
{
    this.pData = null;
    this.pExtra = null;
    this.pDirty = null;
    this.pDirtyNext = null;
    this.pDirtyPrev = null;
    this.pgno = 0;
    this.nRef = 0;
    this.flags = 0;
    this.pPager = null;
    this.pCache = null;
    this.pPgHdr1 = null;
#if SQLITE_CHECK_PAGES
    this.pageHash = 0;
#endif
}
// Release the page's data buffer and reset every field to its default
// state. sqlite3_free is called first so the buffer is returned to the
// allocator before the reference is dropped.
public void Clear()
{
    sqlite3_free(ref pData);
    pData = null;

    // Drop remaining object references.
    pExtra = null;
    pPager = null;
    pCache = null;
    pPgHdr1 = null;
    pDirty = null;
    pDirtyNext = null;
    pDirtyPrev = null;

    // Zero the scalar fields.
    pgno = 0;
    flags = 0;
    nRef = 0;
#if SQLITE_CHECK_PAGES
    this.pageHash = 0;
#endif
}
// Free the page's data buffer and reset all fields, including the
// CacheAllocated flag, so the header can be safely reused.
public void Clear()
{
    sqlite3_free(ref pData);
    pData = null;
    pExtra = null;
    pDirty = null;
    pDirtyNext = null;
    pDirtyPrev = null;
    pPager = null;
    pCache = null;
    pPgHdr1 = null;
    pgno = 0;
    flags = 0;
    nRef = 0;
    CacheAllocated = false;
#if SQLITE_CHECK_PAGES
    pageHash = 0;
#endif
}
// Reset this page header: return the data buffer to the allocator,
// detach every reference, and clear counters and allocation state.
public void Clear()
{
    // Buffer goes back to the allocator before the reference is cleared.
    sqlite3_free(ref this.pData);
    this.pData = null;
    this.pExtra = null;

    // Dirty-list links.
    this.pDirty = null;
    this.pDirtyNext = null;
    this.pDirtyPrev = null;

    // Owners and bookkeeping.
    this.pPager = null;
    this.pCache = null;
    this.pPgHdr1 = null;
    this.pgno = 0;
    this.flags = 0;
    this.nRef = 0;
    this.CacheAllocated = false;
#if SQLITE_CHECK_PAGES
    this.pageHash = 0;
#endif
}
public Pgno pgno; /* Page number for this page */

// Produce a deep copy of this MemPage: clone the value fields via
// MemberwiseClone, then duplicate the overflow-cell array and the raw
// page-data buffer so the copy shares no mutable state with the original.
public MemPage Copy()
{
    var clone = (MemPage)MemberwiseClone();

    if (aOvfl != null)
    {
        // Each overflow cell gets its own copy.
        clone.aOvfl = new _OvflCell[aOvfl.Length];
        for (int n = 0; n < aOvfl.Length; n++)
            clone.aOvfl[n] = aOvfl[n].Copy();
    }

    if (aData != null)
    {
        // Duplicate the raw page bytes.
        clone.aData = new byte[aData.Length];
        Buffer.BlockCopy(aData, 0, clone.aData, 0, aData.Length);
    }

    return clone;
}
// Return one page of administrators filtered by account name and,
// optionally, department, joined with each admin's department name.
//   accName  - substring filter on account name; null/"" matches all
//   deptId   - department id as string; "undefined" disables the filter
//   pageindex/pagesize - paging parameters (pagesize defaults to 2)
// Returns a MemPage holding the projected members plus the total count.
public MemPage GetAdmin(string accName, string deptId, int pageindex, int pagesize = 2)
{
    int count = 0;
    // Treat a missing filter as match-all.
    accName = accName ?? "";

    List<RbacAdmin> memlist;
    if (deptId != "undefined") // a real department filter was supplied
    {
        // Use && (AndAlso) rather than the bitwise & the original had:
        // it short-circuits like normal C# and LINQ providers translate
        // it to the same SQL AND.
        memlist = _rbac.GetAdmin(u => u.AccName.Contains(accName) && u.DeptId.ToString() == deptId, u => u.AccName, pageindex, pagesize, out count);
    }
    else
    {
        memlist = _rbac.GetAdmin(u => u.AccName.Contains(accName), u => u.AccName, pageindex, pagesize, out count);
    }

    // Project each admin together with its department name for display.
    var list = from s in memlist
               join d in _rbac.GetDept() on s.DeptId equals d.Id
               select new Member()
               {
                   Id = s.Id,
                   AccNum = s.AccNum,
                   AccName = s.AccName,
                   AccPass = s.AccPass,
                   AccPhone = s.AccPhone,
                   DeptName = d.DeptName,
                   CreateTime = s.CreateTime,
                   UpdateTime = s.UpdateTime,
                   IsEnable = s.IsEnable
               };

    return new MemPage { Members = list.ToList(), Count = count };
}
// Allocate "bytes" bytes of space from within the B-Tree page "page".
// On success the index of the first byte of the allocation is written
// into idx and RC.OK is returned; a malformed page yields CORRUPT_BKPT.
// Space is taken first from a large-enough freelist slot; otherwise from
// the gap between the cell-pointer array and the cell-content area,
// defragmenting the page first when necessary.
static RC allocateSpace(MemPage page, int bytes, ref uint idx)
{
    Debug.Assert(Pager.Iswriteable(page.DBPage));
    Debug.Assert(page.Bt != null);
    Debug.Assert(MutexEx.Held(page.Bt.Mutex));
    Debug.Assert(bytes >= 0); // Minimum cell size is 4
    Debug.Assert(page.Frees >= bytes);
    Debug.Assert(page.Overflows == 0);
    var usableSize = page.Bt.UsableSize; // Usable size of the page
    Debug.Assert(bytes < usableSize - 8);

    var hdr = page.HdrOffset; // Local cache of pPage.hdrOffset
    var data = page.Data; // Local cache of pPage.aData
    var frags = data[hdr + 7]; // Number of fragmented bytes on pPage
    Debug.Assert(page.CellOffset == hdr + 12 - 4 * (page.Leaf ? 1 : 0));
    var gap = page.CellOffset + 2 * page.Cells; // First byte of gap between cell pointers and cell content
    var top = (uint)ConvertEx.Get2nz(data, hdr + 5); // First byte of cell content area
    // The cell-pointer array may never extend past the content area.
    if (gap > top) return SysEx.CORRUPT_BKPT();

    RC rc;
    if (frags >= 60)
    {
        // Always defragment highly fragmented pages
        rc = defragmentPage(page);
        if (rc != RC.OK) return rc;
        top = ConvertEx.Get2nz(data, hdr + 5);
    }
    else if (gap + 2 <= top)
    {
        // Search the freelist looking for a free slot big enough to satisfy the request. The allocation is made from the first free slot in
        // the list that is large enough to accomadate it.
        int pc;
        for (int addr = hdr + 1; (pc = ConvertEx.Get2(data, addr)) > 0; addr = pc)
        {
            // Freeblock pointers must stay on the page and in ascending order.
            if (pc > usableSize - 4 || pc < addr + 4) return SysEx.CORRUPT_BKPT();
            int size = ConvertEx.Get2(data, pc + 2); // Size of free slot
            if (size >= bytes)
            {
                int x = size - bytes; // leftover bytes in this slot
                if (x < 4)
                {
                    // Remove the slot from the free-list. Update the number of fragmented bytes within the page.
                    data[addr + 0] = data[pc + 0]; // memcpy( data[addr], ref data[pc], 2 );
                    data[addr + 1] = data[pc + 1];
                    data[hdr + 7] = (byte)(frags + x);
                }
                else if (size + pc > usableSize)
                    return SysEx.CORRUPT_BKPT();
                else
                    // The slot remains on the free-list. Reduce its size to account for the portion used by the new allocation.
                    ConvertEx.Put2(data, pc + 2, x);
                // The allocation is carved from the tail of the slot.
                idx = (uint)(pc + x);
                return RC.OK;
            }
        }
    }

    // Check to make sure there is enough space in the gap to satisfy the allocation. If not, defragment.
    if (gap + 2 + bytes > top)
    {
        rc = defragmentPage(page);
        if (rc != RC.OK) return rc;
        top = ConvertEx.Get2nz(data, hdr + 5);
        Debug.Assert(gap + bytes <= top);
    }

    // Allocate memory from the gap in between the cell pointer array and the cell content area. The btreeInitPage() call has already
    // validated the freelist. Given that the freelist is valid, there is no way that the allocation can extend off the end of the page.
    // The assert() below verifies the previous sentence.
    top -= (uint)bytes;
    ConvertEx.Put2(data, hdr + 5, top);
    Debug.Assert(top + bytes <= (int)page.Bt.UsableSize);
    idx = top;
    return RC.OK;
}
// If the cell given by "cell" spills onto an overflow chain, add a
// pointer-map entry mapping the first overflow page back to this page.
// Does nothing when an earlier step already failed (rcRef != OK) or the
// cell has no overflow portion.
static void ptrmapPutOvflPtr(MemPage page, byte[] cell, ref RC rcRef)
{
    if (rcRef != RC.OK)
        return;
    Debug.Assert(cell != null);

    var info = new CellInfo();
    btreeParseCellPtr(page, cell, ref info);
    Debug.Assert((info.Data + (page.IntKey ? 0 : info.Key)) == info.Payload);

    if (info.Overflow == 0)
        return; // entire payload is local; no pointer-map entry needed

    Pid ovfl = ConvertEx.Get4(cell, info.Overflow);
    ptrmapPut(page.Bt, ovfl, PTRMAP.OVERFLOW1, page.ID, ref rcRef);
}
// Size in bytes of the cell-th cell on "page" (header plus local payload).
static ushort cellSize(MemPage page, uint cell)
{
    var cellPtr = findCell(page, cell);
    return cellSizePtr(page, cellPtr);
}
// C# helper: compute the size of the cell starting at offset_ within the
// byte array "cell". The tail of the array is copied into a scratch
// buffer so it can be parsed as a cell beginning at index 0.
static ushort cellSizePtr(MemPage page, byte[] cell, uint offset_) // For C#
{
    var info = new CellInfo();
    int tailLength = (int)(cell.Length - offset_);
    info.Cell = C._alloc(cell.Length);
    Buffer.BlockCopy(cell, (int)offset_, info.Cell, 0, tailLength);
    btreeParseCellPtr(page, info.Cell, ref info);
    return info.Size;
}
// Parse the cell-th cell on "page" into "info".
// Thin wrapper over parseCell; kept for parity with the C source.
static void btreeParseCell(MemPage page, uint cell, ref CellInfo info)
{
    parseCell(page, cell, ref info);
}
// Integrity-check one page of the b-tree and, recursively, all of its
// children. Verifies cell sanity, overflow chains, intkey ordering
// against parent min/max keys, and complete byte coverage of the page
// (every usable byte used exactly once or accounted as fragmentation).
// Returns the depth of the subtree rooted at pageID (0 on error, with
// messages appended to "check").
//
// FIX: the byte-coverage pass originally initialized hit[0..contentOffset)
// with "for (uint z = contentOffset - 1; z >= 0; z--)". An unsigned z
// makes "z >= 0" always true, so z-- wraps past zero and hit[z] throws
// IndexOutOfRangeException. The loop now uses a signed index.
static int checkTreePage(IntegrityCk check, Pid pageID, string parentContext, ref long parentMinKey, bool hasParentMinKey, ref long parentMaxKey, bool hasParentMaxKey)
{
    var msg = new StringBuilder(100);
    msg.AppendFormat("Page {0}: ", pageID);

    // Check that the page exists
    var bt = check.Bt;
    var usableSize = (int)bt.UsableSize;
    if (pageID == 0) return 0;
    if (checkRef(check, pageID, parentContext)) return 0;
    RC rc;
    MemPage page = new MemPage();
    if ((rc = btreeGetPage(bt, pageID, ref page, false)) != RC.OK)
    {
        checkAppendMsg(check, msg.ToString(), "unable to get the page. error code=%d", rc);
        return 0;
    }

    // Clear MemPage.isInit to make sure the corruption detection code in btreeInitPage() is executed.
    page.IsInit = false;
    if ((rc = btreeInitPage(page)) != RC.OK)
    {
        Debug.Assert(rc == RC.CORRUPT); // The only possible error from InitPage
        checkAppendMsg(check, msg.ToString(), "btreeInitPage() returns error code %d", rc);
        releasePage(page);
        return 0;
    }

    // Check out all the cells.
    Pid id;
    uint i;
    int depth = 0;
    long minKey = 0;
    long maxKey = 0;
    for (i = 0U; i < page.Cells && check.MaxErrors != 0; i++)
    {
        // Check payload overflow pages
        msg.AppendFormat("On tree page {0} cell {1}: ", pageID, i);
        uint cell_ = findCell(page, i);
        var info = new CellInfo();
        btreeParseCellPtr(page, cell_, ref info);
        uint sizeCell = info.Data;
        if (!page.IntKey) sizeCell += (uint)info.Key;
        // For intKey pages, check that the keys are in order.
        else if (i == 0) minKey = maxKey = info.Key;
        else
        {
            if (info.Key <= maxKey)
                checkAppendMsg(check, msg.ToString(), "Rowid %lld out of order (previous was %lld)", info.Key, maxKey);
            maxKey = info.Key;
        }
        Debug.Assert(sizeCell == info.Payload);
        if (sizeCell > info.Local) //&& pCell[info.iOverflow]<=&pPage.aData[pBt.usableSize]
        {
            // Payload spills to overflow pages: verify the whole chain.
            int pages = (int)(sizeCell - info.Local + usableSize - 5) / (usableSize - 4);
            Pid ovflID = ConvertEx.Get4(page.Data, cell_ + info.Overflow);
#if !OMIT_AUTOVACUUM
            if (bt.AutoVacuum)
                checkPtrmap(check, ovflID, PTRMAP.OVERFLOW1, pageID, msg.ToString());
#endif
            checkList(check, false, ovflID, pages, msg.ToString());
        }

        // Check sanity of left child page.
        if (!page.Leaf)
        {
            id = (Pid)ConvertEx.Get4(page.Data, cell_);
#if !OMIT_AUTOVACUUM
            if (bt.AutoVacuum)
                checkPtrmap(check, id, PTRMAP.BTREE, pageID, msg.ToString());
#endif
            int depth2;
            if (i == 0)
                depth2 = checkTreePage(check, id, msg.ToString(), ref minKey, true, ref _nullRef_, false);
            else
                depth2 = checkTreePage(check, id, msg.ToString(), ref minKey, true, ref maxKey, true);
            // All children must sit at the same depth.
            if (i > 0 && depth2 != depth)
                checkAppendMsg(check, msg, "Child page depth differs");
            depth = depth2;
        }
    }
    if (!page.Leaf)
    {
        // Recurse into the right-most child.
        id = (Pid)ConvertEx.Get4(page.Data, page.HdrOffset + 8);
        msg.AppendFormat("On page {0} at right child: ", pageID);
#if !OMIT_AUTOVACUUM
        if (bt.AutoVacuum)
            checkPtrmap(check, id, PTRMAP.BTREE, pageID, msg.ToString());
#endif
        if (page.Cells == 0)
            checkTreePage(check, id, msg.ToString(), ref _nullRef_, false, ref _nullRef_, false);
        else
            checkTreePage(check, id, msg.ToString(), ref _nullRef_, false, ref maxKey, true);
    }

    // For intKey leaf pages, check that the min/max keys are in order with any left/parent/right pages.
    if (page.Leaf && page.IntKey)
    {
        // if we are a left child page
        if (hasParentMinKey)
        {
            // if we are the left most child page
            if (!hasParentMaxKey)
            {
                if (maxKey > parentMinKey)
                    checkAppendMsg(check, msg, "Rowid %lld out of order (max larger than parent min of %lld)", maxKey, parentMinKey);
            }
            else
            {
                if (minKey <= parentMinKey)
                    checkAppendMsg(check, msg, "Rowid %lld out of order (min less than parent min of %lld)", minKey, parentMinKey);
                if (maxKey > parentMaxKey)
                    checkAppendMsg(check, msg, "Rowid %lld out of order (max larger than parent max of %lld)", maxKey, parentMaxKey);
                parentMinKey = maxKey;
            }
        }
        // else if we're a right child page
        else if (hasParentMaxKey)
        {
            if (minKey <= parentMaxKey)
                checkAppendMsg(check, msg, "Rowid %lld out of order (min less than parent max of %lld)", minKey, parentMaxKey);
        }
    }

    // Check for complete coverage of the page
    byte[] data = page.Data;
    uint hdr = page.HdrOffset;
    byte[] hit = PCache.PageAlloc2((int)bt.PageSize);
    if (hit == null)
        check.MallocFailed = true;
    else
    {
        uint contentOffset = ConvertEx.Get2nz(data, hdr + 5);
        Debug.Assert(contentOffset <= usableSize); // Enforced by btreeInitPage()
        Array.Clear(hit, (int)contentOffset, (int)(usableSize - contentOffset));
        // memset(hit, 1, contentOffset) — mark header + cell-pointer area as used.
        // FIX: signed loop index; the original uint index wrapped below zero.
        for (int z = (int)contentOffset - 1; z >= 0; z--)
            hit[z] = 1;
        uint cells = ConvertEx.Get2(data, hdr + 3);
        uint cellStart = hdr + 12 - 4 * (page.Leaf ? 1U : 0U);
        for (i = 0; i < cells; i++)
        {
            var sizeCell = 65536U;
            uint pc = ConvertEx.Get2(data, cellStart + i * 2);
            if (pc <= usableSize - 4)
                sizeCell = cellSizePtr(page, data, pc);
            if ((int)(pc + sizeCell - 1) >= usableSize)
                checkAppendMsg(check, (string)null, "Corruption detected in cell %d on page %d", i, pageID);
            else
                for (var j = (int)(pc + sizeCell - 1); j >= pc; j--) hit[j]++;
        }
        // Walk the freelist and mark each free block's bytes as used once.
        i = ConvertEx.Get2(data, hdr + 1);
        while (i > 0)
        {
            Debug.Assert(i <= usableSize - 4); // Enforced by btreeInitPage()
            uint size = ConvertEx.Get2(data, i + 2);
            Debug.Assert(i + size <= usableSize); // Enforced by btreeInitPage()
            uint j;
            for (j = i + size - 1; j >= i; j--) hit[j]++;
            j = ConvertEx.Get2(data, i);
            Debug.Assert(j == 0 || j > i + size); // Enforced by btreeInitPage()
            Debug.Assert(j <= usableSize - 4); // Enforced by btreeInitPage()
            i = j;
        }
        // Every byte must be covered exactly once; uncovered bytes must
        // match the fragmentation count in the header.
        uint cnt;
        for (i = cnt = 0; i < usableSize; i++)
        {
            if (hit[i] == 0) cnt++;
            else if (hit[i] > 1)
            {
                checkAppendMsg(check, (string)null, "Multiple uses for byte %d of page %d", i, pageID);
                break;
            }
        }
        if (cnt != data[hdr + 7])
            checkAppendMsg(check, (string)null, "Fragmentation of %d bytes reported as %d on page %d", cnt, data[hdr + 7], pageID);
    }
    PCache.PageFree2(ref hit);
    releasePage(page);
    return depth + 1;
}
// Debug helper: for the first "pages" entries of pageSet, verify that
// the pointer-map entry for every overflow page and every child page
// points back at the page that references it. Returns 1 so the call can
// be wrapped in Debug.Assert(ptrmapCheckPages(...) != 0).
static int ptrmapCheckPages(MemPage[] pageSet, int pages)
{
    for (int i = 0; i < pages; i++)
    {
        MemPage page = pageSet[i];
        BtShared bt = page.Bt;
        Debug.Assert(page.IsInit);
        // FIX: initialize before use — C# requires definite assignment
        // for locals passed by ref (the original "Pid n; PTRMAP e;" did
        // not compile).
        Pid n = 0;
        PTRMAP e = 0;
        for (uint j = 0U; j < page.Cells; j++)
        {
            uint z_ = findCell(page, j);
            CellInfo info = new CellInfo();
            btreeParseCellPtr(page, z_, ref info);
            if (info.Overflow != 0)
            {
                // First overflow page must map back to this page.
                Pid ovfl = ConvertEx.Get4(page.Data, z_ + info.Overflow);
                ptrmapGet(bt, ovfl, ref e, ref n);
                Debug.Assert(n == page.ID && e == PTRMAP.OVERFLOW1);
            }
            if (!page.Leaf)
            {
                // Left child of this cell must map back to this page.
                Pid child = ConvertEx.Get4(page.Data, z_);
                ptrmapGet(bt, child, ref e, ref n);
                Debug.Assert(n == page.ID && e == PTRMAP.BTREE);
            }
        }
        if (!page.Leaf)
        {
            // Right-most child pointer in the page header.
            Pid child = ConvertEx.Get4(page.Data, page.HdrOffset + 8);
            ptrmapGet(bt, child, ref e, ref n);
            Debug.Assert(n == page.ID && e == PTRMAP.BTREE);
        }
    }
    return 1;
}
static int NB = (NN * 2 + 1); // Total pages involved in the balance

#if !OMIT_QUICKBALANCE
// Fast balancing path for the common append case: "page" is an overfull
// right-most leaf of an intkey table with exactly one overflow cell.
// A single new sibling is allocated, the overflow cell is moved onto it,
// and a divider cell (page number + largest key of "page") is built in
// the "space" scratch buffer and inserted into "parent".
static RC balance_quick(MemPage parent, MemPage page, byte[] space)
{
    BtShared bt = page.Bt; // B-Tree Database
    Debug.Assert(MutexEx.Held(page.Bt.Mutex));
    Debug.Assert(Pager.Iswriteable(parent.DBPage));
    Debug.Assert(page.Overflows == 1);

    // This error condition is now caught prior to reaching this function
    if (page.Cells <= 0)
        return SysEx.CORRUPT_BKPT();

    // Allocate a new page. This page will become the right-sibling of pPage. Make the parent page writable, so that the new divider cell
    // may be inserted. If both these operations are successful, proceed.
    MemPage newPage = new MemPage(); // Newly allocated page
    Pid newPageID = 0; // Page number of pNew
    var rc = allocateBtreePage(bt, ref newPage, ref newPageID, 0, BTALLOC.ANY);
    if (rc == RC.OK)
    {
        ushort out_ = 4; //byte[] out_ = &space[4];
        byte[] cell = page.Ovfls[0].Cell;
        ushort[] sizeCell = new ushort[1];
        sizeCell[0] = cellSizePtr(page, cell);
        Debug.Assert(Pager.Iswriteable(newPage.DBPage));
        Debug.Assert(page.Data[0] == (PTF_INTKEY | PTF_LEAFDATA | PTF_LEAF));
        zeroPage(newPage, PTF_INTKEY | PTF_LEAFDATA | PTF_LEAF);
        // The new sibling holds exactly the one overflow cell.
        assemblePage(newPage, 1, cell, sizeCell);

        // If this is an auto-vacuum database, update the pointer map with entries for the new page, and any pointer from the
        // cell on the page to an overflow page. If either of these operations fails, the return code is set, but the contents
        // of the parent page are still manipulated by the code below. That is Ok, at this point the parent page is guaranteed to
        // be marked as dirty. Returning an error code will cause a rollback, undoing any changes made to the parent page.
#if !OMIT_AUTOVACUUM
        if (bt.AutoVacuum)
        {
            ptrmapPut(bt, newPageID, PTRMAP.BTREE, parent.ID, ref rc);
            if (sizeCell[0] > newPage.MinLocal)
                ptrmapPutOvflPtr(newPage, cell, ref rc);
        }
#endif

        // Create a divider cell to insert into pParent. The divider cell consists of a 4-byte page number (the page number of pPage) and
        // a variable length key value (which must be the same value as the largest key on pPage).
        //
        // To find the largest key value on pPage, first find the right-most cell on pPage. The first two fields of this cell are the
        // record-length (a variable length integer at most 32-bits in size) and the key value (a variable length integer, may have any value).
        // The first of the while(...) loops below skips over the record-length field. The second while(...) loop copies the key value from the
        // cell on pPage into the pSpace buffer.
        uint cell_ = findCell(page, page.Cells - 1U);
        cell = page.Data;
        uint stop = cell_ + 9;
        while (((cell[cell_++]) & 0x80) != 0 && cell_ < stop) ;
        stop = cell_ + 9;
        while (((space[out_++] = cell[cell_++]) & 0x80) != 0 && cell_ < stop) ;

        // Insert the new divider cell into pParent.
        insertCell(parent, parent.Cells, space, out_, null, page.ID, ref rc);

        // Set the right-child pointer of pParent to point to the new page.
        ConvertEx.Put4(parent.Data, parent.HdrOffset + 8, newPageID);

        // Release the reference to the new page.
        releasePage(newPage);
    }
    return rc;
}
// Return the "size" bytes starting at offset "start" of page.Data to
// the page's freelist, coalescing with adjacent free blocks and, when
// possible, folding a leading free block back into the cell-content
// area. Returns CORRUPT_BKPT if the freelist proves malformed.
static RC freeSpace(MemPage page, int start, int size)
{
    Debug.Assert(page.Bt != null);
    Debug.Assert(Pager.Iswriteable(page.DBPage));
    Debug.Assert(start >= page.HdrOffset + 6 + page.ChildPtrSize);
    Debug.Assert((start + size) <= (int)page.Bt.UsableSize);
    Debug.Assert(MutexEx.Held(page.Bt.Mutex));
    Debug.Assert(size >= 0); // Minimum cell size is 4

    var data = page.Data;
    if ((page.Bt.BtsFlags & BTS.SECURE_DELETE) != 0) // Overwrite deleted information with zeros when the secure_delete option is enabled
        Array.Clear(data, start, size);

    // Add the space back into the linked list of freeblocks. Note that even though the freeblock list was checked by btreeInitPage(),
    // btreeInitPage() did not detect overlapping cells or freeblocks that overlapped cells. Nor does it detect when the
    // cell content area exceeds the value in the page header. If these situations arise, then subsequent insert operations might corrupt
    // the freelist. So we do need to check for corruption while scanning the freelist.
    int hdr = page.HdrOffset;
    int addr = hdr + 1;
    int last = (int)page.Bt.UsableSize - 4; // Largest possible freeblock offset
    Debug.Assert(start <= last);
    int pbegin;
    // Walk the freelist to find the insertion point that keeps it sorted.
    while ((pbegin = ConvertEx.Get2(data, addr)) < start && pbegin > 0)
    {
        if (pbegin < addr + 4)
            return SysEx.CORRUPT_BKPT();
        addr = pbegin;
    }
    if (pbegin > last)
        return SysEx.CORRUPT_BKPT();
    Debug.Assert(pbegin > addr || pbegin == 0);
    // Link the new block in at addr and record its successor and size.
    ConvertEx.Put2(data, addr, start);
    ConvertEx.Put2(data, start, pbegin);
    ConvertEx.Put2(data, start + 2, size);
    page.Frees = (ushort)(page.Frees + size);

    // Coalesce adjacent free blocks
    addr = hdr + 1;
    while ((pbegin = ConvertEx.Get2(data, addr)) > 0)
    {
        Debug.Assert(pbegin > addr);
        Debug.Assert(pbegin <= (int)page.Bt.UsableSize - 4);
        int pnext = ConvertEx.Get2(data, pbegin);
        int psize = ConvertEx.Get2(data, pbegin + 2);
        if (pbegin + psize + 3 >= pnext && pnext > 0)
        {
            // Blocks are adjacent (possibly with up to 3 fragment bytes
            // between them): merge and reclaim the fragment bytes.
            int frag = pnext - (pbegin + psize);
            if (frag < 0 || frag > (int)data[hdr + 7])
                return SysEx.CORRUPT_BKPT();
            data[hdr + 7] -= (byte)frag;
            int x = ConvertEx.Get2(data, pnext);
            ConvertEx.Put2(data, pbegin, x);
            x = pnext + ConvertEx.Get2(data, pnext + 2) - pbegin;
            ConvertEx.Put2(data, pbegin + 2, x);
        }
        else
            addr = pbegin;
    }

    // If the cell content area begins with a freeblock, remove it.
    if (data[hdr + 1] == data[hdr + 5] && data[hdr + 2] == data[hdr + 6])
    {
        pbegin = ConvertEx.Get2(data, hdr + 1);
        ConvertEx.Put2(data, hdr + 1, ConvertEx.Get2(data, pbegin)); // memcpy( data[hdr + 1], ref data[pbegin], 2 );
        int top = ConvertEx.Get2(data, hdr + 5) + ConvertEx.Get2(data, pbegin + 2);
        ConvertEx.Put2(data, hdr + 5, top);
    }
    Debug.Assert(Pager.Iswriteable(page.DBPage));
    return RC.OK;
}
// Initialize the auxiliary fields of a MemPage (page type, cell count,
// cell offset, total free space) from the raw page image. Performs
// enough validation to detect a corrupt page, returning CORRUPT_BKPT in
// that case. Does nothing if the page is already initialized.
static RC btreeInitPage(MemPage page)
{
    Debug.Assert(page.Bt != null);
    Debug.Assert(MutexEx.Held(page.Bt.Mutex));
    Debug.Assert(page.ID == Pager.get_PageID(page.DBPage));
    Debug.Assert(page == Pager.GetExtra<MemPage>(page.DBPage));
    Debug.Assert(page.Data == Pager.GetData(page.DBPage));
    if (!page.IsInit)
    {
        var bt = page.Bt; // The main btree structure
        var hdr = page.HdrOffset; // Offset to beginning of page header
        var data = page.Data; // Equal to pPage.aData
        if (decodeFlags(page, data[hdr]) != RC.OK)
            return SysEx.CORRUPT_BKPT();
        Debug.Assert(bt.PageSize >= 512 && bt.PageSize <= 65536);
        page.MaskPage = (ushort)(bt.PageSize - 1);
        page.Overflows = 0;
        int usableSize = (int)bt.UsableSize; // Amount of usable space on each page
        ushort cellOffset; // Offset from start of page to first cell pointer
        page.CellOffset = (cellOffset = (ushort)(hdr + 12 - 4 * (page.Leaf ? 1 : 0)));
        int top = ConvertEx.Get2nz(data, hdr + 5); // First byte of the cell content area
        page.Cells = (ushort)(ConvertEx.Get2(data, hdr + 3));
        if (page.Cells > MX_CELL(bt)) // To many cells for a single page. The page must be corrupt
            return SysEx.CORRUPT_BKPT();

        // A malformed database page might cause us to read past the end of page when parsing a cell.
        //
        // The following block of code checks early to see if a cell extends past the end of a page boundary and causes SQLITE_CORRUPT to be
        // returned if it does.
        int cellFirst = cellOffset + 2 * page.Cells; // First allowable cell or freeblock offset
        int cellLast = usableSize - 4; // Last possible cell or freeblock offset
        ushort pc; // Address of a freeblock within pPage.aData[]
#if ENABLE_OVERSIZE_CELL_CHECK
        {
            if (!page.Leaf) cellLast--;
            for (var i = 0; i < page.Cells; i++)
            {
                pc = (ushort)ConvertEx.Get2(data, cellOffset + i * 2);
                if (pc < cellFirst || pc > cellLast)
                    return SysEx.CORRUPT_BKPT();
                int sz = cellSizePtr(page, data, pc); // Size of a cell
                if (pc + sz > usableSize)
                    return SysEx.CORRUPT_BKPT();
            }
            if (!page.Leaf) cellLast++;
        }
#endif

        // Compute the total free space on the page
        pc = (ushort)ConvertEx.Get2(data, hdr + 1);
        int free = (ushort)(data[hdr + 7] + top); // Number of unused bytes on the page
        while (pc > 0)
        {
            if (pc < cellFirst || pc > cellLast) // Start of free block is off the page
                return SysEx.CORRUPT_BKPT();
            var next = (ushort)ConvertEx.Get2(data, pc);
            var size = (ushort)ConvertEx.Get2(data, pc + 2);
            if ((next > 0 && next <= pc + size + 3) || pc + size > usableSize) // Free blocks must be in ascending order. And the last byte of the free-block must lie on the database page.
                return SysEx.CORRUPT_BKPT();
            free = (ushort)(free + size);
            pc = next;
        }

        // At this point, nFree contains the sum of the offset to the start of the cell-content area plus the number of free bytes within
        // the cell-content area. If this is greater than the usable-size of the page, then the page must be corrupted. This check also
        // serves to verify that the offset to the start of the cell-content area, according to the page header, lies within the page.
        if (free > usableSize)
            return SysEx.CORRUPT_BKPT();
        page.Frees = (ushort)(free - cellFirst);
        page.IsInit = true;
    }
    return RC.OK;
}
// Copy the b-tree node content of page "from" onto page "to", then
// reinitialize "to" from the copied data. No-op when rcRef already
// holds an error; on failure rcRef is updated for the caller.
static void copyNodeContent(MemPage from, MemPage to, ref RC rcRef)
{
    if (rcRef == RC.OK)
    {
        BtShared bt = from.Bt;
        var fromData = from.Data;
        var toData = to.Data;
        int fromHdr = from.HdrOffset;
        // Page 1 starts its header after the 100-byte file header.
        int toHdr = (to.ID == 1 ? 100 : 0);
        Debug.Assert(from.IsInit);
        Debug.Assert(from.Frees >= toHdr);
        Debug.Assert(ConvertEx.Get2(fromData, fromHdr + 5) <= (int)bt.UsableSize);

        // Copy the b-tree node content from page pFrom to page pTo.
        int data = ConvertEx.Get2(fromData, fromHdr + 5);
        // First the cell-content area, then the header + cell pointers.
        Buffer.BlockCopy(fromData, data, toData, data, (int)bt.UsableSize - data);
        Buffer.BlockCopy(fromData, fromHdr, toData, toHdr, from.CellOffset + 2 * from.Cells);

        // Reinitialize page pTo so that the contents of the MemPage structure match the new data. The initialization of pTo can actually fail under
        // fairly obscure circumstances, even though it is a copy of initialized page pFrom.
        to.IsInit = false;
        var rc = btreeInitPage(to);
        if (rc != RC.OK)
        {
            rcRef = rc;
            return;
        }

        // If this is an auto-vacuum database, update the pointer-map entries for any b-tree or overflow pages that pTo now contains the pointers to.
#if !OMIT_AUTOVACUUM
        if (bt.AutoVacuum)
            rcRef = setChildPtrmaps(to);
#endif
    }
}
// Parse the cell beginning at index cellIdx within the byte array
// "cell" on "page", filling "info" with the header size, key, payload
// length, and how the payload splits between local storage and
// overflow pages.
static void btreeParseCellPtr(MemPage page, byte[] cell, uint cellIdx, ref CellInfo info)
{
    Debug.Assert(MutexEx.Held(page.Bt.Mutex));
    if (info.Cell != cell) info.Cell = cell;
    info.Cell_ = cellIdx;
    ushort n = page.ChildPtrSize; // Number bytes in cell content header
    Debug.Assert(n == (page.Leaf ? 0 : 4));
    uint payloadLength = 0; // Number of bytes of cell payload
    if (page.IntKey)
    {
        // Intkey cells: optional data-length varint, then the rowid varint.
        if (page.HasData)
            n += (ushort)ConvertEx.GetVarint32(cell, cellIdx + n, out payloadLength);
        else
            payloadLength = 0;
        n += (ushort)ConvertEx.GetVarint(cell, cellIdx + n, out info.Key);
        info.Data = payloadLength;
    }
    else
    {
        // Index cells: a single varint holds the key/payload length.
        info.Data = 0;
        n += (ushort)ConvertEx.GetVarint32(cell, cellIdx + n, out payloadLength);
        info.Key = payloadLength;
    }
    info.Payload = payloadLength;
    info.Header = n;
    if (payloadLength <= page.MaxLocal)
    {
        // This is the (easy) common case where the entire payload fits on the local page. No overflow is required.
        if ((info.Size = (ushort)(n + payloadLength)) < 4)
            info.Size = 4;
        info.Local = (ushort)payloadLength;
        info.Overflow = 0;
    }
    else
    {
        // If the payload will not fit completely on the local page, we have to decide how much to store locally and how much to spill onto
        // overflow pages. The strategy is to minimize the amount of unused space on overflow pages while keeping the amount of local storage
        // in between minLocal and maxLocal.
        //
        // Warning: changing the way overflow payload is distributed in any way will result in an incompatible file format.
        int minLocal = page.MinLocal; // Minimum amount of payload held locally
        int maxLocal = page.MaxLocal; // Maximum amount of payload held locally
        int surplus = (int)(minLocal + (payloadLength - minLocal) % (page.Bt.UsableSize - 4)); // Overflow payload available for local storage
        ASSERTCOVERAGE(surplus == maxLocal);
        ASSERTCOVERAGE(surplus == maxLocal + 1);
        if (surplus <= maxLocal)
            info.Local = (ushort)surplus;
        else
            info.Local = (ushort)minLocal;
        info.Overflow = (ushort)(info.Local + n);
        info.Size = (ushort)(info.Overflow + 4);
    }
}
// under C#; Try to reuse Memory
//
// Redistribute cells among the page identified by (parent, parentIdx) and up to two of
// its siblings so that every sibling page ends up between roughly 66% and 100% full,
// inserting or removing divider cells in 'parent' as required. This is SQLite's
// "balance_nonroot" routine ported to C#.
//
// parent:    parent page of the siblings being balanced (must be writeable, at most one overflow cell)
// parentIdx: index within parent of the cell pointing at the page that triggered the balance
// ovflSpace: caller-supplied page-sized scratch buffer; RC.NOMEM is returned if it is null
// isRoot:    true if parent is the root of the b-tree (enables the balance-shallower case)
// bulk:      true during bulk-append optimization (pack cells to the left)
// Returns RC.OK, or an error code on OOM / corruption / I/O failure.
static RC balance_nonroot(MemPage parent, uint parentIdx, byte[] ovflSpace, bool isRoot, bool bulk)
{
    BtShared bt = parent.Bt; // The whole database
    Debug.Assert(MutexEx.Held(bt.Mutex));
    Debug.Assert(Pager.Iswriteable(parent.DBPage));
#if false
    TRACE("BALANCE: begin page %d child of %d\n", page.ID, parent.ID);
#endif
    // At this point pParent may have at most one overflow cell. And if this overflow cell is present, it must be the cell with
    // index iParentIdx. This scenario comes about when this function is called (indirectly) from sqlite3BtreeDelete().
    Debug.Assert(parent.Overflows == 0 || parent.Overflows == 1);
    Debug.Assert(parent.Overflows == 0 || parent.OvflIdxs[0] == parentIdx);
    if (ovflSpace == null) return RC.NOMEM;
    // Find the sibling pages to balance. Also locate the cells in pParent that divide the siblings. An attempt is made to find NN siblings on
    // either side of pPage. More siblings are taken from one side, however, if there are fewer than NN siblings on the other side. If pParent
    // has NB or fewer children then all children of pParent are taken.
    //
    // This loop also drops the divider cells from the parent page. This way, the remainder of the function does not have to deal with any
    // overflow cells in the parent page, since if any existed they will have already been removed.
    uint i = (uint)parent.Overflows + parent.Cells;
    uint nxDiv; // Next divider slot in pParent.aCell[]
    if (i < 2) nxDiv = 0;
    else
    {
        if (parentIdx == 0) nxDiv = 0;
        else if (parentIdx == i) nxDiv = i - 2 + (bulk ? 1U : 0U);
        else { Debug.Assert(!bulk); nxDiv = parentIdx - 1; }
        i = 2 - (bulk ? 1U : 0U);
    }
    uint right_; // Location in parent of right-sibling pointer
    if ((i + nxDiv - parent.Overflows) == parent.Cells) right_ = parent.HdrOffset + 8U;
    else right_ = findCell(parent, i + nxDiv - parent.Overflows);
    Pid id = ConvertEx.Get4(parent.Data, right_); // Temp var to store a page number in
    RC rc;
    MemPage[] oldPages = new MemPage[NB]; // pPage and up to two siblings
    MemPage[] copyPages = new MemPage[NB]; // Private copies of apOld[] pages
    MemPage[] newPages = new MemPage[NB + 2]; // pPage and up to NB siblings after balancing
    uint oldPagesUsed = i + 1; // Number of pages in apOld[]
    uint newPagesUsed = 0; // Number of pages in apNew[]
    uint maxCells = 0; // Allocated size of apCell, szCell, aFrom.
    uint[] divs_ = new uint[NB - 1]; // Divider cells in pParent
    Pid[] countNew = new Pid[NB + 2]; // Index in aCell[] of cell after i-th page
    ushort[] sizeNew = new ushort[NB + 2]; // Combined size of cells placed on i-th page
    byte[][] cell = null; // All cells being balanced
    while (true)
    {
        rc = getAndInitPage(bt, id, ref oldPages[i]);
        if (rc != RC.OK)
        {
            //_memset(oldPages, 0, (i + 1) * sizeof(MemPage *));
            goto balance_cleanup;
        }
        maxCells += 1U + oldPages[i].Cells + oldPages[i].Overflows;
        if (i-- == 0) break;
        if (i + nxDiv == parent.OvflIdxs[0] && parent.Overflows != 0)
        {
            // NOTE(review): divs_[i] is set to 0 but the C original points apDiv[i] at the
            // overflow cell itself; cellSizePtr(parent, 0) below parses parent data at offset 0,
            // not the overflow cell — verify this path against the C source.
            divs_[i] = 0; //parent.Ovfls[0];
            id = (Pid)ConvertEx.Get4(parent.Ovfls[0].Cell, divs_[i]);
            sizeNew[i] = cellSizePtr(parent, divs_[i]);
            parent.Overflows = 0;
        }
        else
        {
            divs_[i] = findCell(parent, i + nxDiv - parent.Overflows);
            id = ConvertEx.Get4(parent.Data, divs_[i]);
            sizeNew[i] = cellSizePtr(parent, divs_[i]);
            // Drop the cell from the parent page. apDiv[i] still points to the cell within the parent, even though it has been dropped.
            // This is safe because dropping a cell only overwrites the first four bytes of it, and this function does not need the first
            // four bytes of the divider cell. So the pointer is safe to use later on.
            //
            // But not if we are in secure-delete mode. In secure-delete mode, the dropCell() routine will overwrite the entire cell with zeroes.
            // In this case, temporarily copy the cell into the aOvflSpace[] buffer. It will be copied out again as soon as the aSpace[] buffer is allocated.
            //if ((bt.BtsFlags & BTS.SECURE_DELETE) != 0)
            //{
            //    int off = (int)(divs[i]) - (int)(parent.Data);
            //    if ((off + newPages[i]) > (int)bt.UsableSize)
            //    {
            //        rc = SysEx.CORRUPT_BKPT();
            //        Array.Clear(oldPages[0].Data, 0, oldPages[0].Data.Length);
            //        goto balance_cleanup;
            //    }
            //    else
            //    {
            //        memcpy(ovflSpace,off, divs,i,, sizeNew[i]);
            //        divs[i] = ovflSpace[divs[i] - parent.Data];
            //    }
            //}
            dropCell(parent, i + nxDiv - parent.Overflows, sizeNew[i], ref rc);
        }
    }
    // Make nMaxCells a multiple of 4 in order to preserve 8-byte alignment
    maxCells = (maxCells + 3U) & ~3U;
    // Allocate space for memory structures
    uint j, k;
    //uint k = bt.PageSize + SysEx.ROUND8(sizeof(MemPage));
    //int szScratch =                               // Size of scratch memory requested
    //    maxCells * sizeof(byte *)                 // apCell
    //    + maxCells * sizeof(ushort)               // szCell
    //    + bt.PageSize                             // aSpace1
    //    + k * oldPagesUsed;                       // Page copies (apCopy)
    Pid cells = 0; // Number of cells in apCell[]
    cell = SysEx.ScratchAlloc(cell, (int)maxCells);
    if (cell == null) { rc = RC.NOMEM; goto balance_cleanup; }
    var sizeCell = new ushort[1]; // Local size of all cells in apCell[]
    if (sizeCell.Length < maxCells) Array.Resize(ref sizeCell, (int)maxCells); // sizeCell = (ushort *)&cell[maxCells];
    //var space1 = new byte[bt.PageSize * maxCells]; // Space for copies of dividers cells
    //Debug.Assert(_HASALIGNMENT8(space1));
    // Load pointers to all cells on sibling pages and the divider cells into the local apCell[] array. Make copies of the divider cells
    // into space obtained from aSpace1[] and remove the divider cells from pParent.
    //
    // If the siblings are on leaf pages, then the child pointers of the divider cells are stripped from the cells before they are copied
    // into aSpace1[]. In this way, all cells in apCell[] are without child pointers. If siblings are not leaves, then all cell in
    // apCell[] include child pointers. Either way, all cells in apCell[] are alike.
    //
    // leafCorrection: 4 if pPage is a leaf. 0 if pPage is not a leaf.
    // leafData: 1 if pPage holds key+data and pParent holds only keys.
    ushort leafCorrection = (ushort)(oldPages[0].Leaf ? 4 : 0); // 4 if pPage is a leaf. 0 if not
    uint leafData = (oldPages[0].HasData ? 1U : 0U); // True if pPage is a leaf of a LEAFDATA tree
    for (i = 0U; i < oldPagesUsed; i++)
    {
        // Before doing anything else, take a copy of the i'th original sibling. The rest of this function will use data from the copies rather
        // than the original pages since the original pages will be in the process of being overwritten.
        //MemPage oldPage = copyPages[i] = (MemPage *)&space1[bt.PageSize + k * i];
        //_memcpy(oldPage, oldPages[i], sizeof(MemPage));
        //oldPage.Data = (void *)&oldPages[1];
        //_memcpy(oldPage.Data, oldPages[i].Data, bt.PageSize);
        MemPage oldPage = copyPages[i] = oldPages[i].memcopy();
        int limit = oldPage.Cells + oldPage.Overflows;
        if (oldPage.Overflows > 0 || true) // "|| true" forces the copying path for every page in this port
        {
            for (j = 0U; j < limit; j++)
            {
                Debug.Assert(cells < maxCells);
                uint fofc = findOverflowCell(oldPage, j);
                sizeCell[cells] = cellSizePtr(oldPage, fofc);
                // Copy the Data Locally
                if (cell[cells] == null)
                    cell[cells] = new byte[sizeCell[cells]];
                else if (cell[cells].Length < sizeCell[cells])
                    Array.Resize(ref cell[cells], sizeCell[cells]);
                // NOTE(review): fofc is a uint, so "fofc < 0" is always false and the overflow-cell
                // branch below is dead code; findOverflowCell encodes overflow cells as -(n+1)
                // reinterpreted as uint. This likely needs "(int)fofc < 0" — confirm against callers.
                if (fofc < 0) // Overflow Cell
                    Buffer.BlockCopy(oldPage.Ovfls[-(fofc + 1)].Cell, 0, cell[cells], 0, sizeCell[cells]);
                else
                    Buffer.BlockCopy(oldPage.Data, (int)fofc, cell[cells], 0, sizeCell[cells]);
                cells++;
            }
        }
        //else
        //{
        //    var data = oldPage.Data;
        //    var maskPage = oldPage.MaskPage;
        //    var cellOffset = oldPage.CellOffset;
        //    for (int j = 0; j < limit; j++)
        //    {
        //        Debug.Assert(cells < maxCells);
        //        cell[cells] = findCellv2(data, maskPage, cellOffset, j);
        //        sizeCell[cells] = cellSizePtr(oldPage, cell[cells]);
        //        cells++;
        //    }
        //}
        if (i < oldPagesUsed - 1 && leafData == 0)
        {
            // Also load the divider cell that separates sibling i from sibling i+1,
            // stripping the 4-byte child pointer when the siblings are leaves.
            var size = (ushort)sizeNew[i];
            Debug.Assert(cells < maxCells);
            sizeCell[cells] = size;
            var temp = new byte[size + leafCorrection];
            Debug.Assert(size <= bt.MaxLocal + 23);
            Buffer.BlockCopy(parent.Data, (int)divs_[i], temp, 0, size);
            if (cell[cells] == null || cell[cells].Length < size)
                Array.Resize(ref cell[cells], size);
            Buffer.BlockCopy(temp, leafCorrection, cell[cells], 0, size);
            Debug.Assert(leafCorrection == 0 || leafCorrection == 4);
            sizeCell[cells] = (ushort)(sizeCell[cells] - leafCorrection);
            if (!oldPage.Leaf)
            {
                Debug.Assert(leafCorrection == 0);
                Debug.Assert(oldPage.HdrOffset == 0);
                // The right pointer of the child page pOld becomes the left pointer of the divider cell
                Buffer.BlockCopy(oldPage.Data, 8, cell[cells], 0, 4);
            }
            else
            {
                Debug.Assert(leafCorrection == 4);
                if (sizeCell[cells] < 4) // Do not allow any cells smaller than 4 bytes.
                    sizeCell[cells] = 4;
            }
            cells++;
        }
    }
    // Figure out the number of pages needed to hold all nCell cells. Store this number in "k". Also compute szNew[] which is the total
    // size of all cells on the i-th page and cntNew[] which is the index in apCell[] of the cell that divides page i from page i+1.
    // cntNew[k] should equal nCell.
    //
    // Values computed by this block:
    //   k: The total number of sibling pages
    //   szNew[i]: Spaced used on the i-th sibling page.
    //   cntNew[i]: Index in apCell[] and szCell[] for the first cell to the right of the i-th sibling page.
    //   usableSpace: Number of bytes of space available on each sibling.
    ushort subtotal; // Subtotal of bytes in cells on one page
    var usableSpace = (uint)bt.UsableSize - 12 + leafCorrection; // Bytes in pPage beyond the header
    for (subtotal = 0, k = i = 0; i < cells; i++)
    {
        Debug.Assert(i < maxCells);
        subtotal += (ushort)(sizeCell[i] + 2U); // +2 for the cell-pointer array slot
        if (subtotal > usableSpace)
        {
            sizeNew[k] = (ushort)(subtotal - sizeCell[i]);
            countNew[k] = i;
            if (leafData != 0) i--;
            subtotal = 0;
            k++;
            if (k > NB + 1) { rc = SysEx.CORRUPT_BKPT(); goto balance_cleanup; }
        }
    }
    sizeNew[k] = subtotal;
    countNew[k] = cells;
    k++;
    // The packing computed by the previous block is biased toward the siblings on the left side. The left siblings are always nearly full, while the
    // right-most sibling might be nearly empty. This block of code attempts to adjust the packing of siblings to get a better balance.
    //
    // This adjustment is more than an optimization. The packing above might be so out of balance as to be illegal. For example, the right-most
    // sibling might be completely empty. This adjustment is not optional.
    for (i = k - 1; i > 0; i--)
    {
        ushort sizeRight = sizeNew[i]; // Size of sibling on the right
        ushort sizeLeft = sizeNew[i - 1]; // Size of sibling on the left
        Pid r = countNew[i - 1] - 1; // Index of right-most cell in left sibling
        uint d = r + 1U - leafData; // Index of first cell to the left of right sibling
        Debug.Assert(d < maxCells);
        Debug.Assert(r < maxCells);
        while (sizeRight == 0 || (!bulk && sizeRight + sizeCell[d] + 2 <= sizeLeft - (sizeCell[r] + 2)))
        {
            sizeRight += (ushort)(sizeCell[d] + 2U);
            sizeLeft -= (ushort)(sizeCell[r] + 2U);
            countNew[i - 1]--;
            r = countNew[i - 1] - 1;
            d = r + 1U - leafData;
        }
        sizeNew[i] = sizeRight;
        sizeNew[i - 1] = sizeLeft;
    }
    // Either we found one or more cells (cntnew[0])>0) or pPage is a virtual root page. A virtual root page is when the real root
    // page is page 1 and we are the only child of that page.
    //
    // UPDATE: The assert() below is not necessarily true if the database file is corrupt. The corruption will be detected and reported later
    // in this procedure so there is no need to act upon it now.
#if false
    Debug.Assert(countNew[0] > 0 || (parent.ID == 1 && parent.Cells == 0));
#endif
    TRACE("BALANCE: old: %d %d %d ", oldPages[0].ID, oldPagesUsed >= 2 ? oldPages[1].ID : 0, oldPagesUsed >= 3 ? oldPages[2].ID : 0);
    // Allocate k new pages. Reuse old pages where possible.
    if (oldPages[0].ID <= 1) { rc = SysEx.CORRUPT_BKPT(); goto balance_cleanup; }
    int pageFlags = oldPages[0].Data[0]; // Value of pPage->aData[0]
    for (i = 0; i < k; i++)
    {
        var newPage = new MemPage();
        if (i < oldPagesUsed)
        {
            newPage = newPages[i] = oldPages[i];
            oldPages[i] = null;
            rc = Pager.Write(newPage.DBPage);
            newPagesUsed++;
            if (rc != RC.OK) goto balance_cleanup;
        }
        else
        {
            Debug.Assert(i > 0);
            rc = allocateBtreePage(bt, ref newPage, ref id, (bulk ? 1 : id), BTALLOC.ANY);
            if (rc != RC.OK) goto balance_cleanup;
            newPages[i] = newPage;
            newPagesUsed++;
            // Set the pointer-map entry for the new sibling page.
#if !OMIT_AUTOVACUUM
            if (bt.AutoVacuum)
            {
                ptrmapPut(bt, newPage.ID, PTRMAP.BTREE, parent.ID, ref rc);
                if (rc != RC.OK) goto balance_cleanup;
            }
#endif
        }
    }
    // Free any old pages that were not reused as new pages.
    while (i < oldPagesUsed)
    {
        freePage(oldPages[i], ref rc);
        if (rc != RC.OK) goto balance_cleanup;
        releasePage(oldPages[i]);
        oldPages[i] = null;
        i++;
    }
    // Put the new pages in ascending order. This helps to keep entries in the disk file in order so that a scan
    // of the table is a linear scan through the file. That in turn helps the operating system to deliver pages
    // from the disk more rapidly.
    //
    // An O(n^2) insertion sort algorithm is used, but since n is never more than NB (a small constant), that should
    // not be a problem.
    //
    // When NB==3, this one optimization makes the database about 25% faster for large insertions and deletions.
    for (i = 0; i < k - 1; i++)
    {
        Pid minV = newPages[i].ID;
        Pid minI = (Pid)i;
        for (j = i + 1; j < k; j++)
        {
            if (newPages[j].ID < minV) { minI = j; minV = newPages[j].ID; }
        }
        if (minI > i)
        {
            var t = newPages[i]; newPages[i] = newPages[minI]; newPages[minI] = t;
        }
    }
    TRACE("new: %d(%d) %d(%d) %d(%d) %d(%d) %d(%d)\n", newPages[0].ID, sizeNew[0], newPagesUsed >= 2 ? newPages[1].ID : 0, newPagesUsed >= 2 ? sizeNew[1] : 0, newPagesUsed >= 3 ? newPages[2].ID : 0, newPagesUsed >= 3 ? sizeNew[2] : 0, newPagesUsed >= 4 ? newPages[3].ID : 0, newPagesUsed >= 4 ? sizeNew[3] : 0, newPagesUsed >= 5 ? newPages[4].ID : 0, newPagesUsed >= 5 ? sizeNew[4] : 0);
    Debug.Assert(Pager.Iswriteable(parent.DBPage));
    ConvertEx.Put4(parent.Data, right_, newPages[newPagesUsed - 1].ID);
    // Evenly distribute the data in apCell[] across the new pages. Insert divider cells into pParent as necessary.
    int ovflSpaceID = 0; // First unused byte of aOvflSpace[]
    j = 0;
    for (i = 0; i < newPagesUsed; i++)
    {
        // Assemble the new sibling page.
        MemPage newPage = newPages[i];
        Debug.Assert(j < maxCells);
        zeroPage(newPage, pageFlags);
        assemblePage(newPage, (int)(countNew[i] - j), cell, sizeCell, (int)j);
        Debug.Assert(newPage.Cells > 0 || (newPagesUsed == 1 && countNew[0] == 0));
        Debug.Assert(newPage.Overflows == 0);
        j = countNew[i];
        // If the sibling page assembled above was not the right-most sibling, insert a divider cell into the parent page.
        Debug.Assert(i < newPagesUsed - 1 || j == cells);
        if (j < cells)
        {
            Debug.Assert(j < maxCells);
            byte[] pCell = cell[j];
            int size = sizeCell[j] + leafCorrection;
            byte[] pTemp = new byte[size]; //&aOvflSpace[iOvflSpace];
            if (!newPage.Leaf)
                Buffer.BlockCopy(pCell, 0, newPage.Data, 8, 4);
            else if (leafData != 0)
            {
                // If the tree is a leaf-data tree, and the siblings are leaves, then there is no divider cell in apCell[]. Instead, the divider
                // cell consists of the integer key for the right-most cell of the sibling-page assembled above only.
                j--;
                CellInfo info = new CellInfo();
                btreeParseCellPtr(newPage, cell[j], ref info);
                pCell = pTemp;
                size = 4 + ConvertEx.PutVarint(pCell, 4, (ulong)info.Key);
                pTemp = null;
            }
            else
            {
                {
                    byte[] cell_4 = new byte[pCell.Length + 4];
                    Buffer.BlockCopy(pCell, 0, cell_4, 4, pCell.Length);
                    pCell = cell_4;
                }
                //pCell -= 4;
                // Obscure case for non-leaf-data trees: If the cell at pCell was previously stored on a leaf node, and its reported size was 4
                // bytes, then it may actually be smaller than this (see btreeParseCellPtr(), 4 bytes is the minimum size of
                // any cell). But it is important to pass the correct size to insertCell(), so reparse the cell now.
                //
                // Note that this can never happen in an SQLite data file, as all cells are at least 4 bytes. It only happens in b-trees used
                // to evaluate "IN (SELECT ...)" and similar clauses.
                if (sizeCell[j] == 4)
                {
                    Debug.Assert(leafCorrection == 4);
                    size = cellSizePtr(parent, pCell);
                }
            }
            ovflSpaceID += size;
            Debug.Assert(size <= bt.MaxLocal + 23);
            Debug.Assert(ovflSpaceID <= (int)bt.PageSize);
            insertCell(parent, nxDiv, pCell, (ushort)size, pTemp, newPage.ID, ref rc);
            if (rc != RC.OK) goto balance_cleanup;
            Debug.Assert(Pager.Iswriteable(parent.DBPage));
            j++;
            nxDiv++;
        }
    }
    Debug.Assert(j == cells);
    Debug.Assert(oldPagesUsed > 0);
    Debug.Assert(newPagesUsed > 0);
    if ((pageFlags & PTF_LEAF) == 0)
        Buffer.BlockCopy(copyPages[oldPagesUsed - 1].Data, 8, newPages[newPagesUsed - 1].Data, 8, 4); //u8* zChild = &apCopy[nOld - 1].aData[8]; memcpy( apNew[nNew - 1].aData[8], zChild, 4 );
    if (isRoot && parent.Cells == 0 && parent.HdrOffset <= newPages[0].Frees)
    {
        // The root page of the b-tree now contains no cells. The only sibling page is the right-child of the parent. Copy the contents of the
        // child page into the parent, decreasing the overall height of the b-tree structure by one. This is described as the "balance-shallower"
        // sub-algorithm in some documentation.
        //
        // If this is an auto-vacuum database, the call to copyNodeContent() sets all pointer-map entries corresponding to database image pages
        // for which the pointer is stored within the content being copied.
        //
        // The second assert below verifies that the child page is defragmented (it must be, as it was just reconstructed using assemblePage()). This
        // is important if the parent page happens to be page 1 of the database image.
        Debug.Assert(newPagesUsed == 1);
        Debug.Assert(newPages[0].Frees == (ConvertEx.Get2(newPages[0].Data, 5) - newPages[0].CellOffset - newPages[0].Cells * 2));
        copyNodeContent(newPages[0], parent, ref rc);
        freePage(newPages[0], ref rc);
    }
#if !OMIT_AUTOVACUUM
    else if (bt.AutoVacuum)
    {
        // Fix the pointer-map entries for all the cells that were shifted around. There are several different types of pointer-map entries that need to
        // be dealt with by this routine. Some of these have been set already, but many have not. The following is a summary:
        //
        // 1) The entries associated with new sibling pages that were not siblings when this function was called. These have already
        //    been set. We don't need to worry about old siblings that were moved to the free-list - the freePage() code has taken care
        //    of those.
        //
        // 2) The pointer-map entries associated with the first overflow page in any overflow chains used by new divider cells. These
        //    have also already been taken care of by the insertCell() code.
        //
        // 3) If the sibling pages are not leaves, then the child pages of cells stored on the sibling pages may need to be updated.
        //
        // 4) If the sibling pages are not internal intkey nodes, then any overflow pages used by these cells may need to be updated
        //    (internal intkey nodes never contain pointers to overflow pages).
        //
        // 5) If the sibling pages are not leaves, then the pointer-map entries for the right-child pages of each sibling may need
        //    to be updated.
        //
        // Cases 1 and 2 are dealt with above by other code. The next block deals with cases 3 and 4 and the one after that, case 5. Since
        // setting a pointer map entry is a relatively expensive operation, this code only sets pointer map entries for child or overflow pages that have
        // actually moved between pages.
        MemPage newPage = newPages[0];
        MemPage oldPage = copyPages[0];
        uint overflows = oldPage.Overflows;
        Pid nextOldID = oldPage.Cells + overflows;
        int overflowID = (overflows != 0 ? oldPage.OvflIdxs[0] : -1);
        j = 0; // Current 'old' sibling page
        k = 0; // Current 'new' sibling page
        for (i = 0; i < cells; i++)
        {
            bool isDivider = false;
            while (i == nextOldID)
            {
                // Cell i is the cell immediately following the last cell on old sibling page j. If the siblings are not leaf pages of an
                // intkey b-tree, then cell i was a divider cell.
                Debug.Assert(j + 1 < copyPages.Length);
                Debug.Assert(j + 1 < oldPagesUsed);
                oldPage = copyPages[++j];
                nextOldID = (i + (leafData == 0 ? 1U : 0U) + oldPage.Cells + oldPage.Overflows);
                if (oldPage.Overflows != 0)
                {
                    overflows = oldPage.Overflows;
                    overflowID = (int)(i + (leafData == 0 ? 1U : 0U) + oldPage.OvflIdxs[0]);
                }
                isDivider = (leafData == 0);
            }
            Debug.Assert(overflows > 0 || overflowID < i);
            Debug.Assert(overflows < 2 || oldPage.OvflIdxs[0] == oldPage.OvflIdxs[1] - 1);
            Debug.Assert(overflows < 3 || oldPage.OvflIdxs[1] == oldPage.OvflIdxs[2] - 1);
            if (i == overflowID)
            {
                isDivider = true;
                if (--overflows > 0) overflowID++;
            }
            if (i == countNew[k])
            {
                // Cell i is the cell immediately following the last cell on new sibling page k. If the siblings are not leaf pages of an
                // intkey b-tree, then cell i is a divider cell.
                newPage = newPages[++k];
                if (leafData == 0) continue;
            }
            Debug.Assert(j < oldPagesUsed);
            Debug.Assert(k < newPagesUsed);
            // If the cell was originally divider cell (and is not now) or an overflow cell, or if the cell was located on a different sibling
            // page before the balancing, then the pointer map entries associated with any child or overflow pages need to be updated.
            if (isDivider || oldPage.ID != newPage.ID)
            {
                if (leafCorrection == 0)
                    ptrmapPut(bt, ConvertEx.Get4(cell[i]), PTRMAP.BTREE, newPage.ID, ref rc);
                if (sizeCell[i] > newPage.MinLocal)
                    ptrmapPutOvflPtr(newPage, cell[i], ref rc);
            }
        }
        // NOTE(review): this #endif sits before the closing brace of the "else if (bt.AutoVacuum)"
        // block, so compiling with OMIT_AUTOVACUUM defined would leave a stray '}'. Harmless in the
        // normal build — verify if OMIT_AUTOVACUUM is ever used.
#endif
        if (leafCorrection == 0)
        {
            for (i = 0; i < newPagesUsed; i++)
            {
                uint key = ConvertEx.Get4(newPages[i].Data, 8);
                ptrmapPut(bt, key, PTRMAP.BTREE, newPages[i].ID, ref rc);
            }
        }
#if false
        // The ptrmapCheckPages() contains assert() statements that verify that all pointer map pages are set correctly. This is helpful while
        // debugging. This is usually disabled because a corrupt database may cause an assert() statement to fail.
        ptrmapCheckPages(newPages, newPagesUsed);
        ptrmapCheckPages(parent, 1);
#endif
    }
    Debug.Assert(parent.IsInit);
    TRACE("BALANCE: finished: old=%d new=%d cells=%d\n", oldPagesUsed, newPagesUsed, cells);
    // Cleanup before returning.
balance_cleanup:
    SysEx.ScratchFree(cell);
    for (i = 0; i < oldPagesUsed; i++)
        releasePage(oldPages[i]);
    for (i = 0; i < newPagesUsed; i++)
        releasePage(newPages[i]);
    return rc;
}
// Parse the cell_-th cell on 'page' into 'info'. Convenience wrapper that first
// resolves the cell's offset via findCell() and then delegates to btreeParseCellPtr().
static void parseCell(MemPage page, uint cell_, ref CellInfo info)
{
    var cellOffset = findCell(page, cell_);
    btreeParseCellPtr(page, cellOffset, ref info);
}
// "balance-deeper" sub-algorithm: called when the root page overflows. Allocates a new
// child page, copies the entire contents of the root (including its overflow cells)
// into the child, then empties the root and installs the child as the root's sole
// right-child — increasing the tree height by one.
//
// root:     the overflowing root page of the b-tree (made writeable here)
// childOut: receives the newly allocated child page on success, null on failure
// Returns RC.OK on success, else an error code (the child is released on error).
static RC balance_deeper(MemPage root, ref MemPage childOut)
{
    var bt = root.Bt; // The BTree
    Debug.Assert(root.Overflows > 0);
    Debug.Assert(MutexEx.Held(bt.Mutex));
    // Make pRoot, the root page of the b-tree, writable. Allocate a new page that will become the new right-child of pPage. Copy the contents
    // of the node stored on pRoot into the new child page.
    MemPage child = null; // Pointer to a new child page
    Pid childID = 0; // Page number of the new child page
    var rc = Pager.Write(root.DBPage);
    if (rc == RC.OK)
    {
        rc = allocateBtreePage(bt, ref child, ref childID, root.ID, BTALLOC.ANY);
        copyNodeContent(root, child, ref rc);
#if !OMIT_AUTOVACUUM
        if (bt.AutoVacuum)
            ptrmapPut(bt, childID, PTRMAP.BTREE, root.ID, ref rc);
#endif
    }
    if (rc != RC.OK)
    {
        childOut = null;
        releasePage(child);
        return rc;
    }
    Debug.Assert(Pager.Iswriteable(child.DBPage));
    Debug.Assert(Pager.Iswriteable(root.DBPage));
    Debug.Assert(child.Cells == root.Cells);
    TRACE("BALANCE: copy root %d into %d\n", root.ID, child.ID);
    // Copy the overflow cells from pRoot to pChild
    Array.Copy(root.OvflIdxs, child.OvflIdxs, root.Overflows);
    Array.Copy(root.Ovfls, child.Ovfls, root.Overflows);
    child.Overflows = root.Overflows;
    // Zero the contents of pRoot. Then install pChild as the right-child.
    zeroPage(root, child.Data[0] & ~PTF_LEAF);
    ConvertEx.Put4(root.Data, root.HdrOffset + 8, childID);
    childOut = child;
    return RC.OK;
}
// Compute the total size in bytes of the cell at offset cell_ on 'page', by copying the
// cell's header (at most 13 bytes: up to a 4-byte child pointer plus a 9-byte varint)
// into a scratch buffer and parsing it with btreeParseCellPtr().
//
// cell_ may also be an overflow-cell marker produced by findOverflowCell(), which encodes
// overflow cell n as -(n+1) reinterpreted as uint.
//
// BUG FIX: the original test was "if (cell_ < 0)", which is always false for a uint
// (the overflow branch was dead code, and overflow-encoded values near uint.MaxValue
// fell through to the raw-data branches). Reinterpret the value as a signed int to
// recover the intended -(n+1) encoding.
static ushort cellSizePtr(MemPage page, uint cell_) // For C#
{
    var info = new CellInfo();
    var cell2 = new byte[13]; // Minimum Size = (2 bytes of Header or (4) Child Pointer) + (maximum of) 9 bytes data
    int signedCell_ = unchecked((int)cell_); // recover findOverflowCell()'s -(n+1) encoding
    if (signedCell_ < 0) // Overflow Cell
    {
        byte[] ovflCell = page.Ovfls[-(signedCell_ + 1)].Cell;
        Buffer.BlockCopy(ovflCell, 0, cell2, 0, cell2.Length < ovflCell.Length ? cell2.Length : ovflCell.Length);
    }
    else if (cell_ >= page.Data.Length + 1 - cell2.Length)
        // Cell header lies near the end of the page: copy only the bytes that exist.
        Buffer.BlockCopy(page.Data, (int)cell_, cell2, 0, (int)(page.Data.Length - cell_));
    else
        Buffer.BlockCopy(page.Data, (int)cell_, cell2, 0, cell2.Length);
    btreeParseCellPtr(page, cell2, ref info);
    return info.Size;
}
// Create a new b-tree table in the database and return its root page number in tableID.
// In auto-vacuum mode the new root must be placed immediately after the current largest
// root page (meta[3]+1), which may require relocating whatever page currently occupies
// that slot; otherwise any free page is used.
//
// p:              the Btree handle (must hold its mutex, inside a write transaction)
// tableID:        out — root page number of the new table
// createTabFlags: BTREE_INTKEY for a rowid table, else a zero-data index tree
// Returns RC.OK on success or an error code.
static RC btreeCreateTable(Btree p, ref int tableID, int createTabFlags)
{
    BtShared bt = p.Bt;
    Debug.Assert(p.HoldsMutex());
    Debug.Assert(bt.InTransaction == TRANS.WRITE);
    Debug.Assert((bt.BtsFlags & BTS.READ_ONLY) == 0);
    RC rc;
    MemPage root = new MemPage();
    Pid rootID = 0;
#if OMIT_AUTOVACUUM
    rc = allocateBtreePage(bt, ref root, ref rootID, 1, BTALLOC.ANY);
    if (rc != RC.OK) return rc;
#else
    if (bt.AutoVacuum)
    {
        // Creating a new table may probably require moving an existing database to make room for the new tables root page. In case this page turns
        // out to be an overflow page, delete all overflow page-map caches held by open cursors.
        invalidateAllOverflowCache(bt);
        // Read the value of meta[3] from the database to determine where the root page of the new table should go. meta[3] is the largest root-page
        // created so far, so the new root-page is (meta[3]+1).
        p.GetMeta(META.LARGEST_ROOT_PAGE, ref rootID);
        rootID++;
        // The new root-page may not be allocated on a pointer-map page, or the PENDING_BYTE page.
        while (rootID == PTRMAP_PAGENO(bt, rootID) || rootID == PENDING_BYTE_PAGE(bt))
            rootID++;
        Debug.Assert(rootID >= 3);
        // Allocate a page. The page that currently resides at pgnoRoot will be moved to the allocated page (unless the allocated page happens
        // to reside at pgnoRoot).
        Pid moveID = 0; // Move a page here to make room for the root-page
        MemPage pageMove = new MemPage(); // The page to move to.
        rc = allocateBtreePage(bt, ref pageMove, ref moveID, rootID, BTALLOC.EXACT);
        if (rc != RC.OK) return rc;
        if (moveID != rootID)
        {
            releasePage(pageMove);
            // Move the page currently at pgnoRoot to pgnoMove.
            rc = btreeGetPage(bt, rootID, ref root, false);
            if (rc != RC.OK) return rc;
            // pgnoRoot is the page that will be used for the root-page of the new table (assuming an error did not occur). But we were
            // allocated pgnoMove. If required (i.e. if it was not allocated by extending the file), the current page at position pgnoMove
            // is already journaled.
            PTRMAP type = 0;
            Pid ptrPageID = 0;
            rc = ptrmapGet(bt, rootID, ref type, ref ptrPageID);
            if (type == PTRMAP.ROOTPAGE || type == PTRMAP.FREEPAGE)
                rc = SysEx.CORRUPT_BKPT();
            if (rc != RC.OK) { releasePage(root); return rc; }
            Debug.Assert(type != PTRMAP.ROOTPAGE);
            Debug.Assert(type != PTRMAP.FREEPAGE);
            rc = relocatePage(bt, root, type, ptrPageID, moveID, false);
            releasePage(root);
            // Obtain the page at pgnoRoot
            if (rc != RC.OK) return rc;
            rc = btreeGetPage(bt, rootID, ref root, false);
            if (rc != RC.OK) return rc;
            rc = Pager.Write(root.DBPage);
            if (rc != RC.OK) { releasePage(root); return rc; }
        }
        else
            root = pageMove;
        // Update the pointer-map and meta-data with the new root-page number.
        ptrmapPut(bt, rootID, PTRMAP.ROOTPAGE, 0, ref rc);
        if (rc != RC.OK) { releasePage(root); return rc; }
        // When the new root page was allocated, page 1 was made writable in order either to increase the database filesize, or to decrement the
        // freelist count. Hence, the sqlite3BtreeUpdateMeta() call cannot fail.
        Debug.Assert(Pager.Iswriteable(bt.Page1.DBPage));
        rc = p.UpdateMeta(META.LARGEST_ROOT_PAGE, rootID);
        if (C._NEVER(rc != RC.OK)) { releasePage(root); return rc; }
    }
    else
    {
        rc = allocateBtreePage(bt, ref root, ref rootID, 1, BTALLOC.ANY);
        if (rc != RC.OK) return rc;
    }
#endif
    Debug.Assert(Pager.Iswriteable(root.DBPage));
    int ptfFlags; // Page-type flags for the root page of new table
    if ((createTabFlags & BTREE_INTKEY) != 0)
        ptfFlags = PTF_INTKEY | PTF_LEAFDATA | PTF_LEAF;
    else
        ptfFlags = PTF_ZERODATA | PTF_LEAF;
    zeroPage(root, ptfFlags);
    Pager.Unref(root.DBPage);
    Debug.Assert((bt.OpenFlags & OPEN.SINGLE) == 0 || rootID == 2);
    tableID = (int)rootID;
    return RC.OK;
}
// Compute the total size in bytes of the cell held in the byte[] 'cell', by walking the
// cell header directly (child pointer, payload-length varint, and — for intkey pages —
// the rowid varint) and applying the same local/overflow split as btreeParseCellPtr().
// Returns the number of bytes the cell occupies on its page (never less than 4).
static ushort cellSizePtr(MemPage page, byte[] cell)
{
#if DEBUG
    // The value returned by this function should always be the same as the (CellInfo.nSize) value found by doing a full parse of the
    // cell. If SQLITE_DEBUG is defined, an assert() at the bottom of this function verifies that this invariant is not violated.
    var debuginfo = new CellInfo();
    btreeParseCellPtr(page, cell, ref debuginfo);
#else
    var debuginfo = new CellInfo();
#endif
    var iter = page.ChildPtrSize; // cursor into 'cell'; starts past the child pointer (0 on leaves)
    uint size = 0;
    if (page.IntKey)
    {
        // On intkey pages HasData implies a leaf, so iter is 0 here and the no-offset
        // GetVarint32 overload reads from the start of the cell.
        if (page.HasData)
            iter += ConvertEx.GetVarint32(cell, out size); // iter += ConvertEx.GetVarint32(iter, out size);
        else
            size = 0;
        // pIter now points at the 64-bit integer key value, a variable length integer. The following block moves pIter to point at the first byte
        // past the end of the key value.
        int end = iter + 9; // end = &pIter[9];
        while (((cell[iter++]) & 0x80) != 0 && iter < end) { } // while ((iter++) & 0x80 && iter < end);
    }
    else
        iter += ConvertEx.GetVarint32(cell, iter, out size); //pIter += getVarint32( pIter, out nSize );
    if (size > page.MaxLocal)
    {
        // Payload spills to overflow pages: only 'Local' bytes plus the 4-byte
        // first-overflow-page number are stored in the cell itself.
        int minLocal = page.MinLocal;
        size = (uint)(minLocal + (size - minLocal) % (page.Bt.UsableSize - 4));
        if (size > page.MaxLocal)
            size = (uint)minLocal;
        size += 4;
    }
    size += (uint)iter; // size += (uint32)(iter - cell);
    // The minimum size of any cell is 4 bytes.
    if (size < 4)
        size = 4;
    Debug.Assert(size == debuginfo.Size);
    return (ushort)size;
}
//#define ptrmapPut(w,x,y,z,rc)
//#define ptrmapGet(w,x,y,z) RC.OK
//#define ptrmapPutOvflPtr(x, y, rc)
#endif
// Return the byte offset within page.Data of the cell-th cell on the page,
// read from the 2-byte big-endian slot in the cell-pointer array.
static uint findCell(MemPage page, uint cell)
{
    var slotOffset = page.CellOffset + 2 * cell;
    return ConvertEx.Get2(page.Data, slotOffset);
}
// Debug-only stub mirroring the C original's "#define cellSize(...) -1": this overload
// must never be relied upon for a real size.
// BUG FIX: "return -1;" does not compile for a ushort return type (no implicit
// conversion of the constant -1); return the same bit pattern explicitly.
static ushort cellSize(MemPage page, int cell)
{
    return ushort.MaxValue; // (ushort)-1 sentinel
}
// Locate the cell-th entry on 'page', taking the page's in-memory overflow cells into
// account. If the requested index corresponds to overflow cell n, the value -(n+1)
// reinterpreted as uint is returned (callers must cast back to int to detect this);
// otherwise the index is adjusted past any preceding overflow cells and the ordinary
// on-page cell offset is returned via findCell().
static uint findOverflowCell(MemPage page, uint cell)
{
    Debug.Assert(MutexEx.Held(page.Bt.Mutex));
    // Walk overflow cells from highest index down; each one at or below 'cell'
    // shifts the effective on-page index left by one.
    for (var i = page.Overflows - 1; i >= 0; i--)
    {
        ushort k = page.OvflIdxs[i];
        if (k <= cell)
        {
            if (k == cell)
                return (uint)-i - 1; //page.Ovfls[i]; // Negative Offset means overflow cells
            cell--;
        }
    }
    return findCell(page, (uint)cell);
}
// Defragment 'page' in place: slide all cells to the end of the usable area so the free
// space becomes one contiguous gap between the cell-pointer array and the cell content.
// Works from a temp copy of the content area, rewriting each cell and its pointer slot.
// Returns RC.OK, or CORRUPT if the cell pointers / sizes are inconsistent.
static RC defragmentPage(MemPage page)
{
    Debug.Assert(Pager.Iswriteable(page.DBPage));
    Debug.Assert(page.Bt != null);
    Debug.Assert(page.Bt.UsableSize <= Pager.MAX_PAGE_SIZE);
    Debug.Assert(page.Overflows == 0);
    Debug.Assert(MutexEx.Held(page.Bt.Mutex));
    byte[] temp = page.Bt.Pager.get_TempSpace(); // Temp area for cell content
    byte[] data = page.Data; // The page data
    int hdr = page.HdrOffset; // Offset to the page header
    int cellOffset = page.CellOffset; // Offset to the cell pointer array
    int cells = page.Cells; // Number of cells on the page
    Debug.Assert(cells == ConvertEx.Get2(data, hdr + 3));
    uint usableSize = page.Bt.UsableSize; // Number of usable bytes on a page
    uint cbrk = (uint)ConvertEx.Get2(data, hdr + 5); // Offset to the cell content area
    // Snapshot the current content area; cells are copied back from 'temp' below.
    Buffer.BlockCopy(data, (int)cbrk, temp, (int)cbrk, (int)(usableSize - cbrk)); // memcpy(temp[cbrk], ref data[cbrk], usableSize - cbrk);
    cbrk = usableSize;
    ushort cellFirst = (ushort)(cellOffset + 2 * cells); // First allowable cell index
    ushort cellLast = (ushort)(usableSize - 4); // Last possible cell index
    var addr = 0; // The i-th cell pointer
    for (var i = 0; i < cells; i++)
    {
        addr = cellOffset + i * 2;
        uint pc = ConvertEx.Get2(data, addr); // Address of a i-th cell
#if !ENABLE_OVERSIZE_CELL_CHECK
        // These conditions have already been verified in btreeInitPage() if ENABLE_OVERSIZE_CELL_CHECK is defined
        if (pc < cellFirst || pc > cellLast)
            return SysEx.CORRUPT_BKPT();
#endif
        Debug.Assert(pc >= cellFirst && pc <= cellLast);
        uint size = cellSizePtr(page, temp, pc); // Size of a cell
        cbrk -= size;
        // NOTE(review): the two preprocessor branches below are byte-identical; the C original
        // performs a cheaper check in the non-ENABLE_OVERSIZE_CELL_CHECK build.
#if ENABLE_OVERSIZE_CELL_CHECK
        if (cbrk < cellFirst || pc + size > usableSize)
            return SysEx.CORRUPT_BKPT();
#else
        if (cbrk < cellFirst || pc + size > usableSize)
            return SysEx.CORRUPT_BKPT();
#endif
        Debug.Assert(cbrk + size <= usableSize && cbrk >= cellFirst);
        Buffer.BlockCopy(temp, (int)pc, data, (int)cbrk, (int)size);
        ConvertEx.Put2(data, addr, cbrk); // repoint the cell-pointer slot at the new location
    }
    Debug.Assert(cbrk >= cellFirst);
    // Reset the content-area pointer and clear the freeblock list / fragment count.
    ConvertEx.Put2(data, hdr + 5, cbrk);
    data[hdr + 1] = 0;
    data[hdr + 2] = 0;
    data[hdr + 7] = 0;
    addr = cellOffset + 2 * cells;
    Array.Clear(data, addr, (int)(cbrk - addr));
    Debug.Assert(Pager.Iswriteable(page.DBPage));
    if (cbrk - cellFirst != page.Frees)
        return SysEx.CORRUPT_BKPT();
    return RC.OK;
}
static RC clearDatabasePage(BtShared bt, Pid id, bool freePageFlag, ref int changes) { Debug.Assert(MutexEx.Held(bt.Mutex)); if (id > btreePagecount(bt)) return SysEx.CORRUPT_BKPT(); MemPage page = new MemPage(); var rc = getAndInitPage(bt, id, ref page); if (rc != RC.OK) return rc; for (uint i = 0U; i < page.Cells; i++) { uint cell_ = findCell(page, i); if (!page.Leaf) { rc = clearDatabasePage(bt, ConvertEx.Get4(page.Data, cell_), true, ref changes); if (rc != RC.OK) goto cleardatabasepage_out; } rc = clearCell(page, cell_); if (rc != RC.OK) goto cleardatabasepage_out; } if (!page.Leaf) { rc = clearDatabasePage(bt, ConvertEx.Get4(page.Data, 8), true, ref changes); if (rc != RC.OK) goto cleardatabasepage_out; } else { Debug.Assert(page.IntKey); changes += page.Cells; } if (freePageFlag) freePage(page, ref rc); else if ((rc = Pager.Write(page.DBPage)) == 0) zeroPage(page, page.Data[0] | PTF_LEAF); cleardatabasepage_out: releasePage(page); return rc; }
static RC freeSpace(MemPage page, uint start, int size) { return freeSpace(page, (int)start, size); }
// Drop the table whose root page is 'tableID'. On success, when auto-vacuum
// forced another root page to be moved into the freed slot, 'movedID' is set
// to the page number that was relocated (0 otherwise).
// Requires an open write transaction; returns LOCKED_SHAREDCACHE if a cursor
// is still open on the shared b-tree (normally caught long before this point).
static RC btreeDropTable(Btree p, Pid tableID, ref int movedID)
{
    BtShared bt = p.Bt;
    Debug.Assert(p.HoldsMutex());
    Debug.Assert(p.InTrans == TRANS.WRITE);
    // It is illegal to drop a table if any cursors are open on the database. This is because in auto-vacuum mode the backend may
    // need to move another root-page to fill a gap left by the deleted root page. If an open cursor was using this page a problem would occur.
    //
    // This error is caught long before control reaches this point.
    if (C._NEVER(bt.Cursor != null))
    {
        BContext.ConnectionBlocked(p.Ctx, bt.Cursor.Btree.Ctx);
        return RC.LOCKED_SHAREDCACHE;
    }
    MemPage page = null;
    RC rc = btreeGetPage(bt, (Pid)tableID, ref page, false);
    if (rc != RC.OK)
        return rc;
    // Delete every row in the table before touching its root page.
    int dummy0 = 0;
    rc = p.ClearTable((int)tableID, ref dummy0);
    if (rc != RC.OK)
    {
        releasePage(page);
        return rc;
    }
    movedID = 0;
    if (tableID > 1)
    {
#if OMIT_AUTOVACUUM
        freePage(page, ref rc);
        releasePage(page);
#else
        if (bt.AutoVacuum)
        {
            Pid maxRootID = 0;
            p.GetMeta(META.LARGEST_ROOT_PAGE, ref maxRootID);
            if (tableID == maxRootID)
            {
                // If the table being dropped is the table with the largest root-page number in the database, put the root page on the free list.
                freePage(page, ref rc);
                releasePage(page);
                if (rc != RC.OK)
                    return rc;
            }
            else
            {
                // The table being dropped does not have the largest root-page number in the database. So move the page that does into the
                // gap left by the deleted root-page.
                releasePage(page);
                MemPage move = new MemPage();
                rc = btreeGetPage(bt, maxRootID, ref move, false);
                if (rc != RC.OK)
                    return rc;
                rc = relocatePage(bt, move, PTRMAP.ROOTPAGE, 0, tableID, false);
                releasePage(move);
                if (rc != RC.OK)
                    return rc;
                // Re-fetch the now-vacant old location of the moved root page
                // and place it on the free list.
                move = null;
                rc = btreeGetPage(bt, maxRootID, ref move, false);
                freePage(move, ref rc);
                releasePage(move);
                if (rc != RC.OK)
                    return rc;
                movedID = (int)maxRootID;
            }
            // Set the new 'max-root-page' value in the database header. This is the old value less one, less one more if that happens to
            // be a root-page number, less one again if that is the PENDING_BYTE_PAGE.
            maxRootID--;
            while (maxRootID == PENDING_BYTE_PAGE(bt) || PTRMAP_ISPAGE(bt, maxRootID))
                maxRootID--;
            Debug.Assert(maxRootID != PENDING_BYTE_PAGE(bt));
            rc = p.UpdateMeta(META.LARGEST_ROOT_PAGE, maxRootID);
        }
        else
        {
            freePage(page, ref rc);
            releasePage(page);
        }
#endif
    }
    else
    {
        // If sqlite3BtreeDropTable was called on page 1. This really never should happen except in a corrupt database.
        zeroPage(page, PTF_INTKEY | PTF_LEAF);
        releasePage(page);
    }
    return rc;
}
static RC decodeFlags(MemPage page, int flagByte) { Debug.Assert(page.HdrOffset == (page.ID == 1 ? 100 : 0)); Debug.Assert(MutexEx.Held(page.Bt.Mutex)); page.Leaf = ((flagByte >> 3) != 0); Debug.Assert(PTF_LEAF == 1 << 3); flagByte &= ~PTF_LEAF; page.ChildPtrSize = (byte)(page.Leaf ? 0 : 4); BtShared bt = page.Bt; // A copy of pPage.pBt if (flagByte == (PTF_LEAFDATA | PTF_INTKEY)) { page.IntKey = true; page.HasData = page.Leaf; page.MaxLocal = bt.MaxLeaf; page.MinLocal = bt.MinLeaf; } else if (flagByte == PTF_ZERODATA) { page.IntKey = false; page.HasData = false; page.MaxLocal = bt.MaxLocal; page.MinLocal = bt.MinLocal; } else return SysEx.CORRUPT_BKPT(); page.Max1bytePayload = bt.Max1bytePayload; return RC.OK; }
static void btreeParseCellPtr(MemPage page, uint cellIdx, ref CellInfo info) { btreeParseCellPtr(page, page.Data, cellIdx, ref info); }
static MemPage va_arg(object[] ap, MemPage sysType) { return((MemPage)ap[vaNEXT++]); }
static void btreeParseCellPtr(MemPage page, byte[] cell, ref CellInfo info) { btreeParseCellPtr(page, cell, 0, ref info); }