// This routine is called prior to sqlite3PagerCommit when a transaction is committed
// for an auto-vacuum database: it relocates in-use pages down into the freed region
// of the file until the freelist is empty, then truncates the database image.
//
// Parameters:
//   pBt - the shared btree structure being committed (its mutex must be held).
// Returns RC.OK on success, RC.CORRUPT if the header/pointer-map state is
// inconsistent, or the error code of a failing sub-operation (after which the
// pager transaction is rolled back).
internal static RC autoVacuumCommit(BtShared pBt)
{
    var rc = RC.OK;
    var pPager = pBt.Pager;
#if DEBUG
    // Snapshot the pager reference count so we can assert at the end that this
    // routine released every page it acquired.
    var nRef = pPager.RefCount;
#else
    var nRef = 0;
#endif
    Debug.Assert(MutexEx.Held(pBt.Mutex));
    Btree.invalidateAllOverflowCache(pBt);
    Debug.Assert(pBt.AutoVacuum);
    // Full auto-vacuum only; in incremental mode the vacuum happens via explicit
    // incremental-vacuum steps instead of at commit time.
    if (!pBt.IncrVacuum)
    {
        var nOrig = pBt.btreePagecount(); // Database size before freeing
        if (PTRMAP_ISPAGE(pBt, nOrig) || nOrig == PENDING_BYTE_PAGE(pBt))
        {
            // It is not possible to create a database for which the final page is either a pointer-map page or the pending-byte page. If one
            // is encountered, this indicates corruption.
            return(SysEx.SQLITE_CORRUPT_BKPT());
        }
        var nFree = (Pgno)ConvertEx.Get4(pBt.Page1.Data, 36); // Number of pages on the freelist initially (header offset 36)
        var nEntry = (int)pBt.UsableSize / 5; // Number of entries on one ptrmap page (5 bytes per entry)
        var nPtrmap = (Pgno)((nFree - nOrig + PTRMAP_PAGENO(pBt, nOrig) + (Pgno)nEntry) / nEntry); // Number of PtrMap pages to be freed
        var nFin = nOrig - nFree - nPtrmap; // Number of pages in database after autovacuuming
        // The pending-byte page and pointer-map pages cannot be removed, so step
        // nFin back over them until it lands on an ordinary page number.
        if (nOrig > PENDING_BYTE_PAGE(pBt) && nFin < PENDING_BYTE_PAGE(pBt))
        {
            nFin--;
        }
        while (PTRMAP_ISPAGE(pBt, nFin) || nFin == PENDING_BYTE_PAGE(pBt))
        {
            nFin--;
        }
        if (nFin > nOrig)
        {
            return(SysEx.SQLITE_CORRUPT_BKPT());
        }
        // Relocate content off of every page past the new end-of-file mark.
        for (var iFree = nOrig; iFree > nFin && rc == RC.OK; iFree--)
        {
            rc = incrVacuumStep(pBt, nFin, iFree);
        }
        if ((rc == RC.DONE || rc == RC.OK) && nFree > 0)
        {
            // Clear the freelist head (offset 32) and count (offset 36) in the
            // page-1 header, record the new page count (offset 28), and shrink
            // the pager image to the vacuumed size.
            rc = Pager.Write(pBt.Page1.DbPage);
            ConvertEx.Put4(pBt.Page1.Data, 32, 0);
            ConvertEx.Put4(pBt.Page1.Data, 36, 0);
            ConvertEx.Put4(pBt.Page1.Data, 28, nFin);
            pBt.Pager.TruncateImage(nFin);
            pBt.Pages = nFin;
        }
        if (rc != RC.OK)
        {
            pPager.Rollback();
        }
    }
    Debug.Assert(nRef == pPager.RefCount);
    return(rc);
}
// was:invalidateAllOverflowCache
// Drop the cached overflow-page list of every cursor open on this shared
// btree by walking the BtShared cursor chain. The shared mutex must be held.
internal static void invalidateAllOverflowCache(BtShared shared)
{
    Debug.Assert(MutexEx.Held(shared.Mutex));
    var cur = shared.Cursors;
    while (cur != null)
    {
        invalidateOverflowCache(cur);
        cur = cur.Next;
    }
}
// Initialize the MemPage "extra" object attached to a pager page so it
// describes page pgno of btree pBt, and return it.
internal static MemPage btreePageFromDbPage(DbPage pDbPage, Pgno pgno, BtShared pBt)
{
    var page = (MemPage)Pager.sqlite3PagerGetExtra<MemPage>(pDbPage);
    page.Data = Pager.sqlite3PagerGetData(pDbPage);
    page.DbPage = pDbPage;
    page.Shared = pBt;
    page.ID = pgno;
    // Page 1 begins with the 100-byte database file header, so its btree
    // content starts at offset 100; every other page starts at offset 0.
    page.HeaderOffset = (byte)(pgno == 1 ? 100 : 0);
    return page;
}
// Build the MemPage view of pager page pDbPage: wire up its data pointer,
// owning btree, page number, and header offset, then hand it back.
internal static MemPage btreePageFromDbPage(DbPage pDbPage, Pgno pgno, BtShared pBt)
{
    var result = (MemPage)Pager.sqlite3PagerGetExtra<MemPage>(pDbPage);
    result.DbPage = pDbPage;
    result.Data = Pager.sqlite3PagerGetData(pDbPage);
    result.ID = pgno;
    result.Shared = pBt;
    // Only page 1 carries the 100-byte file header ahead of its btree content.
    result.HeaderOffset = (pgno == 1) ? (byte)100 : (byte)0;
    return result;
}
// Commit-time full auto-vacuum: relocate pages so the freelist empties, then
// truncate the database image to the minimal page count.
//
// Parameters:
//   pBt - the shared btree being committed (its mutex must be held).
// Returns RC.OK on success, RC.CORRUPT on inconsistent header/pointer-map
// state, or the first failing sub-operation's code (the pager transaction is
// rolled back on failure).
internal static RC autoVacuumCommit(BtShared pBt)
{
    var rc = RC.OK;
    var pPager = pBt.Pager;
#if DEBUG
    // Used only to assert below that every page acquired here was released.
    var nRef = pPager.RefCount;
#else
    var nRef = 0;
#endif
    Debug.Assert(MutexEx.Held(pBt.Mutex));
    Btree.invalidateAllOverflowCache(pBt);
    Debug.Assert(pBt.AutoVacuum);
    // Only run at commit in full auto-vacuum mode; incremental mode vacuums
    // via explicit steps instead.
    if (!pBt.IncrVacuum)
    {
        var nOrig = pBt.btreePagecount(); // Database size before freeing
        if (PTRMAP_ISPAGE(pBt, nOrig) || nOrig == PENDING_BYTE_PAGE(pBt))
            // It is not possible to create a database for which the final page is either a pointer-map page or the pending-byte page. If one
            // is encountered, this indicates corruption.
            return SysEx.SQLITE_CORRUPT_BKPT();
        var nFree = (Pgno)ConvertEx.Get4(pBt.Page1.Data, 36); // Number of pages on the freelist initially (header offset 36)
        var nEntry = (int)pBt.UsableSize / 5; // Number of entries on one ptrmap page (5 bytes per entry)
        var nPtrmap = (Pgno)((nFree - nOrig + PTRMAP_PAGENO(pBt, nOrig) + (Pgno)nEntry) / nEntry); // Number of PtrMap pages to be freed
        var nFin = nOrig - nFree - nPtrmap; // Number of pages in database after autovacuuming
        // Step nFin back over the pending-byte page and any pointer-map pages,
        // which can never be the last page of the file.
        if (nOrig > PENDING_BYTE_PAGE(pBt) && nFin < PENDING_BYTE_PAGE(pBt))
            nFin--;
        while (PTRMAP_ISPAGE(pBt, nFin) || nFin == PENDING_BYTE_PAGE(pBt))
            nFin--;
        if (nFin > nOrig)
            return SysEx.SQLITE_CORRUPT_BKPT();
        // Relocate content off every page beyond the new end of file.
        for (var iFree = nOrig; iFree > nFin && rc == RC.OK; iFree--)
            rc = incrVacuumStep(pBt, nFin, iFree);
        if ((rc == RC.DONE || rc == RC.OK) && nFree > 0)
        {
            // Zero the freelist head (offset 32) and count (offset 36), store
            // the new page count (offset 28), and shrink the pager image.
            rc = Pager.Write(pBt.Page1.DbPage);
            ConvertEx.Put4(pBt.Page1.Data, 32, 0);
            ConvertEx.Put4(pBt.Page1.Data, 36, 0);
            ConvertEx.Put4(pBt.Page1.Data, 28, nFin);
            pBt.Pager.TruncateImage(nFin);
            pBt.Pages = nFin;
        }
        if (rc != RC.OK)
            pPager.Rollback();
    }
    Debug.Assert(nRef == pPager.RefCount);
    return rc;
}
// Perform a single step of an incremental vacuum: move the content of page
// iLastPg (the current last page of the file) to a free page earlier in the
// file, or remove it from the freelist.  When nFin is zero (incremental mode)
// the file is also truncated past any trailing pointer-map / pending-byte
// pages.
//
// Parameters:
//   pBt     - the shared btree (mutex must be held).
//   nFin    - final desired page count, or 0 for a pure incremental step.
//   iLastPg - the page to vacate; must be greater than nFin.
// Returns RC.OK, RC.DONE when the freelist is already empty, RC.CORRUPT on a
// root page on the freelist boundary, or a sub-operation's error code.
internal static RC incrVacuumStep(BtShared pBt, Pgno nFin, Pgno iLastPg)
{
    Pgno nFreeList; // Number of pages still on the free-list
    Debug.Assert(MutexEx.Held(pBt.Mutex));
    Debug.Assert(iLastPg > nFin);
    // Pointer-map pages and the pending-byte page carry no movable content;
    // skip straight to the truncation logic for them.
    if (!PTRMAP_ISPAGE(pBt, iLastPg) && iLastPg != PENDING_BYTE_PAGE(pBt))
    {
        PTRMAP eType = 0;
        Pgno iPtrPage = 0;
        nFreeList = ConvertEx.Get4(pBt.Page1.Data, 36);
        if (nFreeList == 0)
        {
            // Nothing left on the freelist: the vacuum is complete.
            return(RC.DONE);
        }
        var rc = pBt.ptrmapGet(iLastPg, ref eType, ref iPtrPage);
        if (rc != RC.OK)
        {
            return(rc);
        }
        if (eType == PTRMAP.ROOTPAGE)
        {
            // Root pages are never supposed to be the last page of the file.
            return(SysEx.SQLITE_CORRUPT_BKPT());
        }
        if (eType == PTRMAP.FREEPAGE)
        {
            if (nFin == 0)
            {
                // Remove the page from the files free-list. This is not required if nFin is non-zero. In that case, the free-list will be
                // truncated to zero after this function returns, so it doesn't matter if it still contains some garbage entries.
                Pgno iFreePg = 0;
                var pFreePg = new MemPage();
                rc = pBt.allocateBtreePage(ref pFreePg, ref iFreePg, iLastPg, 1);
                if (rc != RC.OK)
                {
                    return(rc);
                }
                Debug.Assert(iFreePg == iLastPg);
                pFreePg.releasePage();
            }
        }
        else
        {
            Pgno iFreePg = 0; // Index of free page to move pLastPg to
            var pLastPg = new MemPage();
            rc = pBt.btreeGetPage(iLastPg, ref pLastPg, 0);
            if (rc != RC.OK)
            {
                return(rc);
            }
            // If nFin is zero, this loop runs exactly once and page pLastPg is swapped with the first free page pulled off the free list.
            // On the other hand, if nFin is greater than zero, then keep looping until a free-page located within the first nFin pages of the file is found.
            do
            {
                var pFreePg = new MemPage();
                rc = pBt.allocateBtreePage(ref pFreePg, ref iFreePg, 0, 0);
                if (rc != RC.OK)
                {
                    pLastPg.releasePage();
                    return(rc);
                }
                pFreePg.releasePage();
            } while (nFin != 0 && iFreePg > nFin);
            Debug.Assert(iFreePg < iLastPg);
            rc = Pager.Write(pLastPg.DbPage);
            if (rc == RC.OK)
            {
                // The final argument asks relocatePage to treat this as a
                // commit-time move when nFin is non-zero.
                rc = relocatePage(pBt, pLastPg, eType, iPtrPage, iFreePg, (nFin != 0) ? 1 : 0);
            }
            pLastPg.releasePage();
            if (rc != RC.OK)
            {
                return(rc);
            }
        }
    }
    if (nFin == 0)
    {
        // Incremental mode: truncate the file past trailing pointer-map and
        // pending-byte pages, journaling each ptrmap page before it is dropped.
        iLastPg--;
        while (iLastPg == PENDING_BYTE_PAGE(pBt) || PTRMAP_ISPAGE(pBt, iLastPg))
        {
            if (PTRMAP_ISPAGE(pBt, iLastPg))
            {
                var pPg = new MemPage();
                var rc = pBt.btreeGetPage(iLastPg, ref pPg, 0);
                if (rc != RC.OK)
                {
                    return(rc);
                }
                rc = Pager.Write(pPg.DbPage);
                pPg.releasePage();
                if (rc != RC.OK)
                {
                    return(rc);
                }
            }
            iLastPg--;
        }
        pBt.Pager.TruncateImage(iLastPg);
        pBt.Pages = iLastPg;
    }
    return(RC.OK);
}
// Maximum size of a cell on a page: the page size less the 8-byte page
// header overhead.
internal static int MX_CELL_SIZE(BtShared pBt) => (int)(pBt.PageSize - 8);
// was:invalidateAllOverflowCache
// Walk every open cursor on this shared btree and invalidate its cached
// overflow-page list. Caller must hold the shared mutex.
internal static void invalidateAllOverflowCache(BtShared shared)
{
    Debug.Assert(MutexEx.Held(shared.Mutex));
    for (var c = shared.Cursors; c != null; c = c.Next)
    {
        invalidateOverflowCache(c);
    }
}
// Page number of the pointer-map page that holds the entry for pgno.
internal static Pgno PTRMAP_PAGENO(BtShared pBt, Pgno pgno) => pBt.ptrmapPageno(pgno);
// Page number of the pending-byte (locking) page for this btree's pager.
internal static uint PENDING_BYTE_PAGE(BtShared pBt) => (uint)Pager.PAGER_MJ_PGNO(pBt.Pager);
// Page number of the pending-byte (locking) page, delegated to the pager.
internal static uint PENDING_BYTE_PAGE(BtShared pBt)
{
    var pgno = Pager.PAGER_MJ_PGNO(pBt.Pager);
    return (uint)pgno;
}
// Run an integrity check over the whole database file: verify the freelist,
// every tree rooted at an entry of aRoot[], and that every page is referenced
// exactly where expected.  Returns a human-readable error report (empty
// string for an empty database) and writes the error count into pnErr.
static string sqlite3BtreeIntegrityCheck(
    Btree p,          /* The btree to be checked */
    int[] aRoot,      /* An array of root pages numbers for individual trees */
    int nRoot,        /* Number of entries in aRoot[] */
    int mxErr,        /* Stop reporting errors after this many */
    ref int pnErr     /* Write number of errors seen to this variable */
)
{
    Pgno i;
    int nRef;
    IntegrityCk sCheck = new IntegrityCk();
    BtShared pBt = p.pBt;
    StringBuilder zErr = new StringBuilder(100);//char zErr[100];
    sqlite3BtreeEnter(p);
    Debug.Assert(p.inTrans > TRANS_NONE && pBt.inTransaction > TRANS_NONE);
    // Snapshot the pager ref-count; the check must not leak page references.
    nRef = sqlite3PagerRefcount(pBt.pPager);
    sCheck.pBt = pBt;
    sCheck.pPager = pBt.pPager;
    sCheck.nPage = btreePagecount(sCheck.pBt);
    sCheck.mxErr = mxErr;
    sCheck.nErr = 0;
    //sCheck.mallocFailed = 0;
    pnErr = 0;
    if (sCheck.nPage == 0)
    {
        // An empty database is trivially consistent.
        sqlite3BtreeLeave(p);
        return("");
    }
    // anRef[i] counts references seen for page i (index 0 unused).
    sCheck.anRef = sqlite3Malloc(sCheck.anRef, (int)sCheck.nPage + 1);
    //if( !sCheck.anRef ){
    //  pnErr = 1;
    //  sqlite3BtreeLeave(p);
    //  return 0;
    //}
    //
    for (i = 0; i <= sCheck.nPage; i++)
    {
        sCheck.anRef[i] = 0;
    }
    // The pending-byte page is never used by the btree layer; pre-mark it as
    // referenced so it is not reported as unused below.
    i = PENDING_BYTE_PAGE(pBt);
    if (i <= sCheck.nPage)
    {
        sCheck.anRef[i] = 1;
    }
    sqlite3StrAccumInit(sCheck.errMsg, null, 1000, 20000);
    //sCheck.errMsg.useMalloc = 2;
    /* Check the integrity of the freelist */
    checkList(sCheck, 1, (int)sqlite3Get4byte(pBt.pPage1.aData, 32), (int)sqlite3Get4byte(pBt.pPage1.aData, 36), "Main freelist: ");
    /* Check all the tables. */
    for (i = 0; (int)i < nRoot && sCheck.mxErr != 0; i++)
    {
        if (aRoot[i] == 0)
        {
            continue;
        }
#if !SQLITE_OMIT_AUTOVACUUM
        if (pBt.autoVacuum && aRoot[i] > 1)
        {
            checkPtrmap(sCheck, (u32)aRoot[i], PTRMAP_ROOTPAGE, 0, "");
        }
#endif
        checkTreePage(sCheck, aRoot[i], "List of tree roots: ", ref refNULL, ref refNULL, null, null);
    }
    /* Make sure every page in the file is referenced */
    for (i = 1; i <= sCheck.nPage && sCheck.mxErr != 0; i++)
    {
#if SQLITE_OMIT_AUTOVACUUM
        if (sCheck.anRef[i] == null)
        {
            checkAppendMsg(sCheck, 0, "Page %d is never used", i);
        }
#else
        /* If the database supports auto-vacuum, make sure no tables contain
        ** references to pointer-map pages. */
        if (sCheck.anRef[i] == 0 && (PTRMAP_PAGENO(pBt, i) != i || !pBt.autoVacuum))
        {
            checkAppendMsg(sCheck, "", "Page %d is never used", i);
        }
        if (sCheck.anRef[i] != 0 && (PTRMAP_PAGENO(pBt, i) == i && pBt.autoVacuum))
        {
            checkAppendMsg(sCheck, "", "Pointer map page %d is referenced", i);
        }
#endif
    }
    /* Make sure this analysis did not leave any unref() pages.
    ** This is an internal consistency check; an integrity check
    ** of the integrity check.
    */
    if (NEVER(nRef != sqlite3PagerRefcount(pBt.pPager)))
    {
        checkAppendMsg(sCheck, "", "Outstanding page count goes from %d to %d during this analysis", nRef, sqlite3PagerRefcount(pBt.pPager));
    }
    /* Clean up and report errors. */
    sqlite3BtreeLeave(p);
    sCheck.anRef = null;// sqlite3_free( ref sCheck.anRef );
    //if( sCheck.mallocFailed ){
    //  sqlite3StrAccumReset(sCheck.errMsg);
    //  pnErr = sCheck.nErr+1;
    //  return 0;
    //}
    pnErr = sCheck.nErr;
    if (sCheck.nErr == 0)
    {
        sqlite3StrAccumReset(sCheck.errMsg);
    }
    return(sqlite3StrAccumFinish(sCheck.errMsg));
}
// One step of an incremental vacuum: vacate page iLastPg (the current last
// page) by moving its content to a free page earlier in the file, or by
// pulling it off the freelist.  With nFin == 0 (incremental mode) the file is
// also truncated past trailing pointer-map / pending-byte pages.
//
// Parameters:
//   pBt     - the shared btree (mutex must be held).
//   nFin    - target final page count, or 0 for a pure incremental step.
//   iLastPg - page to vacate; must exceed nFin.
// Returns RC.OK, RC.DONE when the freelist is empty, RC.CORRUPT if a root
// page is found where one cannot be, or a sub-operation's error code.
internal static RC incrVacuumStep(BtShared pBt, Pgno nFin, Pgno iLastPg)
{
    Pgno nFreeList; // Number of pages still on the free-list
    Debug.Assert(MutexEx.Held(pBt.Mutex));
    Debug.Assert(iLastPg > nFin);
    // Pointer-map pages and the pending-byte page hold no movable content;
    // fall through to the truncation logic for them.
    if (!PTRMAP_ISPAGE(pBt, iLastPg) && iLastPg != PENDING_BYTE_PAGE(pBt))
    {
        PTRMAP eType = 0;
        Pgno iPtrPage = 0;
        nFreeList = ConvertEx.Get4(pBt.Page1.Data, 36);
        if (nFreeList == 0)
            // Freelist already empty: nothing left to vacuum.
            return RC.DONE;
        var rc = pBt.ptrmapGet(iLastPg, ref eType, ref iPtrPage);
        if (rc != RC.OK)
            return rc;
        if (eType == PTRMAP.ROOTPAGE)
            // A root page can never legitimately be the last page of the file.
            return SysEx.SQLITE_CORRUPT_BKPT();
        if (eType == PTRMAP.FREEPAGE)
        {
            if (nFin == 0)
            {
                // Remove the page from the files free-list. This is not required if nFin is non-zero. In that case, the free-list will be
                // truncated to zero after this function returns, so it doesn't matter if it still contains some garbage entries.
                Pgno iFreePg = 0;
                var pFreePg = new MemPage();
                rc = pBt.allocateBtreePage(ref pFreePg, ref iFreePg, iLastPg, 1);
                if (rc != RC.OK)
                    return rc;
                Debug.Assert(iFreePg == iLastPg);
                pFreePg.releasePage();
            }
        }
        else
        {
            Pgno iFreePg = 0; // Index of free page to move pLastPg to
            var pLastPg = new MemPage();
            rc = pBt.btreeGetPage(iLastPg, ref pLastPg, 0);
            if (rc != RC.OK)
                return rc;
            // If nFin is zero, this loop runs exactly once and page pLastPg is swapped with the first free page pulled off the free list.
            // On the other hand, if nFin is greater than zero, then keep looping until a free-page located within the first nFin pages of the file is found.
            do
            {
                var pFreePg = new MemPage();
                rc = pBt.allocateBtreePage(ref pFreePg, ref iFreePg, 0, 0);
                if (rc != RC.OK)
                {
                    pLastPg.releasePage();
                    return rc;
                }
                pFreePg.releasePage();
            } while (nFin != 0 && iFreePg > nFin);
            Debug.Assert(iFreePg < iLastPg);
            rc = Pager.Write(pLastPg.DbPage);
            if (rc == RC.OK)
                // The last argument flags a commit-time move when nFin != 0.
                rc = relocatePage(pBt, pLastPg, eType, iPtrPage, iFreePg, (nFin != 0) ? 1 : 0);
            pLastPg.releasePage();
            if (rc != RC.OK)
                return rc;
        }
    }
    if (nFin == 0)
    {
        // Incremental mode: truncate past trailing pointer-map and
        // pending-byte pages, journaling each ptrmap page first.
        iLastPg--;
        while (iLastPg == PENDING_BYTE_PAGE(pBt) || PTRMAP_ISPAGE(pBt, iLastPg))
        {
            if (PTRMAP_ISPAGE(pBt, iLastPg))
            {
                var pPg = new MemPage();
                var rc = pBt.btreeGetPage(iLastPg, ref pPg, 0);
                if (rc != RC.OK)
                    return rc;
                rc = Pager.Write(pPg.DbPage);
                pPg.releasePage();
                if (rc != RC.OK)
                    return rc;
            }
            iLastPg--;
        }
        pBt.Pager.TruncateImage(iLastPg);
        pBt.Pages = iLastPg;
    }
    return RC.OK;
}
// Move btree page pDbPage (currently at page iDbPage) to page iFreePage, then
// repair all pointers affected by the move: the pointer-map entries for the
// moved page's children/overflow chain, and the parent pointer stored on page
// iPtrPage.
//
// Parameters:
//   pBt       - the shared btree (mutex must be held).
//   pDbPage   - the page being moved; must belong to pBt.
//   eType     - pointer-map type of pDbPage (BTREE, ROOTPAGE, OVERFLOW1/2).
//   iPtrPage  - page holding the pointer to pDbPage (unused for ROOTPAGE).
//   iFreePage - destination page number.
//   isCommit  - non-zero when this move is part of a commit-time vacuum.
// Returns RC.OK or the error code of the first failing sub-operation.
internal static RC relocatePage(BtShared pBt, MemPage pDbPage, PTRMAP eType, Pgno iPtrPage, Pgno iFreePage, int isCommit)
{
    var pPtrPage = new MemPage(); // The page that contains a pointer to pDbPage
    var iDbPage = pDbPage.ID;
    var pPager = pBt.Pager;
    Debug.Assert(eType == PTRMAP.OVERFLOW2 || eType == PTRMAP.OVERFLOW1 || eType == PTRMAP.BTREE || eType == PTRMAP.ROOTPAGE);
    Debug.Assert(MutexEx.Held(pBt.Mutex));
    Debug.Assert(pDbPage.Shared == pBt);
    // Move page iDbPage from its current location to page number iFreePage
    Btree.TRACE("AUTOVACUUM: Moving %d to free page %d (ptr page %d type %d)\n", iDbPage, iFreePage, iPtrPage, eType);
    var rc = pPager.sqlite3PagerMovepage(pDbPage.DbPage, iFreePage, isCommit);
    if (rc != RC.OK)
        return rc;
    pDbPage.ID = iFreePage;
    // If pDbPage was a btree-page, then it may have child pages and/or cells that point to overflow pages. The pointer map entries for all these
    // pages need to be changed.
    // If pDbPage is an overflow page, then the first 4 bytes may store a pointer to a subsequent overflow page. If this is the case, then
    // the pointer map needs to be updated for the subsequent overflow page.
    if (eType == PTRMAP.BTREE || eType == PTRMAP.ROOTPAGE)
    {
        rc = pDbPage.setChildPtrmaps();
        if (rc != RC.OK)
            return rc;
    }
    else
    {
        var nextOvfl = (Pgno)ConvertEx.Get4(pDbPage.Data);
        if (nextOvfl != 0)
        {
            pBt.ptrmapPut(nextOvfl, PTRMAP.OVERFLOW2, iFreePage, ref rc);
            if (rc != RC.OK)
                return rc;
        }
    }
    // Fix the database pointer on page iPtrPage that pointed at iDbPage so that it points at iFreePage. Also fix the pointer map entry for iPtrPage.
    if (eType != PTRMAP.ROOTPAGE)
    {
        rc = pBt.btreeGetPage(iPtrPage, ref pPtrPage, 0);
        if (rc != RC.OK)
            return rc;
        rc = Pager.Write(pPtrPage.DbPage);
        if (rc != RC.OK)
        {
            pPtrPage.releasePage();
            return rc;
        }
        rc = pPtrPage.modifyPagePointer(iDbPage, iFreePage, eType);
        pPtrPage.releasePage();
        if (rc == RC.OK)
            pBt.ptrmapPut(iFreePage, eType, iPtrPage, ref rc);
    }
    return rc;
}
public BtreeLock Locks; // Object used to lock page 1
#endif

// Maximum size of a cell on a page: the page size less the 8-byte page
// header overhead.
internal static int MX_CELL_SIZE(BtShared pBt) { return((int)(pBt.PageSize - 8)); }
// was:invalidateAllOverflowCache
// No-op variant: there is no per-cursor overflow cache to invalidate in this
// configuration. NOTE(review): presumably the build-flag counterpart of the
// cursor-walking version elsewhere in this file — confirm which #if selects it.
internal static void invalidateAllOverflowCache(BtShared pBt) { }
// Page number of the pointer-map page carrying the entry for pgno.
internal static Pgno PTRMAP_PAGENO(BtShared pBt, Pgno pgno)
{
    return pBt.ptrmapPageno(pgno);
}
// True when pgno is itself a pointer-map page (a ptrmap page maps to itself).
internal static bool PTRMAP_ISPAGE(BtShared pBt, uint pgno) => PTRMAP_PAGENO(pBt, pgno) == pgno;
// True when pgno is a pointer-map page: such pages are their own map page.
internal static bool PTRMAP_ISPAGE(BtShared pBt, uint pgno)
{
    var mapPage = PTRMAP_PAGENO(pBt, pgno);
    return mapPage == pgno;
}
// was:sqlite3BtreeOpen
// Open a database file and create the Btree handle for it, sharing an
// existing BtShared cache when shared-cache mode allows it.
//
// FIX: the cleanup label "btree_open_out" was commented out, leaving the two
// "goto btree_open_out;" statements below with no target (compile error
// CS0159). The label is restored; no other code is changed.
//
// Parameters:
//   pVfs      - VFS to use for this btree.
//   zFilename - database file name; empty/null means an ephemeral temp db.
//   db        - owning database connection (its mutex must be held).
//   rTree     - receives the new Btree handle on success, null on failure.
//   flags     - OPEN.* flags (must fit in 8 bits).
//   vfsFlags  - flags passed through to the VFS open.
// Returns RC.OK on success, RC.CONSTRAINT if the same shared cache is already
// attached to this connection, or the error code of a failing sub-operation.
public static RC Open(VirtualFileSystem pVfs, string zFilename, sqlite3 db, ref Btree rTree, OPEN flags, VFSOPEN vfsFlags)
{
    Btree p; // Handle to return
    var rc = RC.OK;
    byte nReserve; // Byte of unused space on each page
    var zDbHeader = new byte[100]; // Database header content
    // True if opening an ephemeral, temporary database
    bool isTempDb = string.IsNullOrEmpty(zFilename);
    // Set the variable isMemdb to true for an in-memory database, or false for a file-based database.
#if SQLITE_OMIT_MEMORYDB
    var isMemdb = false;
#else
    var isMemdb = (zFilename == ":memory:" || isTempDb && db.sqlite3TempInMemory());
#endif
    Debug.Assert(db != null);
    Debug.Assert(pVfs != null);
    Debug.Assert(MutexEx.Held(db.Mutex));
    Debug.Assert(((uint)flags & 0xff) == (uint)flags); // flags fit in 8 bits
    // Only a BTREE_SINGLE database can be BTREE_UNORDERED
    Debug.Assert((flags & OPEN.UNORDERED) == 0 || (flags & OPEN.SINGLE) != 0);
    // A BTREE_SINGLE database is always a temporary and/or ephemeral
    Debug.Assert((flags & OPEN.SINGLE) == 0 || isTempDb);
    if ((db.flags & sqlite3b.SQLITE.NoReadlock) != 0)
        flags |= OPEN.NO_READLOCK;
    if (isMemdb)
        flags |= OPEN.MEMORY;
    if ((vfsFlags & VFSOPEN.MAIN_DB) != 0 && (isMemdb || isTempDb))
        vfsFlags = (vfsFlags & ~VFSOPEN.MAIN_DB) | VFSOPEN.TEMP_DB;
    p = new Btree();
    p.InTransaction = TRANS.NONE;
    p.DB = db;
#if !SQLITE_OMIT_SHARED_CACHE
    p.Locks.Tree = p;
    p.Locks.TableID = 1;
#endif
    BtShared shared = null; // Shared part of btree structure
    sqlite3_mutex mutexOpen = null; // Prevents a race condition.
#if !SQLITE_OMIT_SHARED_CACHE && !SQLITE_OMIT_DISKIO
    // If this Btree is a candidate for shared cache, try to find an existing BtShared object that we can share with
    if (!isMemdb && !isTempDb)
    {
        if ((vfsFlags & VFSOPEN.SHAREDCACHE) != 0)
        {
            p.Sharable = true;
            string zPathname;
            rc = pVfs.xFullPathname(zFilename, out zPathname);
            mutexOpen = MutexEx.sqlite3MutexAlloc(MUTEX.STATIC_OPEN);
            MutexEx.sqlite3_mutex_enter(mutexOpen);
            var mutexShared = MutexEx.sqlite3MutexAlloc(MUTEX.STATIC_MASTER);
            MutexEx.sqlite3_mutex_enter(mutexShared);
            for (shared = SysEx.getGLOBAL<BtShared>(s_sqlite3SharedCacheList); shared != null; shared = shared.Next)
            {
                Debug.Assert(shared.nRef > 0);
                if (string.Equals(zPathname, shared.Pager.sqlite3PagerFilename) && shared.Pager.sqlite3PagerVfs == pVfs)
                {
                    // Reject sharing when this connection already has the same
                    // BtShared attached (would deadlock/alias).
                    for (var iDb = db.DBs - 1; iDb >= 0; iDb--)
                    {
                        var existingTree = db.AllocDBs[iDb].Tree;
                        if (existingTree != null && existingTree.Shared == shared)
                        {
                            MutexEx.sqlite3_mutex_leave(mutexShared);
                            MutexEx.sqlite3_mutex_leave(mutexOpen);
                            p = null;
                            return RC.CONSTRAINT;
                        }
                    }
                    p.Shared = shared;
                    shared.nRef++;
                    break;
                }
            }
            MutexEx.sqlite3_mutex_leave(mutexShared);
        }
#if DEBUG
        else
            // In debug mode, we mark all persistent databases as sharable even when they are not. This exercises the locking code and
            // gives more opportunity for asserts(sqlite3_mutex_held()) statements to find locking problems.
            p.Sharable = true;
#endif
    }
#endif
    if (shared == null)
    {
        // The following asserts make sure that structures used by the btree are the right size. This is to guard against size changes that result
        // when compiling on a different architecture.
        Debug.Assert(sizeof(long) == 8 || sizeof(long) == 4);
        Debug.Assert(sizeof(ulong) == 8 || sizeof(ulong) == 4);
        Debug.Assert(sizeof(uint) == 4);
        Debug.Assert(sizeof(ushort) == 2);
        Debug.Assert(sizeof(Pgno) == 4);
        shared = new BtShared();
        rc = Pager.Open(pVfs, out shared.Pager, zFilename, EXTRA_SIZE, (Pager.PAGEROPEN)flags, vfsFlags, pageReinit, () => new MemPage());
        if (rc == RC.OK)
            rc = shared.Pager.ReadFileHeader(zDbHeader.Length, zDbHeader);
        if (rc != RC.OK)
            goto btree_open_out;
        shared.OpenFlags = flags;
        shared.DB = db;
        shared.Pager.SetBusyHandler(btreeInvokeBusyHandler, shared);
        p.Shared = shared;
        shared.Cursors = null;
        shared.Page1 = null;
        shared.ReadOnly = shared.Pager.IsReadonly;
#if SQLITE_SECURE_DELETE
        pBt.secureDelete = true;
#endif
        // Page size lives at header bytes 16-17 (big-endian, value 1 encodes 65536).
        shared.PageSize = (uint)((zDbHeader[16] << 8) | (zDbHeader[17] << 16));
        if (shared.PageSize < 512 || shared.PageSize > Pager.SQLITE_MAX_PAGE_SIZE || ((shared.PageSize - 1) & shared.PageSize) != 0)
        {
            shared.PageSize = 0;
#if !SQLITE_OMIT_AUTOVACUUM
            // If the magic name ":memory:" will create an in-memory database, then leave the autoVacuum mode at 0 (do not auto-vacuum), even if
            // SQLITE_DEFAULT_AUTOVACUUM is true. On the other hand, if SQLITE_OMIT_MEMORYDB has been defined, then ":memory:" is just a
            // regular file-name. In this case the auto-vacuum applies as per normal.
            if (zFilename != string.Empty && !isMemdb)
            {
                shared.AutoVacuum = (AUTOVACUUM.DEFAULT != AUTOVACUUM.NONE);
                shared.IncrVacuum = (AUTOVACUUM.DEFAULT == AUTOVACUUM.INCR);
            }
#endif
            nReserve = 0;
        }
        else
        {
            nReserve = zDbHeader[20];
            shared.PageSizeFixed = true;
#if !SQLITE_OMIT_AUTOVACUUM
            shared.AutoVacuum = ConvertEx.Get4(zDbHeader, 36 + 4 * 4) != 0;
            shared.IncrVacuum = ConvertEx.Get4(zDbHeader, 36 + 7 * 4) != 0;
#endif
        }
        rc = shared.Pager.SetPageSize(ref shared.PageSize, nReserve);
        if (rc != RC.OK)
            goto btree_open_out;
        shared.UsableSize = (ushort)(shared.PageSize - nReserve);
        Debug.Assert((shared.PageSize & 7) == 0); // 8-byte alignment of pageSize
#if !SQLITE_OMIT_SHARED_CACHE && !SQLITE_OMIT_DISKIO
        // Add the new BtShared object to the linked list sharable BtShareds.
        if (p.Sharable)
        {
            sqlite3_mutex mutexShared;
            shared.nRef = 1;
            mutexShared = MutexEx.sqlite3MutexAlloc(MUTEX.STATIC_MASTER);
            if (MutexEx.SQLITE_THREADSAFE && MutexEx.WantsCoreMutex)
                shared.Mutex = MutexEx.sqlite3MutexAlloc(MUTEX.FAST);
            MutexEx.sqlite3_mutex_enter(mutexShared);
            shared.Next = SysEx.getGLOBAL<BtShared>(s_sqlite3SharedCacheList);
            SysEx.setGLOBAL<BtShared>(s_sqlite3SharedCacheList, shared);
            MutexEx.sqlite3_mutex_leave(mutexShared);
        }
#endif
    }
#if !SQLITE_OMIT_SHARED_CACHE && !SQLITE_OMIT_DISKIO
    // If the new Btree uses a sharable pBtShared, then link the new Btree into the list of all sharable Btrees for the same connection.
    // The list is kept in ascending order by pBt address.
    Btree existingTree2;
    if (p.Sharable)
        for (var i = 0; i < db.DBs; i++)
            if ((existingTree2 = db.AllocDBs[i].Tree) != null && existingTree2.Sharable)
            {
                while (existingTree2.Prev != null) { existingTree2 = existingTree2.Prev; }
                if (p.Shared.Version < existingTree2.Shared.Version)
                {
                    p.Next = existingTree2;
                    p.Prev = null;
                    existingTree2.Prev = p;
                }
                else
                {
                    while (existingTree2.Next != null && existingTree2.Next.Shared.Version < p.Shared.Version)
                        existingTree2 = existingTree2.Next;
                    p.Next = existingTree2.Next;
                    p.Prev = existingTree2;
                    if (p.Next != null)
                        p.Next.Prev = p;
                    existingTree2.Next = p;
                }
                break;
            }
#endif
    rTree = p;
btree_open_out: // restored label — target of the error-path gotos above
    if (rc != RC.OK)
    {
        if (shared != null && shared.Pager != null)
            shared.Pager.Close();
        shared = null;
        p = null;
        rTree = null;
    }
    else
    {
        // If the B-Tree was successfully opened, set the pager-cache size to the default value. Except, when opening on an existing shared pager-cache,
        // do not change the pager-cache size.
        if (p.GetSchema(0, null, null) == null)
            p.Shared.Pager.SetCacheSize(SQLITE_DEFAULT_CACHE_SIZE);
    }
    if (mutexOpen != null)
    {
        Debug.Assert(MutexEx.Held(mutexOpen));
        MutexEx.sqlite3_mutex_leave(mutexOpen);
    }
    return rc;
}
// was:sqlite3BtreeOpen
// Open a database file and create the Btree handle for it, sharing an
// existing BtShared cache when shared-cache mode allows it.
//
// FIX: the cleanup label "btree_open_out" was commented out, leaving the two
// "goto btree_open_out;" statements below with no target (compile error
// CS0159). The label is restored; no other code is changed.
//
// Parameters:
//   pVfs      - VFS to use for this btree.
//   zFilename - database file name; empty/null means an ephemeral temp db.
//   db        - owning database connection (its mutex must be held).
//   rTree     - receives the new Btree handle on success, null on failure.
//   flags     - OPEN.* flags (must fit in 8 bits).
//   vfsFlags  - flags passed through to the VFS open.
// Returns RC.OK on success, RC.CONSTRAINT if the same shared cache is already
// attached to this connection, or the error code of a failing sub-operation.
public static RC Open(VirtualFileSystem pVfs, string zFilename, sqlite3 db, ref Btree rTree, OPEN flags, VFSOPEN vfsFlags)
{
    Btree p; // Handle to return
    var rc = RC.OK;
    byte nReserve; // Byte of unused space on each page
    var zDbHeader = new byte[100]; // Database header content
    // True if opening an ephemeral, temporary database
    bool isTempDb = string.IsNullOrEmpty(zFilename);
    // Set the variable isMemdb to true for an in-memory database, or false for a file-based database.
#if SQLITE_OMIT_MEMORYDB
    var isMemdb = false;
#else
    var isMemdb = (zFilename == ":memory:" || isTempDb && db.sqlite3TempInMemory());
#endif
    Debug.Assert(db != null);
    Debug.Assert(pVfs != null);
    Debug.Assert(MutexEx.Held(db.Mutex));
    Debug.Assert(((uint)flags & 0xff) == (uint)flags); // flags fit in 8 bits
    // Only a BTREE_SINGLE database can be BTREE_UNORDERED
    Debug.Assert((flags & OPEN.UNORDERED) == 0 || (flags & OPEN.SINGLE) != 0);
    // A BTREE_SINGLE database is always a temporary and/or ephemeral
    Debug.Assert((flags & OPEN.SINGLE) == 0 || isTempDb);
    if ((db.flags & sqlite3b.SQLITE.NoReadlock) != 0)
        flags |= OPEN.NO_READLOCK;
    if (isMemdb)
        flags |= OPEN.MEMORY;
    if ((vfsFlags & VFSOPEN.MAIN_DB) != 0 && (isMemdb || isTempDb))
        vfsFlags = (vfsFlags & ~VFSOPEN.MAIN_DB) | VFSOPEN.TEMP_DB;
    p = new Btree();
    p.InTransaction = TRANS.NONE;
    p.DB = db;
#if !SQLITE_OMIT_SHARED_CACHE
    p.Locks.Tree = p;
    p.Locks.TableID = 1;
#endif
    BtShared shared = null; // Shared part of btree structure
    MutexEx mutexOpen = null; // Prevents a race condition.
#if !SQLITE_OMIT_SHARED_CACHE && !SQLITE_OMIT_DISKIO
    // If this Btree is a candidate for shared cache, try to find an existing BtShared object that we can share with
    if (!isMemdb && !isTempDb)
    {
        if ((vfsFlags & VFSOPEN.SHAREDCACHE) != 0)
        {
            p.Sharable = true;
            string zPathname;
            rc = pVfs.xFullPathname(zFilename, out zPathname);
            mutexOpen = MutexEx.Alloc(MUTEX.STATIC_OPEN);
            MutexEx.Enter(mutexOpen);
            var mutexShared = MutexEx.Alloc(MUTEX.STATIC_MASTER);
            MutexEx.Enter(mutexShared);
            for (shared = SysEx.getGLOBAL<BtShared>(s_sqlite3SharedCacheList); shared != null; shared = shared.Next)
            {
                Debug.Assert(shared.nRef > 0);
                if (string.Equals(zPathname, shared.Pager.sqlite3PagerFilename) && shared.Pager.sqlite3PagerVfs == pVfs)
                {
                    // Reject sharing when this connection already has the same
                    // BtShared attached (would deadlock/alias).
                    for (var iDb = db.DBs - 1; iDb >= 0; iDb--)
                    {
                        var existingTree = db.AllocDBs[iDb].Tree;
                        if (existingTree != null && existingTree.Shared == shared)
                        {
                            MutexEx.Leave(mutexShared);
                            MutexEx.Leave(mutexOpen);
                            p = null;
                            return RC.CONSTRAINT;
                        }
                    }
                    p.Shared = shared;
                    shared.nRef++;
                    break;
                }
            }
            MutexEx.Leave(mutexShared);
        }
#if DEBUG
        else
            // In debug mode, we mark all persistent databases as sharable even when they are not. This exercises the locking code and
            // gives more opportunity for asserts(sqlite3_mutex_held()) statements to find locking problems.
            p.Sharable = true;
#endif
    }
#endif
    if (shared == null)
    {
        // The following asserts make sure that structures used by the btree are the right size. This is to guard against size changes that result
        // when compiling on a different architecture.
        Debug.Assert(sizeof(long) == 8 || sizeof(long) == 4);
        Debug.Assert(sizeof(ulong) == 8 || sizeof(ulong) == 4);
        Debug.Assert(sizeof(uint) == 4);
        Debug.Assert(sizeof(ushort) == 2);
        Debug.Assert(sizeof(Pgno) == 4);
        shared = new BtShared();
        rc = Pager.Open(pVfs, out shared.Pager, zFilename, EXTRA_SIZE, (Pager.PAGEROPEN)flags, vfsFlags, pageReinit, () => new MemPage());
        if (rc == RC.OK)
            rc = shared.Pager.ReadFileHeader(zDbHeader.Length, zDbHeader);
        if (rc != RC.OK)
            goto btree_open_out;
        shared.OpenFlags = flags;
        shared.DB = db;
        shared.Pager.SetBusyHandler(btreeInvokeBusyHandler, shared);
        p.Shared = shared;
        shared.Cursors = null;
        shared.Page1 = null;
        shared.ReadOnly = shared.Pager.IsReadonly;
#if SQLITE_SECURE_DELETE
        pBt.secureDelete = true;
#endif
        // Page size lives at header bytes 16-17 (big-endian, value 1 encodes 65536).
        shared.PageSize = (uint)((zDbHeader[16] << 8) | (zDbHeader[17] << 16));
        if (shared.PageSize < 512 || shared.PageSize > Pager.SQLITE_MAX_PAGE_SIZE || ((shared.PageSize - 1) & shared.PageSize) != 0)
        {
            shared.PageSize = 0;
#if !SQLITE_OMIT_AUTOVACUUM
            // If the magic name ":memory:" will create an in-memory database, then leave the autoVacuum mode at 0 (do not auto-vacuum), even if
            // SQLITE_DEFAULT_AUTOVACUUM is true. On the other hand, if SQLITE_OMIT_MEMORYDB has been defined, then ":memory:" is just a
            // regular file-name. In this case the auto-vacuum applies as per normal.
            if (zFilename != string.Empty && !isMemdb)
            {
                shared.AutoVacuum = (AUTOVACUUM.DEFAULT != AUTOVACUUM.NONE);
                shared.IncrVacuum = (AUTOVACUUM.DEFAULT == AUTOVACUUM.INCR);
            }
#endif
            nReserve = 0;
        }
        else
        {
            nReserve = zDbHeader[20];
            shared.PageSizeFixed = true;
#if !SQLITE_OMIT_AUTOVACUUM
            shared.AutoVacuum = ConvertEx.Get4(zDbHeader, 36 + 4 * 4) != 0;
            shared.IncrVacuum = ConvertEx.Get4(zDbHeader, 36 + 7 * 4) != 0;
#endif
        }
        rc = shared.Pager.SetPageSize(ref shared.PageSize, nReserve);
        if (rc != RC.OK)
            goto btree_open_out;
        shared.UsableSize = (ushort)(shared.PageSize - nReserve);
        Debug.Assert((shared.PageSize & 7) == 0); // 8-byte alignment of pageSize
#if !SQLITE_OMIT_SHARED_CACHE && !SQLITE_OMIT_DISKIO
        // Add the new BtShared object to the linked list sharable BtShareds.
        if (p.Sharable)
        {
            MutexEx mutexShared;
            shared.nRef = 1;
            mutexShared = MutexEx.Alloc(MUTEX.STATIC_MASTER);
            if (MutexEx.SQLITE_THREADSAFE && MutexEx.WantsCoreMutex)
                shared.Mutex = MutexEx.Alloc(MUTEX.FAST);
            MutexEx.Enter(mutexShared);
            shared.Next = SysEx.getGLOBAL<BtShared>(s_sqlite3SharedCacheList);
            SysEx.setGLOBAL<BtShared>(s_sqlite3SharedCacheList, shared);
            MutexEx.Leave(mutexShared);
        }
#endif
    }
#if !SQLITE_OMIT_SHARED_CACHE && !SQLITE_OMIT_DISKIO
    // If the new Btree uses a sharable pBtShared, then link the new Btree into the list of all sharable Btrees for the same connection.
    // The list is kept in ascending order by pBt address.
    Btree existingTree2;
    if (p.Sharable)
        for (var i = 0; i < db.DBs; i++)
            if ((existingTree2 = db.AllocDBs[i].Tree) != null && existingTree2.Sharable)
            {
                while (existingTree2.Prev != null) { existingTree2 = existingTree2.Prev; }
                if (p.Shared.Version < existingTree2.Shared.Version)
                {
                    p.Next = existingTree2;
                    p.Prev = null;
                    existingTree2.Prev = p;
                }
                else
                {
                    while (existingTree2.Next != null && existingTree2.Next.Shared.Version < p.Shared.Version)
                        existingTree2 = existingTree2.Next;
                    p.Next = existingTree2.Next;
                    p.Prev = existingTree2;
                    if (p.Next != null)
                        p.Next.Prev = p;
                    existingTree2.Next = p;
                }
                break;
            }
#endif
    rTree = p;
btree_open_out: // restored label — target of the error-path gotos above
    if (rc != RC.OK)
    {
        if (shared != null && shared.Pager != null)
            shared.Pager.Close();
        shared = null;
        p = null;
        rTree = null;
    }
    else
    {
        // If the B-Tree was successfully opened, set the pager-cache size to the default value. Except, when opening on an existing shared pager-cache,
        // do not change the pager-cache size.
        if (p.GetSchema(0, null, null) == null)
            p.Shared.Pager.SetCacheSize(SQLITE_DEFAULT_CACHE_SIZE);
    }
    if (mutexOpen != null)
    {
        Debug.Assert(MutexEx.Held(mutexOpen));
        MutexEx.Leave(mutexOpen);
    }
    return rc;
}
// Move btree page pDbPage (currently at page iDbPage) to page iFreePage and
// repair every pointer affected by the move: the pointer-map entries of the
// moved page's children / next overflow page, and the parent pointer stored
// on page iPtrPage.
//
// Parameters:
//   pBt       - the shared btree (mutex must be held).
//   pDbPage   - the page being moved; must belong to pBt.
//   eType     - pointer-map type of pDbPage (BTREE, ROOTPAGE, OVERFLOW1/2).
//   iPtrPage  - page holding the pointer to pDbPage (unused for ROOTPAGE).
//   iFreePage - destination page number.
//   isCommit  - non-zero when the move is part of a commit-time vacuum.
// Returns RC.OK or the error code of the first failing sub-operation.
internal static RC relocatePage(BtShared pBt, MemPage pDbPage, PTRMAP eType, Pgno iPtrPage, Pgno iFreePage, int isCommit)
{
    var pPtrPage = new MemPage(); // The page that contains a pointer to pDbPage
    var iDbPage = pDbPage.ID;
    var pPager = pBt.Pager;
    Debug.Assert(eType == PTRMAP.OVERFLOW2 || eType == PTRMAP.OVERFLOW1 || eType == PTRMAP.BTREE || eType == PTRMAP.ROOTPAGE);
    Debug.Assert(MutexEx.Held(pBt.Mutex));
    Debug.Assert(pDbPage.Shared == pBt);
    // Move page iDbPage from its current location to page number iFreePage
    Btree.TRACE("AUTOVACUUM: Moving %d to free page %d (ptr page %d type %d)\n", iDbPage, iFreePage, iPtrPage, eType);
    var rc = pPager.sqlite3PagerMovepage(pDbPage.DbPage, iFreePage, isCommit);
    if (rc != RC.OK)
    {
        return(rc);
    }
    pDbPage.ID = iFreePage;
    // If pDbPage was a btree-page, then it may have child pages and/or cells that point to overflow pages. The pointer map entries for all these
    // pages need to be changed.
    // If pDbPage is an overflow page, then the first 4 bytes may store a pointer to a subsequent overflow page. If this is the case, then
    // the pointer map needs to be updated for the subsequent overflow page.
    if (eType == PTRMAP.BTREE || eType == PTRMAP.ROOTPAGE)
    {
        rc = pDbPage.setChildPtrmaps();
        if (rc != RC.OK)
        {
            return(rc);
        }
    }
    else
    {
        var nextOvfl = (Pgno)ConvertEx.Get4(pDbPage.Data);
        if (nextOvfl != 0)
        {
            pBt.ptrmapPut(nextOvfl, PTRMAP.OVERFLOW2, iFreePage, ref rc);
            if (rc != RC.OK)
            {
                return(rc);
            }
        }
    }
    // Fix the database pointer on page iPtrPage that pointed at iDbPage so that it points at iFreePage. Also fix the pointer map entry for iPtrPage.
    if (eType != PTRMAP.ROOTPAGE)
    {
        rc = pBt.btreeGetPage(iPtrPage, ref pPtrPage, 0);
        if (rc != RC.OK)
        {
            return(rc);
        }
        rc = Pager.Write(pPtrPage.DbPage);
        if (rc != RC.OK)
        {
            pPtrPage.releasePage();
            return(rc);
        }
        rc = pPtrPage.modifyPagePointer(iDbPage, iFreePage, eType);
        pPtrPage.releasePage();
        if (rc == RC.OK)
        {
            pBt.ptrmapPut(iFreePage, eType, iPtrPage, ref rc);
        }
    }
    return(rc);
}
// Maximum number of cells that can fit on one page: the usable area
// (page size minus the 8-byte header) divided by the minimum cell size of 6.
internal static int MX_CELL(BtShared pBt) => (int)(pBt.PageSize - 8) / 6;