// Append a copy of page pPg to the sub-journal so it can be restored by a
// later "ROLLBACK TO savepoint". No-op when journaling is OFF. Returns
// RC.OK, RC.NOMEM if the codec cannot allocate, or an I/O error code.
private static RC subjournalPage(PgHdr pPg)
{
    var rc = RC.OK;
    var pPager = pPg.Pager;
    if (pPager.journalMode != JOURNALMODE.OFF)
    {
        // Open the sub-journal, if it has not already been opened
        Debug.Assert(pPager.useJournal != 0);
        Debug.Assert(pPager.jfd.IsOpen || pPager.pagerUseWal());
        Debug.Assert(pPager.sjfd.IsOpen || pPager.nSubRec == 0);
        Debug.Assert(pPager.pagerUseWal() || pageInJournal(pPg) || pPg.ID > pPager.dbOrigSize);
        rc = pPager.openSubJournal();
        // If the sub-journal was opened successfully (or was already open), write the journal record into the file.
        if (rc == RC.OK)
        {
            var pData = pPg.Data;
            // Each sub-journal record is a 4-byte page number followed by pageSize bytes of data.
            long offset = pPager.nSubRec * (4 + pPager.pageSize);
            byte[] pData2 = null;
            if (CODEC2(pPager, pData, pPg.ID, codec_ctx.ENCRYPT_READ_CTX, ref pData2))
                return RC.NOMEM;
            PAGERTRACE("STMT-JOURNAL {0} page {1}", PAGERID(pPager), pPg.ID);
            rc = pPager.sjfd.WriteByte(offset, pPg.ID);
            if (rc == RC.OK)
                rc = pPager.sjfd.Write(pData2, pPager.pageSize, offset + 4);
        }
    }
    if (rc == RC.OK)
    {
        // Record the page in every open savepoint's bitvec so it is not sub-journaled twice.
        pPager.nSubRec++;
        Debug.Assert(pPager.nSavepoint > 0);
        rc = pPager.addToSavepointBitvecs(pPg.ID);
    }
    return rc;
}
// Unlink page pPage from its cache's doubly-linked dirty list, repairing
// the pSynced hint and the pDirty (head) / pDirtyTail pointers as needed.
// The page must currently be on the dirty list.
private static void pcacheRemoveFromDirtyList(PgHdr pPage)
{
    var p = pPage.Cache;
    Debug.Assert(pPage.DirtyNext != null || pPage == p.pDirtyTail);
    Debug.Assert(pPage.DirtyPrev != null || pPage == p.pDirty);
    // Update the PCache1.pSynced variable if necessary.
    if (p.pSynced == pPage)
    {
        // Walk toward the list head for the next page that does not need a journal sync.
        var pSynced = pPage.DirtyPrev;
        while (pSynced != null && (pSynced.Flags & PgHdr.PGHDR.NEED_SYNC) != 0)
            pSynced = pSynced.DirtyPrev;
        p.pSynced = pSynced;
    }
    if (pPage.DirtyNext != null)
        pPage.DirtyNext.DirtyPrev = pPage.DirtyPrev;
    else
    {
        Debug.Assert(pPage == p.pDirtyTail);
        p.pDirtyTail = pPage.DirtyPrev;
    }
    if (pPage.DirtyPrev != null)
        pPage.DirtyPrev.DirtyNext = pPage.DirtyNext;
    else
    {
        Debug.Assert(pPage == p.pDirty);
        p.pDirty = pPage.DirtyNext;
    }
    pPage.DirtyNext = null;
    pPage.DirtyPrev = null;
#if SQLITE_ENABLE_EXPENSIVE_ASSERT
    expensive_assert(pcacheCheckSynced(p));
#endif
}
public PgHdr pPage1; // Reference to page 1
// Reset the cache's dirty-list and reference bookkeeping to the empty state.
// NOTE(review): does not clear pPage1 or the pluggable cache itself — confirm
// callers reset those separately.
public void ClearState()
{
    pDirty = null;
    pDirtyTail = null;
    pSynced = null;
    nRef = 0;
}
// Hand an unreferenced page back to the pluggable cache (non-discarding
// unpin). Pages of non-purgeable caches stay pinned, so this is a no-op
// for them. Clears the cached page-1 shortcut when page 1 is unpinned.
private static void pcacheUnpin(PgHdr p)
{
    var cache = p.Cache;
    if (!cache.bPurgeable)
        return;
    if (p.ID == 1)
        cache.pPage1 = null;
    cache.pCache.xUnpin(p, false);
}
// was:sqlite3PcacheMakeClean
// Clear the DIRTY and NEED_SYNC flags of page p and take it off the dirty
// list; if no references remain the page is unpinned as well. No-op for
// pages that are already clean.
internal static void MakePageClean(PgHdr p)
{
    if ((p.Flags & PgHdr.PGHDR.DIRTY) == 0)
        return;
    pcacheRemoveFromDirtyList(p);
    p.Flags &= ~(PgHdr.PGHDR.DIRTY | PgHdr.PGHDR.NEED_SYNC);
    if (p.Refs == 0)
        pcacheUnpin(p);
}
// was:sqlite3PcacheMakeDirty
// Mark referenced page p as dirty, adding it to the dirty list on the
// clean-to-dirty transition. Any pending DONT_WRITE hint is cancelled.
internal static void MakePageDirty(PgHdr p)
{
    p.Flags &= ~PgHdr.PGHDR.DONT_WRITE;
    Debug.Assert(p.Refs > 0);
    if ((p.Flags & PgHdr.PGHDR.DIRTY) != 0)
        return; // already on the dirty list
    p.Flags |= PgHdr.PGHDR.DIRTY;
    pcacheAddToDirtyList(p);
}
// was:sqlite3PagerDontWrite
// Hint that page pPg need not be written back to the database file.
// Honored only for dirty pages while no savepoint is open (a savepoint
// rollback might still need the page content).
public static void DontWrite(PgHdr pPg)
{
    var pPager = pPg.Pager;
    var dirty = (pPg.Flags & PgHdr.PGHDR.DIRTY) != 0;
    if (!dirty || pPager.nSavepoint != 0)
        return;
    PAGERTRACE("DONT_WRITE page {0} of {1}", pPg.ID, PAGERID(pPager));
    SysEx.IOTRACE("CLEAN {0:x} {1}", pPager.GetHashCode(), pPg.ID);
    pPg.Flags |= PgHdr.PGHDR.DONT_WRITE;
    pager_set_pagehash(pPg);
}
// was:sqlite3PcacheDrop
// Permanently discard page p from the cache. The caller must hold the one
// and only reference to the page.
internal static void DropPage(PgHdr p)
{
    Debug.Assert(p.Refs == 1);
    if ((p.Flags & PgHdr.PGHDR.DIRTY) != 0)
        pcacheRemoveFromDirtyList(p);
    var cache = p.Cache;
    cache.nRef--;
    if (p.ID == 1)
        cache.pPage1 = null;
    // true => the pluggable cache may discard the page entirely.
    cache.pCache.xUnpin(p, true);
}
// was:sqlite3PcacheMove
// Rekey page p to newPgno. A page that is both dirty and in need of a
// journal sync is moved to the head of the dirty list so the sync-order
// bookkeeping stays consistent.
internal static void MovePage(PgHdr p, Pgno newPgno)
{
    var cache = p.Cache;
    Debug.Assert(p.Refs > 0);
    Debug.Assert(newPgno > 0);
    cache.pCache.xRekey(p, p.ID, newPgno);
    p.ID = newPgno;
    var needsRequeue = (p.Flags & PgHdr.PGHDR.DIRTY) != 0
                    && (p.Flags & PgHdr.PGHDR.NEED_SYNC) != 0;
    if (needsRequeue)
    {
        pcacheRemoveFromDirtyList(p);
        pcacheAddToDirtyList(p);
    }
}
// Reset every field of this page header to its default value so the
// object can be recycled for a different page.
public void ClearState()
{
    Data = null;
    Extra = null;
    Dirtys = null;
    ID = 0;
    Pager = null;
#if DEBUG
    PageHash = 0;
#endif
    Flags = 0;
    Refs = 0;
    CacheAllocated = false;
    Cache = null;
    DirtyNext = null;
    DirtyPrev = null;
    PgHdr1 = null;
}
// Push page pPage onto the head of its cache's dirty list. The page must
// not already be on the list. Also maintains pDirtyTail and, when possible,
// the pSynced hint (a dirty page that does not require a journal sync).
private static void pcacheAddToDirtyList(PgHdr pPage)
{
    var p = pPage.Cache;
    Debug.Assert(pPage.DirtyNext == null && pPage.DirtyPrev == null && p.pDirty != pPage);
    pPage.DirtyNext = p.pDirty;
    if (pPage.DirtyNext != null)
    {
        Debug.Assert(pPage.DirtyNext.DirtyPrev == null);
        pPage.DirtyNext.DirtyPrev = pPage;
    }
    p.pDirty = pPage;
    if (null == p.pDirtyTail)
        p.pDirtyTail = pPage;
    if (null == p.pSynced && 0 == (pPage.Flags & PgHdr.PGHDR.NEED_SYNC))
        p.pSynced = pPage;
#if SQLITE_ENABLE_EXPENSIVE_ASSERT
    expensive_assert(pcacheCheckSynced(p));
#endif
}
// was:sqlite3PcacheRelease
// Drop one reference to page p. When the last reference goes away the
// page is unpinned if clean, or moved to the head of the dirty list if dirty.
internal static void ReleasePage(PgHdr p)
{
    Debug.Assert(p.Refs > 0);
    if (--p.Refs != 0)
        return;
    var cache = p.Cache;
    cache.nRef--;
    if ((p.Flags & PgHdr.PGHDR.DIRTY) != 0)
    {
        // Move the page to the head of the dirty list.
        pcacheRemoveFromDirtyList(p);
        pcacheAddToDirtyList(p);
    }
    else
    {
        pcacheUnpin(p);
    }
}
// Unlink page pPage from its cache's doubly-linked dirty list, repairing
// the pSynced hint and the pDirty (head) / pDirtyTail pointers as needed.
// The page must currently be on the dirty list.
private static void pcacheRemoveFromDirtyList(PgHdr pPage)
{
    var p = pPage.Cache;
    Debug.Assert(pPage.DirtyNext != null || pPage == p.pDirtyTail);
    Debug.Assert(pPage.DirtyPrev != null || pPage == p.pDirty);
    // Update the PCache1.pSynced variable if necessary.
    if (p.pSynced == pPage)
    {
        // Walk toward the list head for the next page that does not need a journal sync.
        var pSynced = pPage.DirtyPrev;
        while (pSynced != null && (pSynced.Flags & PgHdr.PGHDR.NEED_SYNC) != 0)
        {
            pSynced = pSynced.DirtyPrev;
        }
        p.pSynced = pSynced;
    }
    if (pPage.DirtyNext != null)
    {
        pPage.DirtyNext.DirtyPrev = pPage.DirtyPrev;
    }
    else
    {
        Debug.Assert(pPage == p.pDirtyTail);
        p.pDirtyTail = pPage.DirtyPrev;
    }
    if (pPage.DirtyPrev != null)
    {
        pPage.DirtyPrev.DirtyNext = pPage.DirtyNext;
    }
    else
    {
        Debug.Assert(pPage == p.pDirty);
        p.pDirty = pPage.DirtyNext;
    }
    pPage.DirtyNext = null;
    pPage.DirtyPrev = null;
#if SQLITE_ENABLE_EXPENSIVE_ASSERT
    expensive_assert(pcacheCheckSynced(p));
#endif
}
// Append a copy of page pPg to the sub-journal so it can be restored by a
// later "ROLLBACK TO savepoint". No-op when journaling is OFF. Returns
// RC.OK, RC.NOMEM if the codec cannot allocate, or an I/O error code.
private static RC subjournalPage(PgHdr pPg)
{
    var rc = RC.OK;
    var pPager = pPg.Pager;
    if (pPager.journalMode != JOURNALMODE.OFF)
    {
        // Open the sub-journal, if it has not already been opened
        Debug.Assert(pPager.useJournal != 0);
        Debug.Assert(pPager.jfd.IsOpen || pPager.pagerUseWal());
        Debug.Assert(pPager.sjfd.IsOpen || pPager.nSubRec == 0);
        Debug.Assert(pPager.pagerUseWal() || pageInJournal(pPg) || pPg.ID > pPager.dbOrigSize);
        rc = pPager.openSubJournal();
        // If the sub-journal was opened successfully (or was already open), write the journal record into the file.
        if (rc == RC.OK)
        {
            var pData = pPg.Data;
            // Each sub-journal record is a 4-byte page number followed by pageSize bytes of data.
            long offset = pPager.nSubRec * (4 + pPager.pageSize);
            byte[] pData2 = null;
            if (CODEC2(pPager, pData, pPg.ID, codec_ctx.ENCRYPT_READ_CTX, ref pData2))
            {
                return(RC.NOMEM);
            }
            PAGERTRACE("STMT-JOURNAL {0} page {1}", PAGERID(pPager), pPg.ID);
            rc = pPager.sjfd.WriteByte(offset, pPg.ID);
            if (rc == RC.OK)
            {
                rc = pPager.sjfd.Write(pData2, pPager.pageSize, offset + 4);
            }
        }
    }
    if (rc == RC.OK)
    {
        // Record the page in every open savepoint's bitvec so it is not sub-journaled twice.
        pPager.nSubRec++;
        Debug.Assert(pPager.nSavepoint > 0);
        rc = pPager.addToSavepointBitvecs(pPg.ID);
    }
    return(rc);
}
// This function is called to rollback a transaction on a WAL database.
// It restores Pager.dbSize, asks the WAL layer to undo uncommitted frames,
// and then reverts every remaining dirty page via pagerUndoCallback
// (discard if unreferenced, reload from the database otherwise).
static int pagerRollbackWal(Pager pPager)
{
    int rc;       /* Return Code */
    PgHdr pList;  /* List of dirty pages to revert */
    /* For all pages in the cache that are currently dirty or have already been written (but not committed) to the log file, do one of the
    ** following:
    **   + Discard the cached page (if refcount==0), or
    **   + Reload page content from the database (if refcount>0).
    */
    pPager.dbSize = pPager.dbOrigSize;
    rc = sqlite3WalUndo(pPager.pWal, pagerUndoCallback, pPager);
    pList = sqlite3PcacheDirtyList(pPager.pPCache);
    // BUG FIX: C# has no implicit reference-to-bool conversion, so the C
    // condition "while (pList && ...)" must test against null explicitly.
    while (pList != null && rc == SQLITE.OK)
    {
        PgHdr pNext = pList.pDirty;
        rc = pagerUndoCallback(pPager, pList.pgno);
        pList = pNext;
    }
    return(rc);
}
// Sort the Dirtys-linked list pIn by page number using an iterative merge
// sort with N_SORT_BUCKET accumulator slots (slot i holds a sorted run of
// 2^i pages), then merge all slots into one sorted list and return it.
private static PgHdr pcacheSortDirtyList(PgHdr pIn)
{
    var a = new PgHdr[N_SORT_BUCKET];
    PgHdr p;
    while (pIn != null)
    {
        // Detach the head page and fold it into the bucket array.
        p = pIn;
        pIn = p.Dirtys;
        p.Dirtys = null;
        int i;
        for (i = 0; Check.ALWAYS(i < N_SORT_BUCKET - 1); i++)
        {
            if (a[i] == null)
            {
                a[i] = p;
                break;
            }
            else
            {
                p = pcacheMergeDirtyList(a[i], p);
                a[i] = null;
            }
        }
        if (Check.NEVER(i == N_SORT_BUCKET - 1))
        {
            // To get here, there need to be 2^(N_SORT_BUCKET) elements in the input list. But that is impossible.
            a[i] = pcacheMergeDirtyList(a[i], p);
        }
    }
    // Merge the per-bucket runs into the final sorted list.
    p = a[0];
    for (var i = 1; i < N_SORT_BUCKET; i++)
    {
        p = pcacheMergeDirtyList(p, a[i]);
    }
    return(p);
}
// Push page pPage onto the head of its cache's dirty list. The page must
// not already be on the list. Also maintains pDirtyTail and, when possible,
// the pSynced hint (a dirty page that does not require a journal sync).
private static void pcacheAddToDirtyList(PgHdr pPage)
{
    var p = pPage.Cache;
    Debug.Assert(pPage.DirtyNext == null && pPage.DirtyPrev == null && p.pDirty != pPage);
    pPage.DirtyNext = p.pDirty;
    if (pPage.DirtyNext != null)
    {
        Debug.Assert(pPage.DirtyNext.DirtyPrev == null);
        pPage.DirtyNext.DirtyPrev = pPage;
    }
    p.pDirty = pPage;
    if (null == p.pDirtyTail)
    {
        p.pDirtyTail = pPage;
    }
    if (null == p.pSynced && 0 == (pPage.Flags & PgHdr.PGHDR.NEED_SYNC))
    {
        p.pSynced = pPage;
    }
#if SQLITE_ENABLE_EXPENSIVE_ASSERT
    expensive_assert(pcacheCheckSynced(p));
#endif
}
// Merge two Dirtys-linked page lists, each already sorted by ascending
// page number, into a single sorted list and return its head.
private static PgHdr pcacheMergeDirtyList(PgHdr pA, PgHdr pB)
{
    var head = new PgHdr(); // dummy node; the merged list hangs off head.Dirtys
    var tail = head;
    while (pA != null && pB != null)
    {
        PgHdr smaller;
        if (pA.ID < pB.ID)
        {
            smaller = pA;
            pA = pA.Dirtys;
        }
        else
        {
            smaller = pB;
            pB = pB.Dirtys;
        }
        tail.Dirtys = smaller;
        tail = smaller;
    }
    // At most one list is non-empty here; append whatever remains (or null).
    tail.Dirtys = pA ?? pB;
    return head.Dirtys;
}
// Return the number of outstanding references held on page p.
internal static int sqlite3PcachePageRefcount(PgHdr p)
{
    var refs = p.Refs;
    return refs;
}
// No-op stub of the truncate-constraint assertion callback, used in builds
// where the Debug.Assert-based version is compiled out.
internal static void assertTruncateConstraintCb(PgHdr pPg) { }
// No-op stub: page-hash tracking is disabled in this build configuration,
// so the page hash is never recorded.
internal static void pager_set_pagehash(PgHdr X) { }
// Recompute and store the debug checksum of pPage's current content.
internal static void pager_set_pagehash(PgHdr pPage)
{
    var hash = pager_pagehash(pPage);
    pPage.PageHash = hash;
}
// Stub used when WAL support is compiled out: always reports success.
// NOTE(review): all parameters are ignored in this configuration.
internal RC pagerWalFrames(PgHdr w, Pgno x, int y, VirtualFile.SYNC z) { return(RC.OK); }
// True if page pPg is already recorded in the main journal's bitvec.
private static bool pageInJournal(PgHdr pPg)
{
    var journalBitvec = pPg.Pager.pInJournal;
    return journalBitvec.Get(pPg.ID) != 0;
}
// Return true if page pPg must be written to the sub-journal: some open
// savepoint covers the page (nOrig >= pgno) but has not yet recorded it
// in its pInSavepoint bitvec.
private static bool subjRequiresPage(PgHdr pPg)
{
    var pgno = pPg.ID;
    var pPager = pPg.Pager;
    for (var i = 0; i < pPager.nSavepoint; i++)
    {
        var sp = pPager.aSavepoint[i];
        var covered = sp.nOrig >= pgno;
        if (covered && sp.pInSavepoint.Get(pgno) == 0)
            return true;
    }
    return false;
}
// Report how many references are currently held on page p.
internal static int sqlite3PcachePageRefcount(PgHdr p)
{
    var count = p.Refs;
    return count;
}
// was:sqlite3PcacheFetch
// Look up page pgno in this cache, optionally (createFlag==1) creating it.
// If creation stalls because the pluggable cache is full, an unreferenced
// dirty page is spilled via xStress to make room. On success ppPage is set
// and its ref-count incremented; returns RC.NOMEM only when creation was
// requested but ultimately failed.
internal RC FetchPage(Pgno pgno, int createFlag, ref PgHdr ppPage)
{
    Debug.Assert(createFlag == 1 || createFlag == 0);
    Debug.Assert(pgno > 0);
    // If the pluggable cache (sqlite3_pcache*) has not been allocated, allocate it now.
    if (pCache == null && createFlag != 0)
    {
        var nByte = szPage + szExtra + 0;
        var p = IPCache.xCreate(nByte, bPurgeable);
        p.xCachesize(nMax);
        pCache = p;
    }
    // eCreate: 0 = do not create; 1 = create only if easy; 2 = force create.
    // NOTE(review): semantics inferred from upstream SQLite — confirm against xFetch.
    var eCreate = createFlag * (1 + ((!bPurgeable || null == pDirty) ? 1 : 0));
    PgHdr pPage = null;
    if (pCache != null)
    {
        pPage = pCache.xFetch(pgno, eCreate);
    }
    if (pPage == null && eCreate == 1)
    {
        PgHdr pPg;
        // Find a dirty page to write-out and recycle. First try to find a page that does not require a journal-sync (one with PGHDR_NEED_SYNC
        // cleared), but if that is not possible settle for any other unreferenced dirty page.
#if SQLITE_ENABLE_EXPENSIVE_ASSERT
        expensive_assert(pcacheCheckSynced(pCache));
#endif
        for (pPg = pSynced; pPg != null && (pPg.Refs != 0 || (pPg.Flags & PgHdr.PGHDR.NEED_SYNC) != 0); pPg = pPg.DirtyPrev)
        {
            ;
        }
        pSynced = pPg;
        if (pPg == null)
        {
            for (pPg = pDirtyTail; pPg != null && pPg.Refs != 0; pPg = pPg.DirtyPrev)
            {
                ;
            }
        }
        if (pPg != null)
        {
#if SQLITE_LOG_CACHE_SPILL
            sqlite3_log(SQLITE_FULL, "spill page %d making room for %d - cache used: %d/%d", pPg.pgno, pgno, sqlite3GlobalConfig.pcache.xPagecount(pCache.pCache), pCache.nMax);
#endif
            // Spill the chosen dirty page; only OK/BUSY allow the fetch to continue.
            var rc = xStress(pStress, pPg);
            if (rc != RC.OK && rc != RC.BUSY)
            {
                return(rc);
            }
        }
        pPage = pCache.xFetch(pgno, 2);
    }
    if (pPage != null)
    {
        // First use of this header: attach data buffer and identity.
        if (pPage.Data == null)
        {
            pPage.Data = MallocEx.sqlite3Malloc(pCache.szPage);
            pPage.Cache = this;
            pPage.ID = pgno;
        }
        Debug.Assert(pPage.Cache == this);
        Debug.Assert(pPage.ID == pgno);
        if (pPage.Refs == 0)
        {
            nRef++;
        }
        pPage.Refs++;
        if (pgno == 1)
        {
            pPage1 = pPage;
        }
    }
    ppPage = pPage;
    return(pPage == null && eCreate != 0 ? RC.NOMEM : RC.OK);
}
// Sort the Dirtys-linked list pIn by page number using an iterative merge
// sort with N_SORT_BUCKET accumulator slots (slot i holds a sorted run of
// 2^i pages), then merge all slots into one sorted list and return it.
private static PgHdr pcacheSortDirtyList(PgHdr pIn)
{
    var a = new PgHdr[N_SORT_BUCKET];
    PgHdr p;
    while (pIn != null)
    {
        // Detach the head page and fold it into the bucket array.
        p = pIn;
        pIn = p.Dirtys;
        p.Dirtys = null;
        int i;
        for (i = 0; Check.ALWAYS(i < N_SORT_BUCKET - 1); i++)
        {
            if (a[i] == null)
            {
                a[i] = p;
                break;
            }
            else
            {
                p = pcacheMergeDirtyList(a[i], p);
                a[i] = null;
            }
        }
        if (Check.NEVER(i == N_SORT_BUCKET - 1))
            // To get here, there need to be 2^(N_SORT_BUCKET) elements in the input list. But that is impossible.
            a[i] = pcacheMergeDirtyList(a[i], p);
    }
    // Merge the per-bucket runs into the final sorted list.
    p = a[0];
    for (var i = 1; i < N_SORT_BUCKET; i++)
        p = pcacheMergeDirtyList(p, a[i]);
    return p;
}
// Read the pointer-map entry for page 'key': on success pEType receives the
// entry type and pPgno its parent page number. Returns RC.OK, an error from
// the pager, or SQLITE_CORRUPT when the stored entry is out of range.
internal RC ptrmapGet(Pgno key, ref PTRMAP pEType, ref Pgno pPgno)
{
    Debug.Assert(MutexEx.Held(this.Mutex));
    var iPtrmap = (int)MemPage.PTRMAP_PAGENO(this, key);
    var pDbPage = new PgHdr(); // The pointer map page
    var rc = this.Pager.Get((Pgno)iPtrmap, ref pDbPage);
    if (rc != RC.OK)
        return rc;
    var pPtrmap = Pager.sqlite3PagerGetData(pDbPage);// Pointer map page data
    var offset = (int)MemPage.PTRMAP_PTROFFSET((Pgno)iPtrmap, key);
    if (offset < 0)
    {
        // 'key' maps onto the pointer-map page itself: corruption.
        Pager.Unref(pDbPage);
        return SysEx.SQLITE_CORRUPT_BKPT();
    }
    Debug.Assert(offset <= (int)this.UsableSize - 5);
    // Entry layout: 1 type byte followed by a 4-byte big-endian parent page number.
    var v = pPtrmap[offset];
    if (v < 1 || v > 5)
        return SysEx.SQLITE_CORRUPT_BKPT();
    pEType = (PTRMAP)v;
    pPgno = ConvertEx.Get4(pPtrmap, offset + 1);
    Pager.Unref(pDbPage);
    return RC.OK;
}
// was:sqlite3PagerWrite
// Mark page pDbPage writable, journaling it first if required. When the
// sector size spans several pages, every page of the same sector is
// journaled together so a partial sector write cannot corrupt neighbors.
public static RC Write(DbPage pDbPage)
{
    var rc = RC.OK;
    var pPg = pDbPage;
    var pPager = pPg.Pager;
    var nPagePerSector = (uint)(pPager.sectorSize / pPager.pageSize);
    Debug.Assert(pPager.eState >= PAGER.WRITER_LOCKED);
    Debug.Assert(pPager.eState != PAGER.ERROR);
    Debug.Assert(pPager.assert_pager_state());
    if (nPagePerSector > 1)
    {
        Pgno nPageCount = 0;    // Total number of pages in database file
        Pgno pg1;               // First page of the sector pPg is located on.
        Pgno nPage = 0;         // Number of pages starting at pg1 to journal
        bool needSync = false;  // True if any page has PGHDR_NEED_SYNC
        // Set the doNotSyncSpill flag to 1. This is because we cannot allow a journal header to be written between the pages journaled by
        // this function.
        Debug.Assert(
#if SQLITE_OMIT_MEMORYDB
0 == MEMDB
#else
0 == pPager.memDb
#endif
);
        Debug.Assert(pPager.doNotSyncSpill == 0);
        pPager.doNotSyncSpill++;
        // This trick assumes that both the page-size and sector-size are an integer power of 2. It sets variable pg1 to the identifier
        // of the first page of the sector pPg is located on.
        pg1 = (Pgno)((pPg.ID - 1) & ~(nPagePerSector - 1)) + 1;
        nPageCount = pPager.dbSize;
        // Clamp the journaled run to the end of the database image.
        if (pPg.ID > nPageCount)
        {
            nPage = (pPg.ID - pg1) + 1;
        }
        else if ((pg1 + nPagePerSector - 1) > nPageCount)
        {
            nPage = nPageCount + 1 - pg1;
        }
        else
        {
            nPage = nPagePerSector;
        }
        Debug.Assert(nPage > 0);
        Debug.Assert(pg1 <= pPg.ID);
        Debug.Assert((pg1 + nPage) > pPg.ID);
        for (var ii = 0; ii < nPage && rc == RC.OK; ii++)
        {
            var pg = (Pgno)(pg1 + ii);
            var pPage = new PgHdr();
            if (pg == pPg.ID || pPager.pInJournal.Get(pg) == 0)
            {
                // Journal this sector-mate (skipping the pending-byte page).
                if (pg != ((VirtualFile.PENDING_BYTE / (pPager.pageSize)) + 1))
                {
                    rc = pPager.Get(pg, ref pPage);
                    if (rc == RC.OK)
                    {
                        rc = pager_write(pPage);
                        if ((pPage.Flags & PgHdr.PGHDR.NEED_SYNC) != 0)
                        {
                            needSync = true;
                        }
                        Unref(pPage);
                    }
                }
            }
            else if ((pPage = pPager.pager_lookup(pg)) != null)
            {
                if ((pPage.Flags & PgHdr.PGHDR.NEED_SYNC) != 0)
                {
                    needSync = true;
                }
                Unref(pPage);
            }
        }
        // If the PGHDR_NEED_SYNC flag is set for any of the nPage pages starting at pg1, then it needs to be set for all of them. Because
        // writing to any of these nPage pages may damage the others, the journal file must contain sync()ed copies of all of them
        // before any of them can be written out to the database file.
        if (rc == RC.OK && needSync)
        {
            Debug.Assert(
#if SQLITE_OMIT_MEMORYDB
0 == MEMDB
#else
0 == pPager.memDb
#endif
);
            for (var ii = 0; ii < nPage; ii++)
            {
                var pPage = pPager.pager_lookup((Pgno)(pg1 + ii));
                if (pPage != null)
                {
                    pPage.Flags |= PgHdr.PGHDR.NEED_SYNC;
                    Unref(pPage);
                }
            }
        }
        Debug.Assert(pPager.doNotSyncSpill == 1);
        pPager.doNotSyncSpill--;
    }
    else
    {
        rc = pager_write(pDbPage);
    }
    return(rc);
}
// Write a pointer-map entry for page 'key': entry type eType with parent
// page 'parent'. The result code is folded into rRC (no-op if rRC already
// holds an error). Only rewrites the entry when its content changes.
internal void ptrmapPut(Pgno key, PTRMAP eType, Pgno parent, ref RC rRC)
{
    if (rRC != RC.OK)
        return;
    Debug.Assert(MutexEx.Held(this.Mutex));
    // The master-journal page number must never be used as a pointer map page
    Debug.Assert(!MemPage.PTRMAP_ISPAGE(this, MemPage.PENDING_BYTE_PAGE(this)));
    Debug.Assert(this.AutoVacuum);
    if (key == 0)
    {
        rRC = SysEx.SQLITE_CORRUPT_BKPT();
        return;
    }
    var iPtrmap = MemPage.PTRMAP_PAGENO(this, key);
    var pDbPage = new PgHdr(); // The pointer map page
    var rc = this.Pager.Get(iPtrmap, ref pDbPage);
    if (rc != RC.OK)
    {
        rRC = rc;
        return;
    }
    var offset = (int)MemPage.PTRMAP_PTROFFSET(iPtrmap, key);
    if (offset < 0)
    {
        // 'key' maps onto the pointer-map page itself: corruption.
        rRC = SysEx.SQLITE_CORRUPT_BKPT();
        goto ptrmap_exit;
    }
    Debug.Assert(offset <= (int)this.UsableSize - 5);
    var pPtrmap = Pager.sqlite3PagerGetData(pDbPage); // The pointer map data
    if (eType != (PTRMAP)pPtrmap[offset] || ConvertEx.Get4(pPtrmap, offset + 1) != parent)
    {
        Btree.TRACE("PTRMAP_UPDATE: {0}->({1},{2})", key, eType, parent);
        rRC = rc = Pager.Write(pDbPage);
        if (rc == RC.OK)
        {
            // Entry layout: 1 type byte followed by a 4-byte big-endian parent page number.
            pPtrmap[offset] = (byte)eType;
            ConvertEx.Put4L(pPtrmap, offset + 1, parent);
        }
    }
ptrmap_exit:
    Pager.Unref(pDbPage);
}
// This function is called by the wal module when writing page content
// into the log file.
//
// It returns a buffer containing the encrypted page content. If a malloc
// fails, this function may return null.
//
// BUG FIX: the original body was untranslated C ("PgHdr *pPg",
// "voidaData = 0;" and the C macro form of CODEC2) and could not compile
// as C#. Rewritten to match the ref-parameter CODEC2 convention used by
// pager_write_pagelist, with the same ENCRYPT_WRITE_CTX (6) codec mode.
internal static byte[] sqlite3PagerCodec(PgHdr pPg)
{
    byte[] aData = null;
    if (CODEC2(pPg.Pager, pPg.Data, pPg.ID, codec_ctx.ENCRYPT_WRITE_CTX, ref aData))
        return null; // codec allocation failed
    return aData;
}
// was:sqlite3PcacheRef
// Take an additional reference on page p, which must already be referenced.
internal static void AddPageRef(PgHdr p)
{
    Debug.Assert(p.Refs > 0);
    p.Refs += 1;
}
// Compute the debug checksum of pPage's current content.
internal static uint pager_pagehash(PgHdr pPage)
{
    var pager = pPage.Pager;
    return pager_datahash(pager.pageSize, pPage.Data);
}
// Write every page on the Dirtys-linked list pList out to the database
// file. Only called for rollback pagers in WRITER_DBMOD state. Pages past
// the current database image (dbSize) or flagged DONT_WRITE are skipped.
// Returns RC.OK, RC.NOMEM when the codec cannot allocate, or an I/O error.
private RC pager_write_pagelist(PgHdr pList)
{
    var rc = RC.OK;
    // This function is only called for rollback pagers in WRITER_DBMOD state.
    Debug.Assert(!pagerUseWal());
    Debug.Assert(this.eState == PAGER.WRITER_DBMOD);
    Debug.Assert(this.eLock == VFSLOCK.EXCLUSIVE);
    // If the file is a temp-file has not yet been opened, open it now. It is not possible for rc to be other than SQLITE.OK if this branch
    // is taken, as pager_wait_on_lock() is a no-op for temp-files.
    if (!this.fd.IsOpen)
    {
        Debug.Assert(this.tempFile && rc == RC.OK);
        rc = pagerOpentemp(ref this.fd, this.vfsFlags);
    }
    // Before the first write, give the VFS a hint of what the final file size will be.
    Debug.Assert(rc != RC.OK || this.fd.IsOpen);
    if (rc == RC.OK && this.dbSize > this.dbHintSize)
    {
        long szFile = this.pageSize * (long)this.dbSize;
        this.fd.SetFileControl(VirtualFile.FCNTL.SIZE_HINT, ref szFile);
        this.dbHintSize = this.dbSize;
    }
    // BUG FIX: "pList" alone is not a valid boolean in C#; the C condition
    // must become an explicit null test.
    while (rc == RC.OK && pList != null)
    {
        var pgno = pList.ID;
        // If there are dirty pages in the page cache with page numbers greater than Pager.dbSize, this means sqlite3PagerTruncateImage() was called to
        // make the file smaller (presumably by auto-vacuum code). Do not write any such pages to the file.
        // Also, do not write out any page that has the PGHDR_DONT_WRITE flag set (set by sqlite3PagerDontWrite()).
        if (pList.ID <= this.dbSize && 0 == (pList.Flags & PgHdr.PGHDR.DONT_WRITE))
        {
            Debug.Assert((pList.Flags & PgHdr.PGHDR.NEED_SYNC) == 0);
            if (pList.ID == 1)
            {
                pager_write_changecounter(pList);
            }
            // Encode the database
            byte[] pData = null; // Data to write
            if (CODEC2(this, pList.Data, pgno, codec_ctx.ENCRYPT_WRITE_CTX, ref pData))
            {
                return(RC.NOMEM);
            }
            // Write out the page data.
            long offset = (pList.ID - 1) * (long)this.pageSize; // Offset to write
            rc = this.fd.Write(pData, this.pageSize, offset);
            // If page 1 was just written, update Pager.dbFileVers to match the value now stored in the database file. If writing this
            // page caused the database file to grow, update dbFileSize.
            if (pgno == 1)
            {
                Buffer.BlockCopy(pData, 24, this.dbFileVers, 0, this.dbFileVers.Length);
            }
            if (pgno > this.dbFileSize)
            {
                this.dbFileSize = pgno;
            }
            // Update any backup objects copying the contents of this pager.
            if (this.pBackup != null)
            {
                this.pBackup.sqlite3BackupUpdate(pgno, pList.Data);
            }
            PAGERTRACE("STORE {0} page {1} hash({2,08:x})", PAGERID(this), pgno, pager_pagehash(pList));
            SysEx.IOTRACE("PGOUT {0:x} {1}", this.GetHashCode(), pgno);
        }
        else
        {
            PAGERTRACE("NOSTORE {0} page {1}", PAGERID(this), pgno);
        }
        pager_set_pagehash(pList);
        pList = pList.Dirtys;
    }
    return(rc);
}
// Non-DEBUG stub: page hashing is disabled in this build, always returns 0.
internal static uint pager_pagehash(PgHdr X) { return(0); }
// Increment the database change-counter stored on page 1 (done at most once
// per write transaction; guarded by changeCountDone). In direct mode the
// updated page 1 is written straight to the database file; otherwise it is
// funneled through the normal Write() journaling path.
private RC pager_incr_changecounter(bool isDirectMode)
{
    var rc = RC.OK;
    Debug.Assert(this.eState == PAGER.WRITER_CACHEMOD || this.eState == PAGER.WRITER_DBMOD);
    Debug.Assert(assert_pager_state());
    // Declare and initialize constant integer 'isDirect'. If the atomic-write optimization is enabled in this build, then isDirect
    // is initialized to the value passed as the isDirectMode parameter to this function. Otherwise, it is always set to zero.
    // The idea is that if the atomic-write optimization is not enabled at compile time, the compiler can omit the tests of
    // 'isDirect' below, as well as the block enclosed in the "if( isDirect )" condition.
#if !SQLITE_ENABLE_ATOMIC_WRITE
    var DIRECT_MODE = false;
    Debug.Assert(!isDirectMode);
    SysEx.UNUSED_PARAMETER(isDirectMode);
#else
    var DIRECT_MODE = isDirectMode;
#endif
    if (!this.changeCountDone && this.dbSize > 0)
    {
        PgHdr pPgHdr = null; // Reference to page 1
        Debug.Assert(!this.tempFile && this.fd.IsOpen);
        // Open page 1 of the file for writing.
        rc = Get(1, ref pPgHdr);
        Debug.Assert(pPgHdr == null || rc == RC.OK);
        // If page one was fetched successfully, and this function is not operating in direct-mode, make page 1 writable. When not in
        // direct mode, page 1 is always held in cache and hence the PagerGet() above is always successful - hence the ALWAYS on rc==SQLITE.OK.
        if (!DIRECT_MODE && Check.ALWAYS(rc == RC.OK))
        {
            rc = Write(pPgHdr);
        }
        if (rc == RC.OK)
        {
            // Actually do the update of the change counter
            pager_write_changecounter(pPgHdr);
            // If running in direct mode, write the contents of page 1 to the file.
            if (DIRECT_MODE)
            {
                byte[] zBuf = null;
                Debug.Assert(this.dbFileSize > 0);
                if (CODEC2(this, pPgHdr.Data, 1, codec_ctx.ENCRYPT_WRITE_CTX, ref zBuf))
                {
                    return(rc = RC.NOMEM);
                }
                if (rc == RC.OK)
                {
                    rc = this.fd.Write(zBuf, this.pageSize, 0);
                }
                if (rc == RC.OK)
                {
                    this.changeCountDone = true;
                }
            }
            else
            {
                this.changeCountDone = true;
            }
        }
        // Release the page reference.
        Unref(pPgHdr);
    }
    return(rc);
}
// Assertion callback applied to each dirty page when truncating the page
// cache: a dirty page must either not require sub-journaling or still lie
// within the current database image (pPg.ID <= dbSize).
internal static void assertTruncateConstraintCb(PgHdr pPg)
{
    Debug.Assert((pPg.Flags & PgHdr.PGHDR.DIRTY) != 0);
    Debug.Assert(!subjRequiresPage(pPg) || pPg.ID <= pPg.Pager.dbSize);
}
// Cache-spill callback (xStress) invoked by the pcache layer when it needs
// to free a dirty page pPg: write the page to the WAL or database file and
// mark it clean. Several guard flags can turn this into a harmless no-op.
private static RC pagerStress(object p, PgHdr pPg)
{
    var pPager = (Pager)p;
    var rc = RC.OK;
    Debug.Assert(pPg.Pager == pPager);
    Debug.Assert((pPg.Flags & PgHdr.PGHDR.DIRTY) != 0);
    // The doNotSyncSpill flag is set during times when doing a sync of journal (and adding a new header) is not allowed. This occurs
    // during calls to sqlite3PagerWrite() while trying to journal multiple pages belonging to the same sector.
    // The doNotSpill flag inhibits all cache spilling regardless of whether or not a sync is required. This is set during a rollback.
    // Spilling is also prohibited when in an error state since that could lead to database corruption. In the current implementaton it
    // is impossible for sqlite3PCacheFetch() to be called with createFlag==1 while in the error state, hence it is impossible for this routine to
    // be called in the error state. Nevertheless, we include a NEVER() test for the error state as a safeguard against future changes.
    if (Check.NEVER(pPager.errCode != 0))
    {
        return(RC.OK);
    }
    if (pPager.doNotSpill != 0)
    {
        return(RC.OK);
    }
    if (pPager.doNotSyncSpill != 0 && (pPg.Flags & PgHdr.PGHDR.NEED_SYNC) != 0)
    {
        return(RC.OK);
    }
    pPg.Dirtys = null;
    if (pPager.pagerUseWal())
    {
        // Write a single frame for this page to the log.
        if (subjRequiresPage(pPg))
        {
            rc = subjournalPage(pPg);
        }
        if (rc == RC.OK)
        {
            rc = pPager.pagerWalFrames(pPg, 0, 0, 0);
        }
    }
    else
    {
        // Sync the journal file if required.
        if ((pPg.Flags & PgHdr.PGHDR.NEED_SYNC) != 0 || pPager.eState == PAGER.WRITER_CACHEMOD)
        {
            rc = pPager.syncJournal(1);
        }
        // If the page number of this page is larger than the current size of the database image, it may need to be written to the sub-journal.
        // This is because the call to pager_write_pagelist() below will not actually write data to the file in this case.
        // Consider the following sequence of events:
        //   BEGIN;
        //     <journal page X>
        //     <modify page X>
        //     SAVEPOINT sp;
        //       <shrink database file to Y pages>
        //       pagerStress(page X)
        //     ROLLBACK TO sp;
        // If (X>Y), then when pagerStress is called page X will not be written out to the database file, but will be dropped from the cache. Then,
        // following the "ROLLBACK TO sp" statement, reading page X will read data from the database file. This will be the copy of page X as it
        // was when the transaction started, not as it was when "SAVEPOINT sp" was executed.
        // The solution is to write the current data for page X into the sub-journal file now (if it is not already there), so that it will
        // be restored to its current value when the "ROLLBACK TO sp" is executed.
        if (Check.NEVER(rc == RC.OK && pPg.ID > pPager.dbSize && subjRequiresPage(pPg)))
        {
            rc = subjournalPage(pPg);
        }
        // Write the contents of the page out to the database file.
        if (rc == RC.OK)
        {
            Debug.Assert((pPg.Flags & PgHdr.PGHDR.NEED_SYNC) == 0);
            rc = pPager.pager_write_pagelist(pPg);
        }
    }
    // Mark the page as clean.
    if (rc == RC.OK)
    {
        PAGERTRACE("STRESS {0} page {1}", PAGERID(pPager), pPg.ID);
        PCache.MakePageClean(pPg);
    }
    return(pPager.pager_error(rc));
}
// was:sqlite3PcacheRelease
// Drop one reference to page p. When the last reference goes away the
// page is unpinned if clean, or moved to the head of the dirty list if dirty.
internal static void ReleasePage(PgHdr p)
{
    Debug.Assert(p.Refs > 0);
    p.Refs--;
    if (p.Refs == 0)
    {
        var pCache = p.Cache;
        pCache.nRef--;
        if ((p.Flags & PgHdr.PGHDR.DIRTY) == 0)
            pcacheUnpin(p);
        else
        {
            // Move the page to the head of the dirty list.
            pcacheRemoveFromDirtyList(p);
            pcacheAddToDirtyList(p);
        }
    }
}
// Write every page on the Dirtys-linked list pList out to the database
// file. Only called for rollback pagers in WRITER_DBMOD state. Pages past
// the current database image (dbSize) or flagged DONT_WRITE are skipped.
// Returns RC.OK, RC.NOMEM when the codec cannot allocate, or an I/O error.
private RC pager_write_pagelist(PgHdr pList)
{
    var rc = RC.OK;
    // This function is only called for rollback pagers in WRITER_DBMOD state.
    Debug.Assert(!pagerUseWal());
    Debug.Assert(this.eState == PAGER.WRITER_DBMOD);
    Debug.Assert(this.eLock == VFSLOCK.EXCLUSIVE);
    // If the file is a temp-file has not yet been opened, open it now. It is not possible for rc to be other than SQLITE.OK if this branch
    // is taken, as pager_wait_on_lock() is a no-op for temp-files.
    if (!this.fd.IsOpen)
    {
        Debug.Assert(this.tempFile && rc == RC.OK);
        rc = pagerOpentemp(ref this.fd, this.vfsFlags);
    }
    // Before the first write, give the VFS a hint of what the final file size will be.
    Debug.Assert(rc != RC.OK || this.fd.IsOpen);
    if (rc == RC.OK && this.dbSize > this.dbHintSize)
    {
        long szFile = this.pageSize * (long)this.dbSize;
        this.fd.SetFileControl(VirtualFile.FCNTL.SIZE_HINT, ref szFile);
        this.dbHintSize = this.dbSize;
    }
    // BUG FIX: "pList" alone is not a valid boolean in C#; the C condition
    // must become an explicit null test.
    while (rc == RC.OK && pList != null)
    {
        var pgno = pList.ID;
        // If there are dirty pages in the page cache with page numbers greater than Pager.dbSize, this means sqlite3PagerTruncateImage() was called to
        // make the file smaller (presumably by auto-vacuum code). Do not write any such pages to the file.
        // Also, do not write out any page that has the PGHDR_DONT_WRITE flag set (set by sqlite3PagerDontWrite()).
        if (pList.ID <= this.dbSize && 0 == (pList.Flags & PgHdr.PGHDR.DONT_WRITE))
        {
            Debug.Assert((pList.Flags & PgHdr.PGHDR.NEED_SYNC) == 0);
            if (pList.ID == 1)
                pager_write_changecounter(pList);
            // Encode the database
            byte[] pData = null; // Data to write
            if (CODEC2(this, pList.Data, pgno, codec_ctx.ENCRYPT_WRITE_CTX, ref pData))
                return RC.NOMEM;
            // Write out the page data.
            long offset = (pList.ID - 1) * (long)this.pageSize; // Offset to write
            rc = this.fd.Write(pData, this.pageSize, offset);
            // If page 1 was just written, update Pager.dbFileVers to match the value now stored in the database file. If writing this
            // page caused the database file to grow, update dbFileSize.
            if (pgno == 1)
                Buffer.BlockCopy(pData, 24, this.dbFileVers, 0, this.dbFileVers.Length);
            if (pgno > this.dbFileSize)
                this.dbFileSize = pgno;
            // Update any backup objects copying the contents of this pager.
            if (this.pBackup != null)
                this.pBackup.sqlite3BackupUpdate(pgno, pList.Data);
            PAGERTRACE("STORE {0} page {1} hash({2,08:x})", PAGERID(this), pgno, pager_pagehash(pList));
            SysEx.IOTRACE("PGOUT {0:x} {1}", this.GetHashCode(), pgno);
        }
        else
            PAGERTRACE("NOSTORE {0} page {1}", PAGERID(this), pgno);
        pager_set_pagehash(pList);
        pList = pList.Dirtys;
    }
    return rc;
}
// was:sqlite3PcacheFetch
// Look up page pgno in this cache, optionally (createFlag==1) creating it.
// If creation stalls because the pluggable cache is full, an unreferenced
// dirty page is spilled via xStress to make room. On success ppPage is set
// and its ref-count incremented; returns RC.NOMEM only when creation was
// requested but ultimately failed.
internal RC FetchPage(Pgno pgno, int createFlag, ref PgHdr ppPage)
{
    Debug.Assert(createFlag == 1 || createFlag == 0);
    Debug.Assert(pgno > 0);
    // If the pluggable cache (sqlite3_pcache*) has not been allocated, allocate it now.
    if (pCache == null && createFlag != 0)
    {
        var nByte = szPage + szExtra + 0;
        var p = IPCache.xCreate(nByte, bPurgeable);
        p.xCachesize(nMax);
        pCache = p;
    }
    // eCreate: 0 = do not create; 1 = create only if easy; 2 = force create.
    // NOTE(review): semantics inferred from upstream SQLite — confirm against xFetch.
    var eCreate = createFlag * (1 + ((!bPurgeable || null == pDirty) ? 1 : 0));
    PgHdr pPage = null;
    if (pCache != null)
        pPage = pCache.xFetch(pgno, eCreate);
    if (pPage == null && eCreate == 1)
    {
        PgHdr pPg;
        // Find a dirty page to write-out and recycle. First try to find a page that does not require a journal-sync (one with PGHDR_NEED_SYNC
        // cleared), but if that is not possible settle for any other unreferenced dirty page.
#if SQLITE_ENABLE_EXPENSIVE_ASSERT
        expensive_assert(pcacheCheckSynced(pCache));
#endif
        for (pPg = pSynced; pPg != null && (pPg.Refs != 0 || (pPg.Flags & PgHdr.PGHDR.NEED_SYNC) != 0); pPg = pPg.DirtyPrev)
            ;
        pSynced = pPg;
        if (pPg == null)
            for (pPg = pDirtyTail; pPg != null && pPg.Refs != 0; pPg = pPg.DirtyPrev)
                ;
        if (pPg != null)
        {
#if SQLITE_LOG_CACHE_SPILL
            sqlite3_log(SQLITE_FULL, "spill page %d making room for %d - cache used: %d/%d", pPg.pgno, pgno, sqlite3GlobalConfig.pcache.xPagecount(pCache.pCache), pCache.nMax);
#endif
            // Spill the chosen dirty page; only OK/BUSY allow the fetch to continue.
            var rc = xStress(pStress, pPg);
            if (rc != RC.OK && rc != RC.BUSY)
                return rc;
        }
        pPage = pCache.xFetch(pgno, 2);
    }
    if (pPage != null)
    {
        // First use of this header: attach data buffer and identity.
        if (pPage.Data == null)
        {
            pPage.Data = MallocEx.sqlite3Malloc(pCache.szPage);
            pPage.Cache = this;
            pPage.ID = pgno;
        }
        Debug.Assert(pPage.Cache == this);
        Debug.Assert(pPage.ID == pgno);
        if (pPage.Refs == 0)
            nRef++;
        pPage.Refs++;
        if (pgno == 1)
            pPage1 = pPage;
    }
    ppPage = pPage;
    return (pPage == null && eCreate != 0 ? RC.NOMEM : RC.OK);
}
// Cache-spill callback (xStress) invoked by the pcache layer when it needs
// to free a dirty page pPg: write the page to the WAL or database file and
// mark it clean. Several guard flags can turn this into a harmless no-op.
private static RC pagerStress(object p, PgHdr pPg)
{
    var pPager = (Pager)p;
    var rc = RC.OK;
    Debug.Assert(pPg.Pager == pPager);
    Debug.Assert((pPg.Flags & PgHdr.PGHDR.DIRTY) != 0);
    // The doNotSyncSpill flag is set during times when doing a sync of journal (and adding a new header) is not allowed. This occurs
    // during calls to sqlite3PagerWrite() while trying to journal multiple pages belonging to the same sector.
    // The doNotSpill flag inhibits all cache spilling regardless of whether or not a sync is required. This is set during a rollback.
    // Spilling is also prohibited when in an error state since that could lead to database corruption. In the current implementaton it
    // is impossible for sqlite3PCacheFetch() to be called with createFlag==1 while in the error state, hence it is impossible for this routine to
    // be called in the error state. Nevertheless, we include a NEVER() test for the error state as a safeguard against future changes.
    if (Check.NEVER(pPager.errCode != 0))
        return RC.OK;
    if (pPager.doNotSpill != 0)
        return RC.OK;
    if (pPager.doNotSyncSpill != 0 && (pPg.Flags & PgHdr.PGHDR.NEED_SYNC) != 0)
        return RC.OK;
    pPg.Dirtys = null;
    if (pPager.pagerUseWal())
    {
        // Write a single frame for this page to the log.
        if (subjRequiresPage(pPg))
            rc = subjournalPage(pPg);
        if (rc == RC.OK)
            rc = pPager.pagerWalFrames(pPg, 0, 0, 0);
    }
    else
    {
        // Sync the journal file if required.
        if ((pPg.Flags & PgHdr.PGHDR.NEED_SYNC) != 0 || pPager.eState == PAGER.WRITER_CACHEMOD)
            rc = pPager.syncJournal(1);
        // If the page number of this page is larger than the current size of the database image, it may need to be written to the sub-journal.
        // This is because the call to pager_write_pagelist() below will not actually write data to the file in this case.
        // Consider the following sequence of events:
        //   BEGIN;
        //     <journal page X>
        //     <modify page X>
        //     SAVEPOINT sp;
        //       <shrink database file to Y pages>
        //       pagerStress(page X)
        //     ROLLBACK TO sp;
        // If (X>Y), then when pagerStress is called page X will not be written out to the database file, but will be dropped from the cache. Then,
        // following the "ROLLBACK TO sp" statement, reading page X will read data from the database file. This will be the copy of page X as it
        // was when the transaction started, not as it was when "SAVEPOINT sp" was executed.
        // The solution is to write the current data for page X into the sub-journal file now (if it is not already there), so that it will
        // be restored to its current value when the "ROLLBACK TO sp" is executed.
        if (Check.NEVER(rc == RC.OK && pPg.ID > pPager.dbSize && subjRequiresPage(pPg)))
            rc = subjournalPage(pPg);
        // Write the contents of the page out to the database file.
        if (rc == RC.OK)
        {
            Debug.Assert((pPg.Flags & PgHdr.PGHDR.NEED_SYNC) == 0);
            rc = pPager.pager_write_pagelist(pPg);
        }
    }
    // Mark the page as clean.
    if (rc == RC.OK)
    {
        PAGERTRACE("STRESS {0} page {1}", PAGERID(pPager), pPg.ID);
        PCache.MakePageClean(pPg);
    }
    return pPager.pager_error(rc);
}
// was:sqlite3PagerWrite
// Mark pDbPage writable. When the sector size is larger than the page size,
// every sibling page sharing pDbPage's disk sector must be journaled together
// (a partial-sector write could damage them all), so this routine journals the
// whole sector's worth of pages before delegating to pager_write().
//
// Returns RC.OK on success, or the first error from journaling any page.
public static RC Write(DbPage pDbPage)
{
    var rc = RC.OK;
    var pPg = pDbPage;
    var pPager = pPg.Pager;
    var nPagePerSector = (uint)(pPager.sectorSize / pPager.pageSize);
    Debug.Assert(pPager.eState >= PAGER.WRITER_LOCKED);
    Debug.Assert(pPager.eState != PAGER.ERROR);
    Debug.Assert(pPager.assert_pager_state());
    if (nPagePerSector > 1)
    {
        Pgno nPageCount = 0;    // Total number of pages in database file
        Pgno pg1;               // First page of the sector pPg is located on.
        Pgno nPage = 0;         // Number of pages starting at pg1 to journal
        bool needSync = false;  // True if any page has PGHDR_NEED_SYNC
        // Set the doNotSyncSpill flag to 1. This is because we cannot allow a journal header to be written between the pages journaled by
        // this function.
        Debug.Assert(
#if SQLITE_OMIT_MEMORYDB
0==MEMDB
#else
0 == pPager.memDb
#endif
        );
        Debug.Assert(pPager.doNotSyncSpill == 0);
        pPager.doNotSyncSpill++;
        // This trick assumes that both the page-size and sector-size are an integer power of 2. It sets variable pg1 to the identifier
        // of the first page of the sector pPg is located on.
        pg1 = (Pgno)((pPg.ID - 1) & ~(nPagePerSector - 1)) + 1;
        nPageCount = pPager.dbSize;
        // Clamp the page count so we never journal past the end of the file
        // (except to cover pPg itself when it extends the file).
        if (pPg.ID > nPageCount)
            nPage = (pPg.ID - pg1) + 1;
        else if ((pg1 + nPagePerSector - 1) > nPageCount)
            nPage = nPageCount + 1 - pg1;
        else
            nPage = nPagePerSector;
        Debug.Assert(nPage > 0);
        Debug.Assert(pg1 <= pPg.ID);
        Debug.Assert((pg1 + nPage) > pPg.ID);
        for (var ii = 0; ii < nPage && rc == RC.OK; ii++)
        {
            var pg = (Pgno)(pg1 + ii);
            var pPage = new PgHdr();
            // Journal pages that are not yet in the journal (plus pPg itself),
            // skipping the pending-byte locking page.
            if (pg == pPg.ID || pPager.pInJournal.Get(pg) == 0)
            {
                if (pg != ((VirtualFile.PENDING_BYTE / (pPager.pageSize)) + 1))
                {
                    rc = pPager.Get(pg, ref pPage);
                    if (rc == RC.OK)
                    {
                        rc = pager_write(pPage);
                        if ((pPage.Flags & PgHdr.PGHDR.NEED_SYNC) != 0)
                            needSync = true;
                        Unref(pPage);
                    }
                }
            }
            else if ((pPage = pPager.pager_lookup(pg)) != null)
            {
                // Already journaled: just inspect the cached copy's sync flag.
                if ((pPage.Flags & PgHdr.PGHDR.NEED_SYNC) != 0)
                    needSync = true;
                Unref(pPage);
            }
        }
        // If the PGHDR_NEED_SYNC flag is set for any of the nPage pages starting at pg1, then it needs to be set for all of them. Because
        // writing to any of these nPage pages may damage the others, the journal file must contain sync()ed copies of all of them
        // before any of them can be written out to the database file.
        if (rc == RC.OK && needSync)
        {
            Debug.Assert(
#if SQLITE_OMIT_MEMORYDB
0==MEMDB
#else
0 == pPager.memDb
#endif
            );
            for (var ii = 0; ii < nPage; ii++)
            {
                var pPage = pPager.pager_lookup((Pgno)(pg1 + ii));
                if (pPage != null)
                {
                    pPage.Flags |= PgHdr.PGHDR.NEED_SYNC;
                    Unref(pPage);
                }
            }
        }
        Debug.Assert(pPager.doNotSyncSpill == 1);
        pPager.doNotSyncSpill--;
    }
    else
        rc = pager_write(pDbPage);
    return rc;
}
// was:pageInJournal
// Reports whether page pPg has already been recorded in its pager's
// in-journal bitvec (i.e. the page's original content is in the rollback journal).
private static bool pageInJournal(PgHdr pPg)
{
    var pager = pPg.Pager;
    var journaled = pager.pInJournal.Get(pPg.ID);
    return journaled;
}
/*
** This function is a wrapper around sqlite3WalFrames(). As well as logging
** the contents of the list of pages headed by pList (connected by pDirty),
** this function notifies any active backup processes that the pages have
** changed.
**
** The list of pages passed into this routine is always sorted by page number.
** Hence, if page 1 appears anywhere on the list, it will be the first page.
*/
// NOTE(review): this body is un-ported C (pointer syntax `p->pDirty`, `PgHdr*`,
// `assert`, `SQLITE.OK` vs `RC.OK`) and cannot compile as C#. It also appears a
// second time later in this file with identical logic. Port it to the managed
// PgHdr list idioms used elsewhere (e.g. pagerStress) and delete the duplicate.
// NOTE(review): the looping variable `p` is declared under
// `#if DEBUG || (SQLITE_CHECK_PAGES)` but used under `#if SQLITE_DEBUG` —
// the conditional symbols do not match; confirm which symbol is intended.
static int pagerWalFrames(Pager pPager, PgHdr pList, Pgno nTruncate, int isCommit, int syncFlags)
{
    int rc; /* Return code */
#if DEBUG || (SQLITE_CHECK_PAGES)
    PgHdr p; /* For looping over pages */
#endif
    Debug.Assert(pPager.pWal);
#if SQLITE_DEBUG
    /* Verify that the page list is in accending order */
    for(p=pList; p && p->pDirty; p=p->pDirty){ assert( p->pgno < p->pDirty->pgno ); }
#endif
    if (isCommit)
    {
        /* If a WAL transaction is being committed, there is no point in writing
        ** any pages with page numbers greater than nTruncate into the WAL file.
        ** They will never be read by any client. So remove them from the pDirty
        ** list here. */
        PgHdr* p;
        PgHdr** ppNext = &pList;
        for (p = pList; (*ppNext = p); p = p->pDirty)
        {
            if (p->pgno <= nTruncate) ppNext = &p->pDirty;
        }
        assert(pList);
    }
    if (pList->pgno == 1) pager_write_changecounter(pList);
    rc = sqlite3WalFrames(pPager.pWal, pPager.pageSize, pList, nTruncate, isCommit, syncFlags );
    if (rc == SQLITE.OK && pPager.pBackup)
    {
        PgHdr* p;
        for (p = pList; p; p = p->pDirty)
        {
            sqlite3BackupUpdate(pPager.pBackup, p->pgno, (u8*)p->pData);
        }
    }
#if SQLITE_CHECK_PAGES
    pList = sqlite3PcacheDirtyList(pPager.pPCache);
    for(p=pList; p; p=p->pDirty){ pager_set_pagehash(p); }
#endif
    return rc;
}
// was:checkList
// Integrity-check a linked list of pages: either a freelist (isFreeList != 0),
// where each trunk page holds a count and an array of leaf page numbers, or an
// overflow-page chain (isFreeList == 0). iPage is the first page of the list,
// N the expected number of pages, and zContext a description prepended to any
// error messages appended to pCheck.
internal static void checkList(IntegrityCk pCheck, int isFreeList, int iPage, int N, string zContext)
{
    int i;
    int expected = N;   // original expected length, for the error message
    int iFirst = iPage; // first page of the list, for the error message
    while (N-- > 0 && pCheck.mxErr != 0)
    {
        var pOvflPage = new PgHdr();
        // A zero/negative page number before N pages were seen means the list
        // is truncated.
        if (iPage < 1)
        {
            checkAppendMsg(pCheck, zContext, "%d of %d pages missing from overflow list starting at %d", N + 1, expected, iFirst);
            break;
        }
        // checkRef also detects pages referenced more than once; stop on error.
        if (checkRef(pCheck, (uint)iPage, zContext) != 0)
        {
            break;
        }
        byte[] pOvflData;
        if (Pager.sqlite3PagerGet(pCheck.pPager, (Pgno)iPage, ref pOvflPage) != 0)
        {
            checkAppendMsg(pCheck, zContext, "failed to get page %d", iPage);
            break;
        }
        pOvflData = Pager.sqlite3PagerGetData(pOvflPage);
        if (isFreeList != 0)
        {
            // Freelist trunk page: bytes 4..7 hold the number of leaf pointers.
            int n = (int)ConvertEx.sqlite3Get4byte(pOvflData, 4);
#if !SQLITE_OMIT_AUTOVACUUM
            if (pCheck.pBt.autoVacuum)
            {
                checkPtrmap(pCheck, (uint)iPage, PTRMAP_FREEPAGE, 0, zContext);
            }
#endif
            if (n > (int)pCheck.pBt.usableSize / 4 - 2)
            {
                checkAppendMsg(pCheck, zContext, "freelist leaf count too big on page %d", iPage);
                // Consume one extra count so the final "pages missing" report
                // stays consistent despite the bogus leaf count.
                N--;
            }
            else
            {
                // Check each freelist leaf referenced from this trunk page.
                for (i = 0; i < n; i++)
                {
                    Pgno iFreePage = ConvertEx.sqlite3Get4byte(pOvflData, 8 + i * 4);
#if !SQLITE_OMIT_AUTOVACUUM
                    if (pCheck.pBt.autoVacuum)
                    {
                        checkPtrmap(pCheck, iFreePage, PTRMAP_FREEPAGE, 0, zContext);
                    }
#endif
                    checkRef(pCheck, iFreePage, zContext);
                }
                // The leaves count toward the expected list length.
                N -= n;
            }
        }
#if !SQLITE_OMIT_AUTOVACUUM
        else
        {
            /* If this database supports auto-vacuum and iPage is not the last
            ** page in this overflow list, check that the pointer-map entry for
            ** the following page matches iPage.
            */
            if (pCheck.pBt.autoVacuum && N > 0)
            {
                i = (int)ConvertEx.sqlite3Get4byte(pOvflData);
                checkPtrmap(pCheck, (uint)i, PTRMAP_OVERFLOW2, (uint)iPage, zContext);
            }
        }
#endif
        // First 4 bytes of the page give the next page in the list.
        iPage = (int)ConvertEx.sqlite3Get4byte(pOvflData);
        Pager.sqlite3PagerUnref(pOvflPage);
    }
}
// was:readDbPage
// Read the content of page pPg from the database: first try the WAL (when in
// WAL mode), then fall back to the database file. A short read from the file
// is treated as success (the page is presumably past EOF; its zeroed content
// stands — TODO confirm Data is pre-zeroed by the cache). On page 1, also
// refresh (or poison, on error) the cached dbFileVers change-detection bytes.
private static RC readDbPage(PgHdr pPg)
{
    var pPager = pPg.Pager;       // Pager object associated with page pPg
    var pgno = pPg.ID;            // Page number to read
    var rc = RC.OK;               // Return code
    var isInWal = 0;              // True if page is in log file
    var pgsz = pPager.pageSize;   // Number of bytes to read
    Debug.Assert(pPager.eState >= PAGER.READER &&
#if SQLITE_OMIT_MEMORYDB
0 == MEMDB
#else
0 == pPager.memDb
#endif
    );
    Debug.Assert(pPager.fd.IsOpen);
    if (Check.NEVER(!pPager.fd.IsOpen))
    {
        // Temp file not yet materialized on disk: the page content is zeros.
        Debug.Assert(pPager.tempFile);
        Array.Clear(pPg.Data, 0, pPager.pageSize);
        return RC.OK;
    }
    if (pPager.pagerUseWal())
        // Try to pull the page from the write-ahead log.
        rc = pPager.pWal.Read(pgno, ref isInWal, pgsz, pPg.Data);
    if (rc == RC.OK && 0 == isInWal)
    {
        long iOffset = (pgno - 1) * (long)pPager.pageSize;
        rc = pPager.fd.Read(pPg.Data, pgsz, iOffset);
        if (rc == RC.IOERR_SHORT_READ)
            rc = RC.OK;
    }
    if (pgno == 1)
    {
        if (rc != 0)
            // If the read is unsuccessful, set the dbFileVers[] to something that will never be a valid file version. dbFileVers[] is a copy
            // of bytes 24..39 of the database. Bytes 28..31 should always be zero or the size of the database in page. Bytes 32..35 and 35..39
            // should be page numbers which are never 0xffffffff. So filling pPager.dbFileVers[] with all 0xff bytes should suffice.
            //
            // For an encrypted database, the situation is more complex: bytes 24..39 of the database are white noise. But the probability of
            // white noising equaling 16 bytes of 0xff is vanishingly small so we should still be ok.
            for (int i = 0; i < pPager.dbFileVers.Length; pPager.dbFileVers[i++] = 0xff) ; // memset(pPager.dbFileVers, 0xff, sizeof(pPager.dbFileVers));
        else
            Buffer.BlockCopy(pPg.Data, 24, pPager.dbFileVers, 0, pPager.dbFileVers.Length);
    }
    // Decrypt in place when a codec is configured.
    if (CODEC1(pPager, pPg.Data, pgno, codec_ctx.DECRYPT))
        rc = RC.NOMEM;
    SysEx.IOTRACE("PGIN {0:x} {1}", pPager.GetHashCode(), pgno);
    PAGERTRACE("FETCH {0} page {1}% hash({2,08:x})", PAGERID(pPager), pgno, pager_pagehash(pPg));
    return rc;
}
// was:fetchPayload
// Read from (writeOperation == false) or write to (writeOperation == true) the
// payload of the cell the cursor currently points at. `offset` is the byte
// offset within the payload, `size` the number of bytes, and `b` the caller's
// buffer. Handles payload stored locally on the btree page and payload spilled
// onto the overflow-page chain; returns SQLITE_CORRUPT if the requested range
// runs past the end of the payload or the chain ends early.
private RC AccessPayload(uint offset, uint size, byte[] b, bool writeOperation)
{
    var page = Pages[PageID]; // Btree page of current entry
    Debug.Assert(page != null);
    Debug.Assert(State == CursorState.VALID);
    Debug.Assert(PagesIndexs[PageID] < page.Cells);
    Debug.Assert(HoldsMutex());
    GetCellInfo();
    var payload = Info.Cells;
    var nKey = (uint)(page.HasIntKey ? 0 : (int)Info.nKey);
    var shared = Shared; // Btree this cursor belongs to
    if (Check.NEVER(offset + size > nKey + Info.nData) || Info.nLocal > shared.UsableSize)
    {
        // Trying to read or write past the end of the data is an error
        return(SysEx.SQLITE_CORRUPT_BKPT());
    }
    // Check if data must be read/written to/from the btree page itself.
    var rc = RC.OK;
    uint bOffset = 0;
    if (offset < Info.nLocal)
    {
        // Part (or all) of the range lies in the local portion of the cell.
        var a = (int)size;
        if (a + offset > Info.nLocal)
        {
            a = (int)(Info.nLocal - offset);
        }
        rc = CopyPayload(page.DbPage, payload, (uint)(offset + Info.CellID + Info.nHeader), b, bOffset, (uint)a, writeOperation);
        offset = 0;
        bOffset += (uint)a;
        size -= (uint)a;
    }
    else
    {
        // Entire range is on overflow pages; rebase offset past the local part.
        offset -= Info.nLocal;
    }
    var iIdx = 0;
    if (rc == RC.OK && size > 0)
    {
        var ovflSize = (uint)(shared.UsableSize - 4); // Bytes content per ovfl page
        // First 4 bytes after the local payload give the first overflow page.
        var nextPage = (Pgno)ConvertEx.Get4(payload, Info.nLocal + Info.CellID + Info.nHeader);
#if !SQLITE_OMIT_INCRBLOB
        // If the isIncrblobHandle flag is set and the BtCursor.aOverflow[] has not been allocated, allocate it now. The array is sized at
        // one entry for each overflow page in the overflow chain. The page number of the first overflow page is stored in aOverflow[0],
        // etc. A value of 0 in the aOverflow[] array means "not yet known" (the cache is lazily populated).
        if (IsIncrblob && OverflowIDs == null)
        {
            var nOvfl = (Info.nPayload - Info.nLocal + ovflSize - 1) / ovflSize;
            OverflowIDs = new Pgno[nOvfl];
        }
        // If the overflow page-list cache has been allocated and the entry for the first required overflow page is valid, skip directly to it.
        if (OverflowIDs != null && OverflowIDs[offset / ovflSize] != 0)
        {
            iIdx = (int)(offset / ovflSize);
            nextPage = OverflowIDs[iIdx];
            offset = (offset % ovflSize);
        }
#endif
        for (; rc == RC.OK && size > 0 && nextPage != 0; iIdx++)
        {
#if !SQLITE_OMIT_INCRBLOB
            // If required, populate the overflow page-list cache.
            if (OverflowIDs != null)
            {
                Debug.Assert(OverflowIDs[iIdx] == 0 || OverflowIDs[iIdx] == nextPage);
                OverflowIDs[iIdx] = nextPage;
            }
#endif
            MemPage MemPageDummy = null;
            if (offset >= ovflSize)
            {
                // The only reason to read this page is to obtain the page number for the next page in the overflow chain. The page
                // data is not required. So first try to lookup the overflow page-list cache, if any, then fall back to the getOverflowPage() function.
#if !SQLITE_OMIT_INCRBLOB
                if (OverflowIDs != null && OverflowIDs[iIdx + 1] != 0)
                {
                    nextPage = OverflowIDs[iIdx + 1];
                }
                else
#endif
                    rc = shared.getOverflowPage(nextPage, out MemPageDummy, out nextPage);
                offset -= ovflSize;
            }
            else
            {
                // Need to read this page properly. It contains some of the range of data that is being read (eOp==null) or written (eOp!=null).
                var pDbPage = new PgHdr();
                var a = (int)size;
                rc = shared.Pager.Get(nextPage, ref pDbPage);
                if (rc == RC.OK)
                {
                    payload = Pager.sqlite3PagerGetData(pDbPage);
                    // First 4 bytes of an overflow page are the next-page link.
                    nextPage = ConvertEx.Get4(payload);
                    if (a + offset > ovflSize)
                    {
                        a = (int)(ovflSize - offset);
                    }
                    rc = CopyPayload(pDbPage, payload, offset + 4, b, bOffset, (uint)a, writeOperation);
                    Pager.Unref(pDbPage);
                    offset = 0;
                    size -= (uint)a;
                    bOffset += (uint)a;
                }
            }
        }
    }
    // Chain ended before the requested range was satisfied: corruption.
    if (rc == RC.OK && size > 0)
    {
        return(SysEx.SQLITE_CORRUPT_BKPT());
    }
    return(rc);
}
// was:pager_write
// Mark a single page writable: open the rollback journal if needed, mark the
// page dirty in the cache, write the page's original content (page number,
// data, checksum) into the rollback journal if it is not already there, and
// write it to the statement sub-journal if an open savepoint requires it.
// Finally grows pPager.dbSize if this page extends the database image.
//
// Returns RC.OK, or RC.PERM / RC.NOMEM / an I/O error code on failure.
private static RC pager_write(PgHdr pPg)
{
    var pData = pPg.Data;
    var pPager = pPg.Pager;
    var rc = RC.OK;
    // This routine is not called unless a write-transaction has already been started. The journal file may or may not be open at this point.
    // It is never called in the ERROR state.
    Debug.Assert(pPager.eState == PAGER.WRITER_LOCKED || pPager.eState == PAGER.WRITER_CACHEMOD || pPager.eState == PAGER.WRITER_DBMOD);
    Debug.Assert(pPager.assert_pager_state());
    // If an error has been previously detected, report the same error again. This should not happen, but the check provides robustness.
    if (Check.NEVER(pPager.errCode) != RC.OK)
    {
        return(pPager.errCode);
    }
    // Higher-level routines never call this function if database is not writable. But check anyway, just for robustness.
    if (Check.NEVER(pPager.readOnly))
    {
        return(RC.PERM);
    }
#if SQLITE_CHECK_PAGES
    CHECK_PAGE(pPg);
#endif
    // The journal file needs to be opened. Higher level routines have already obtained the necessary locks to begin the write-transaction, but the
    // rollback journal might not yet be open. Open it now if this is the case.
    //
    // This is done before calling sqlite3PcacheMakeDirty() on the page. Otherwise, if it were done after calling sqlite3PcacheMakeDirty(), then
    // an error might occur and the pager would end up in WRITER_LOCKED state with pages marked as dirty in the cache.
    if (pPager.eState == PAGER.WRITER_LOCKED)
    {
        rc = pPager.pager_open_journal();
        if (rc != RC.OK)
        {
            return(rc);
        }
    }
    Debug.Assert(pPager.eState >= PAGER.WRITER_CACHEMOD);
    Debug.Assert(pPager.assert_pager_state());
    // Mark the page as dirty. If the page has already been written to the journal then we can return right away.
    PCache.MakePageDirty(pPg);
    if (pageInJournal(pPg) && !subjRequiresPage(pPg))
    {
        Debug.Assert(!pPager.pagerUseWal());
    }
    else
    {
        // The transaction journal now exists and we have a RESERVED or an EXCLUSIVE lock on the main database file. Write the current page to
        // the transaction journal if it is not there already.
        if (!pageInJournal(pPg) && !pPager.pagerUseWal())
        {
            Debug.Assert(!pPager.pagerUseWal());
            // Pages beyond the original database size need no journal copy:
            // rollback simply truncates them away.
            if (pPg.ID <= pPager.dbOrigSize && pPager.jfd.IsOpen)
            {
                var iOff = pPager.journalOff;
                // We should never write to the journal file the page that contains the database locks. The following Debug.Assert verifies that we do not.
                Debug.Assert(pPg.ID != ((VirtualFile.PENDING_BYTE / (pPager.pageSize)) + 1));
                Debug.Assert(pPager.journalHdr <= pPager.journalOff);
                byte[] pData2 = null;
                if (CODEC2(pPager, pData, pPg.ID, codec_ctx.ENCRYPT_READ_CTX, ref pData2))
                {
                    return(RC.NOMEM);
                }
                var cksum = pPager.pager_cksum(pData2);
                // Even if an IO or diskfull error occurred while journalling the page in the block above, set the need-sync flag for the page.
                // Otherwise, when the transaction is rolled back, the logic in playback_one_page() will think that the page needs to be restored
                // in the database file. And if an IO error occurs while doing so, then corruption may follow.
                pPg.Flags |= PgHdr.PGHDR.NEED_SYNC;
                // Journal record layout: 4-byte page number, page data, 4-byte checksum.
                rc = pPager.jfd.WriteByte(iOff, pPg.ID);
                if (rc != RC.OK)
                {
                    return(rc);
                }
                rc = pPager.jfd.Write(pData2, pPager.pageSize, iOff + 4);
                if (rc != RC.OK)
                {
                    return(rc);
                }
                rc = pPager.jfd.WriteByte(iOff + pPager.pageSize + 4, cksum);
                if (rc != RC.OK)
                {
                    return(rc);
                }
                SysEx.IOTRACE("JOUT {0:x} {1} {2,11} {3}", pPager.GetHashCode(), pPg.ID, pPager.journalOff, pPager.pageSize);
                PAGERTRACE("JOURNAL {0} page {1} needSync={2} hash({3,08:x})", PAGERID(pPager), pPg.ID, (pPg.Flags & PgHdr.PGHDR.NEED_SYNC) != 0 ? 1 : 0, pager_pagehash(pPg));
                pPager.journalOff += 8 + pPager.pageSize;
                pPager.nRec++;
                Debug.Assert(pPager.pInJournal != null);
                rc = pPager.pInJournal.Set(pPg.ID);
                Debug.Assert(rc == RC.OK || rc == RC.NOMEM);
                rc |= pPager.addToSavepointBitvecs(pPg.ID);
                if (rc != RC.OK)
                {
                    Debug.Assert(rc == RC.NOMEM);
                    return(rc);
                }
            }
            else
            {
                // Page needs no journal record; still mark it need-sync until
                // the pager reaches the DBMOD state.
                if (pPager.eState != PAGER.WRITER_DBMOD)
                {
                    pPg.Flags |= PgHdr.PGHDR.NEED_SYNC;
                }
                PAGERTRACE("APPEND {0} page {1} needSync={2}", PAGERID(pPager), pPg.ID, (pPg.Flags & PgHdr.PGHDR.NEED_SYNC) != 0 ? 1 : 0);
            }
        }
        // If the statement journal is open and the page is not in it, then write the current page to the statement journal. Note that
        // the statement journal format differs from the standard journal format in that it omits the checksums and the header.
        if (subjRequiresPage(pPg))
        {
            rc = subjournalPage(pPg);
        }
    }
    // Update the database size and return.
    if (pPager.dbSize < pPg.ID)
    {
        pPager.dbSize = pPg.ID;
    }
    return(rc);
}
/*
** This function is a wrapper around sqlite3WalFrames(). As well as logging
** the contents of the list of pages headed by pList (connected by pDirty),
** this function notifies any active backup processes that the pages have
** changed.
**
** The list of pages passed into this routine is always sorted by page number.
** Hence, if page 1 appears anywhere on the list, it will be the first page.
*/
// NOTE(review): duplicate definition — an identical un-ported C version of
// pagerWalFrames appears earlier in this file. Both use C pointer syntax
// (`p->pDirty`, `PgHdr *`, `assert`, `SQLITE.OK`) and cannot compile as C#.
// One copy should be ported to the managed idioms and the other deleted.
static int pagerWalFrames(Pager pPager, PgHdr pList, Pgno nTruncate, int isCommit, int syncFlags)
{
    int rc; /* Return code */
#if DEBUG || (SQLITE_CHECK_PAGES)
    PgHdr p; /* For looping over pages */
#endif
    Debug.Assert(pPager.pWal);
#if SQLITE_DEBUG
    /* Verify that the page list is in accending order */
    for (p = pList; p && p->pDirty; p = p->pDirty)
    {
        assert(p->pgno < p->pDirty->pgno);
    }
#endif
    if (isCommit)
    {
        /* If a WAL transaction is being committed, there is no point in writing
        ** any pages with page numbers greater than nTruncate into the WAL file.
        ** They will never be read by any client. So remove them from the pDirty
        ** list here. */
        PgHdr * p;
        PgHdr **ppNext = &pList;
        for (p = pList; (*ppNext = p); p = p->pDirty)
        {
            if (p->pgno <= nTruncate)
            {
                ppNext = &p->pDirty;
            }
        }
        assert(pList);
    }
    if (pList->pgno == 1)
    {
        pager_write_changecounter(pList);
    }
    rc = sqlite3WalFrames(pPager.pWal, pPager.pageSize, pList, nTruncate, isCommit, syncFlags );
    if (rc == SQLITE.OK && pPager.pBackup)
    {
        PgHdr *p;
        for (p = pList; p; p = p->pDirty)
        {
            sqlite3BackupUpdate(pPager.pBackup, p->pgno, (u8 *)p->pData);
        }
    }
#if SQLITE_CHECK_PAGES
    pList = sqlite3PcacheDirtyList(pPager.pPCache);
    for (p = pList; p; p = p->pDirty)
    {
        pager_set_pagehash(p);
    }
#endif
    return(rc);
}
// was:fetchPayload
// NOTE(review): duplicate definition — an identical (modulo brace style)
// AccessPayload appears earlier in this file; one copy should be removed.
// Read from (writeOperation == false) or write to (writeOperation == true) the
// payload of the cell the cursor currently points at, covering `size` bytes
// starting at byte `offset`, using buffer `b`. Handles both the local portion
// of the cell and the overflow-page chain; returns SQLITE_CORRUPT if the
// requested range runs past the payload or the chain ends early.
private RC AccessPayload(uint offset, uint size, byte[] b, bool writeOperation)
{
    var page = Pages[PageID]; // Btree page of current entry
    Debug.Assert(page != null);
    Debug.Assert(State == CursorState.VALID);
    Debug.Assert(PagesIndexs[PageID] < page.Cells);
    Debug.Assert(HoldsMutex());
    GetCellInfo();
    var payload = Info.Cells;
    var nKey = (uint)(page.HasIntKey ? 0 : (int)Info.nKey);
    var shared = Shared; // Btree this cursor belongs to
    if (Check.NEVER(offset + size > nKey + Info.nData) || Info.nLocal > shared.UsableSize)
        // Trying to read or write past the end of the data is an error
        return SysEx.SQLITE_CORRUPT_BKPT();
    // Check if data must be read/written to/from the btree page itself.
    var rc = RC.OK;
    uint bOffset = 0;
    if (offset < Info.nLocal)
    {
        // Part (or all) of the range lies in the local portion of the cell.
        var a = (int)size;
        if (a + offset > Info.nLocal)
            a = (int)(Info.nLocal - offset);
        rc = CopyPayload(page.DbPage, payload, (uint)(offset + Info.CellID + Info.nHeader), b, bOffset, (uint)a, writeOperation);
        offset = 0;
        bOffset += (uint)a;
        size -= (uint)a;
    }
    else
        // Entire range is on overflow pages; rebase offset past the local part.
        offset -= Info.nLocal;
    var iIdx = 0;
    if (rc == RC.OK && size > 0)
    {
        var ovflSize = (uint)(shared.UsableSize - 4); // Bytes content per ovfl page
        // First 4 bytes after the local payload give the first overflow page.
        var nextPage = (Pgno)ConvertEx.Get4(payload, Info.nLocal + Info.CellID + Info.nHeader);
#if !SQLITE_OMIT_INCRBLOB
        // If the isIncrblobHandle flag is set and the BtCursor.aOverflow[] has not been allocated, allocate it now. The array is sized at
        // one entry for each overflow page in the overflow chain. The page number of the first overflow page is stored in aOverflow[0],
        // etc. A value of 0 in the aOverflow[] array means "not yet known" (the cache is lazily populated).
        if (IsIncrblob && OverflowIDs == null)
        {
            var nOvfl = (Info.nPayload - Info.nLocal + ovflSize - 1) / ovflSize;
            OverflowIDs = new Pgno[nOvfl];
        }
        // If the overflow page-list cache has been allocated and the entry for the first required overflow page is valid, skip directly to it.
        if (OverflowIDs != null && OverflowIDs[offset / ovflSize] != 0)
        {
            iIdx = (int)(offset / ovflSize);
            nextPage = OverflowIDs[iIdx];
            offset = (offset % ovflSize);
        }
#endif
        for (; rc == RC.OK && size > 0 && nextPage != 0; iIdx++)
        {
#if !SQLITE_OMIT_INCRBLOB
            // If required, populate the overflow page-list cache.
            if (OverflowIDs != null)
            {
                Debug.Assert(OverflowIDs[iIdx] == 0 || OverflowIDs[iIdx] == nextPage);
                OverflowIDs[iIdx] = nextPage;
            }
#endif
            MemPage MemPageDummy = null;
            if (offset >= ovflSize)
            {
                // The only reason to read this page is to obtain the page number for the next page in the overflow chain. The page
                // data is not required. So first try to lookup the overflow page-list cache, if any, then fall back to the getOverflowPage() function.
#if !SQLITE_OMIT_INCRBLOB
                if (OverflowIDs != null && OverflowIDs[iIdx + 1] != 0)
                    nextPage = OverflowIDs[iIdx + 1];
                else
#endif
                    rc = shared.getOverflowPage(nextPage, out MemPageDummy, out nextPage);
                offset -= ovflSize;
            }
            else
            {
                // Need to read this page properly. It contains some of the range of data that is being read (eOp==null) or written (eOp!=null).
                var pDbPage = new PgHdr();
                var a = (int)size;
                rc = shared.Pager.Get(nextPage, ref pDbPage);
                if (rc == RC.OK)
                {
                    payload = Pager.sqlite3PagerGetData(pDbPage);
                    // First 4 bytes of an overflow page are the next-page link.
                    nextPage = ConvertEx.Get4(payload);
                    if (a + offset > ovflSize)
                        a = (int)(ovflSize - offset);
                    rc = CopyPayload(pDbPage, payload, offset + 4, b, bOffset, (uint)a, writeOperation);
                    Pager.Unref(pDbPage);
                    offset = 0;
                    size -= (uint)a;
                    bOffset += (uint)a;
                }
            }
        }
    }
    // Chain ended before the requested range was satisfied: corruption.
    if (rc == RC.OK && size > 0)
        return SysEx.SQLITE_CORRUPT_BKPT();
    return rc;
}
public RC Get(Pgno pgno, ref DbPage ppPage, byte noContent) { Debug.Assert(eState >= PAGER.READER); Debug.Assert(assert_pager_state()); if (pgno == 0) { return(SysEx.SQLITE_CORRUPT_BKPT()); } // If the pager is in the error state, return an error immediately. Otherwise, request the page from the PCache layer. var rc = (errCode != RC.OK ? errCode : pPCache.FetchPage(pgno, 1, ref ppPage)); PgHdr pPg = null; if (rc != RC.OK) { // Either the call to sqlite3PcacheFetch() returned an error or the pager was already in the error-state when this function was called. // Set pPg to 0 and jump to the exception handler. */ pPg = null; goto pager_get_err; } Debug.Assert((ppPage).ID == pgno); Debug.Assert((ppPage).Pager == this || (ppPage).Pager == null); if ((ppPage).Pager != null && 0 == noContent) { // In this case the pcache already contains an initialized copy of the page. Return without further ado. Debug.Assert(pgno <= PAGER_MAX_PGNO && pgno != PAGER_MJ_PGNO(this)); return(RC.OK); } else { // The pager cache has created a new page. Its content needs to be initialized. pPg = ppPage; pPg.Pager = this; pPg.Extra = _memPageBuilder; // The maximum page number is 2^31. Return SQLITE_CORRUPT if a page number greater than this, or the unused locking-page, is requested. if (pgno > PAGER_MAX_PGNO || pgno == PAGER_MJ_PGNO(this)) { rc = SysEx.SQLITE_CORRUPT_BKPT(); goto pager_get_err; } if ( #if SQLITE_OMIT_MEMORYDB 1 == MEMDB #else memDb != 0 #endif || dbSize < pgno || noContent != 0 || !fd.IsOpen) { if (pgno > mxPgno) { rc = RC.FULL; goto pager_get_err; } if (noContent != 0) { // Failure to set the bits in the InJournal bit-vectors is benign. It merely means that we might do some extra work to journal a // page that does not need to be journaled. Nevertheless, be sure to test the case where a malloc error occurs while trying to set // a bit in a bit vector. 
MallocEx.sqlite3BeginBenignMalloc(); if (pgno <= dbOrigSize) { pInJournal.Set(pgno); } addToSavepointBitvecs(pgno); MallocEx.sqlite3EndBenignMalloc(); } Array.Clear(pPg.Data, 0, pageSize); SysEx.IOTRACE("ZERO {0:x} {1}\n", this.GetHashCode(), pgno); }