// Reset this page-cache object to its empty state: drop the dirty list
// (head and tail), the "synced" hint pointer, and the page reference count.
public void Clear() { pDirty = null; pDirtyTail = null; pSynced = null; nRef = 0; }
/* NOTE(review): pPage must not already be on the dirty list (asserted below).
** The page is linked at the head of p.pDirty; pDirtyTail is seeded when the
** list was previously empty, and pSynced is seeded only when it is currently
** unset and this page does not need a sync. */
/* ** Add page pPage to the head of the dirty list (PCache1.pDirty is set to ** pPage). */ static void pcacheAddToDirtyList( PgHdr pPage ) { PCache p = pPage.pCache; Debug.Assert( pPage.pDirtyNext == null && pPage.pDirtyPrev == null && p.pDirty != pPage ); pPage.pDirtyNext = p.pDirty; if ( pPage.pDirtyNext != null ) { Debug.Assert( pPage.pDirtyNext.pDirtyPrev == null ); pPage.pDirtyNext.pDirtyPrev = pPage; } p.pDirty = pPage; if ( null == p.pDirtyTail ) { p.pDirtyTail = pPage; } if ( null == p.pSynced && 0 == ( pPage.flags & PGHDR_NEED_SYNC ) ) { p.pSynced = pPage; } #if SQLITE_ENABLE_EXPENSIVE_ASSERT expensive_assert( pcacheCheckSynced(p) ); #endif }
/* NOTE(review): "result" is a dummy head node so the merge loop needs no
** special case for the first element; the merged list starts at result.pDirty. */
/* ** Merge two lists of pages connected by pDirty and in pgno order. ** Do not bother fixing the pDirtyPrev pointers. */ static PgHdr pcacheMergeDirtyList( PgHdr pA, PgHdr pB ) { PgHdr result = new PgHdr(); PgHdr pTail = result; while ( pA != null && pB != null ) { if ( pA.pgno < pB.pgno ) { pTail.pDirty = pA; pTail = pA; pA = pA.pDirty; } else { pTail.pDirty = pB; pTail = pB; pB = pB.pDirty; } } if ( pA != null ) { pTail.pDirty = pA; } else if ( pB != null ) { pTail.pDirty = pB; } else { pTail.pDirty = null; } return result.pDirty; }
/*
** Allocate a page buffer.  When nByte fits in a SQLITE_CONFIG_PAGECACHE
** slot, a slot is taken from the free list (under the pcache mutex) and
** the PAGECACHE_USED statistic is incremented; otherwise a fresh PgHdr is
** heap-allocated and PAGECACHE_OVERFLOW is credited.
**
** Fix: the original read pcache1.pFree._PgHdr without first checking
** pcache1.pFree for null, so an exhausted free-slot list threw a
** NullReferenceException instead of falling through to the heap path.
*/
private static PgHdr pcache1Alloc(int nByte)
{
    PgHdr p = null;
    Debug.Assert(MutexEx.sqlite3_mutex_notheld(pcache1.grp.mutex));
    StatusEx.sqlite3StatusSet(StatusEx.STATUS.PAGECACHE_SIZE, nByte);
    if (nByte <= pcache1.szSlot)
    {
        MutexEx.sqlite3_mutex_enter(pcache1.mutex);
        // Guard against an empty free list before dereferencing it.
        p = (pcache1.pFree != null) ? pcache1.pFree._PgHdr : null;
        if (p != null)
        {
            pcache1.pFree = pcache1.pFree.pNext;
            pcache1.nFreeSlot--;
            pcache1.bUnderPressure = pcache1.nFreeSlot < pcache1.nReserve;
            Debug.Assert(pcache1.nFreeSlot >= 0);
            StatusEx.sqlite3StatusAdd(StatusEx.STATUS.PAGECACHE_USED, 1);
        }
        MutexEx.sqlite3_mutex_leave(pcache1.mutex);
    }
    if (p == null)
    {
        // Memory is not available in the SQLITE_CONFIG_PAGECACHE pool.
        // Get it from sqlite3Malloc instead.
        p = new PgHdr();
        {
            var sz = nByte;
            MutexEx.sqlite3_mutex_enter(pcache1.mutex);
            StatusEx.sqlite3StatusAdd(StatusEx.STATUS.PAGECACHE_OVERFLOW, sz);
            MutexEx.sqlite3_mutex_leave(pcache1.mutex);
        }
        SysEx.sqlite3MemdebugSetType(p, SysEx.MEMTYPE.PCACHE);
    }
    return p;
}
/* Release a buffer obtained from pcache1Alloc.  Slot-allocated pages
** (p.CacheAllocated) are pushed back onto the pcache free list and the
** USED statistic is decremented; heap-allocated pages have their data
** freed and the OVERFLOW statistic reduced by the data size.  All
** statistic/free-list updates happen under the pcache mutex. */
private static void pcache1Free(ref PgHdr p) { if (p == null) return; if (p.CacheAllocated) { var pSlot = new PgFreeslot(); MutexEx.sqlite3_mutex_enter(pcache1.mutex); StatusEx.sqlite3StatusAdd(StatusEx.STATUS.PAGECACHE_USED, -1); pSlot._PgHdr = p; pSlot.pNext = pcache1.pFree; pcache1.pFree = pSlot; pcache1.nFreeSlot++; pcache1.bUnderPressure = pcache1.nFreeSlot < pcache1.nReserve; Debug.Assert(pcache1.nFreeSlot <= pcache1.nSlot); MutexEx.sqlite3_mutex_leave(pcache1.mutex); } else { Debug.Assert(SysEx.sqlite3MemdebugHasType(p, SysEx.MEMTYPE.PCACHE)); SysEx.sqlite3MemdebugSetType(p, SysEx.MEMTYPE.HEAP); var iSize = MallocEx.sqlite3MallocSize(p.Data); MutexEx.sqlite3_mutex_enter(pcache1.mutex); StatusEx.sqlite3StatusAdd(StatusEx.STATUS.PAGECACHE_OVERFLOW, -iSize); MutexEx.sqlite3_mutex_leave(pcache1.mutex); MallocEx.sqlite3_free(ref p.Data); } }
// Reset every field of this page header to its default.  This variant only
// drops the pData reference (leaving reclamation to the GC) rather than
// explicitly freeing it.
public void Clear() { this.pData = null; this.pExtra = null; this.pDirty = null; this.pgno = 0; this.pPager = null; #if SQLITE_CHECK_PAGES this.pageHash=0; #endif this.flags = 0; this.nRef = 0; this.pCache = null; this.pDirtyNext = null; this.pDirtyPrev = null; this.pPgHdr1 = null; }
// Reset every field of this page header to its default, explicitly freeing
// the page data via sqlite3_free before clearing the remaining fields.
public void Clear() { sqlite3_free(ref this.pData); this.pData = null; this.pExtra = null; this.pDirty = null; this.pgno = 0; this.pPager = null; #if SQLITE_CHECK_PAGES this.pageHash = 0; #endif this.flags = 0; this.nRef = 0; this.pCache = null; this.pDirtyNext = null; this.pDirtyPrev = null; this.pPgHdr1 = null; }
// Reset every field of this page header to its default: free the page data
// via sqlite3_free, and additionally reset the CacheAllocated flag so the
// header is no longer treated as belonging to the page-cache slot pool.
public void Clear() { sqlite3_free(ref this.pData); this.pData = null; this.pExtra = null; this.pDirty = null; this.pgno = 0; this.pPager = null; #if SQLITE_CHECK_PAGES this.pageHash=0; #endif this.flags = 0; this.nRef = 0; this.CacheAllocated = false; this.pCache = null; this.pDirtyNext = null; this.pDirtyPrev = null; this.pPgHdr1 = null; }
/*
** Implementation of the sqlite3_pcache.xRekey method.
**
** Move page pPg from hash key iOld to iNew inside pCache's hash table and
** bump iMaxKey if the new key is larger.
**
** Fix: the original unlink walked the chain until pp == pPage and then, for
** a non-head page, executed pp.pNext = pPage.pNext — a self-assignment that
** left the predecessor still pointing at pPage, so the page remained
** reachable under the old key.  The rewrite tracks the predecessor node and
** updates its link (or the bucket head) correctly.
*/
static void pcache1Rekey(
  sqlite3_pcache p,
  PgHdr pPg,
  u32 iOld,
  u32 iNew
)
{
  PCache1 pCache = p;
  PgHdr1 pPage = PAGE_TO_PGHDR1(pCache, pPg);
  PgHdr1 pp;
  PgHdr1 pPrev;   /* Node preceding pPage in its old hash chain, or null */
  u32 h;
  Debug.Assert(pPage.iKey == iOld);
  Debug.Assert(pPage.pCache == pCache);
  pcache1EnterMutex();
  /* Unlink pPage from the chain for iOld. */
  h = iOld % pCache.nHash;
  pPrev = null;
  pp = pCache.apHash[h];
  while (pp != pPage)
  {
    pPrev = pp;
    pp = pp.pNext;
  }
  if (pPrev == null)
  {
    pCache.apHash[h] = pPage.pNext;
  }
  else
  {
    pPrev.pNext = pPage.pNext;
  }
  /* Insert pPage at the head of the chain for iNew. */
  h = iNew % pCache.nHash;
  pPage.iKey = iNew;
  pPage.pNext = pCache.apHash[h];
  pCache.apHash[h] = pPage;
  if (iNew > pCache.iMaxKey)
  {
    pCache.iMaxKey = iNew;
  }
  pcache1LeaveMutex();
}
/* NOTE(review): reuseUnlikely is an int flag (non-zero == true).  When set,
** or when the cache holds more pages than nMaxPage, the page is removed
** from the hash and freed immediately; otherwise it is placed at the head
** of the global LRU list and counted as recyclable. */
/* ** Implementation of the sqlite3_pcache.xUnpin method. ** ** Mark a page as unpinned (eligible for asynchronous recycling). */ private static void pcache1Unpin(sqlite3_pcache p, PgHdr pPg, int reuseUnlikely) { var pCache = p; var pPage = PAGE_TO_PGHDR1(pCache, pPg); Debug.Assert(pPage.pCache == pCache); pcache1EnterMutex(); /* It is an error to call this function if the page is already ** part of the global LRU list. */ Debug.Assert(pPage.pLruPrev == null && pPage.pLruNext == null); Debug.Assert(pcache1.pLruHead != pPage && pcache1.pLruTail != pPage); if (reuseUnlikely != 0 || pcache1.nCurrentPage > pcache1.nMaxPage) { pcache1RemoveFromHash(pPage); pcache1FreePage(ref pPage); } else { /* Add the page to the global LRU list. Normally, the page is added to ** the head of the list (last page to be recycled). However, if the ** reuseUnlikely flag passed to this function is true, the page is added ** to the tail of the list (first page to be recycled). */ if (pcache1.pLruHead != null) { pcache1.pLruHead.pLruPrev = pPage; pPage.pLruNext = pcache1.pLruHead; pcache1.pLruHead = pPage; } else { pcache1.pLruTail = pPage; pcache1.pLruHead = pPage; } pCache.nRecyclable++; } pcache1LeaveMutex(); }
/*
** Implementation of the sqlite3_pcache.xRekey method (PGroup variant).
**
** Move page pPg from hash key iOld to iNew inside pCache's hash table,
** holding the cache's PGroup mutex, and bump iMaxKey if needed.
**
** Fix: the original unlink walked the chain until pp == pPage and then, for
** a non-head page, executed pp.pNext = pPage.pNext — a self-assignment that
** left the predecessor still pointing at pPage, so the page remained
** reachable under the old key.  The rewrite tracks the predecessor node and
** updates its link (or the bucket head) correctly.
*/
static void pcache1Rekey(
  sqlite3_pcache p,
  PgHdr pPg,
  Pgno iOld,
  Pgno iNew
)
{
  PCache1 pCache = (PCache1)p;
  PgHdr1 pPage = PAGE_TO_PGHDR1(pCache, pPg);
  PgHdr1 pp;
  PgHdr1 pPrev;   /* Node preceding pPage in its old hash chain, or null */
  int h;
  Debug.Assert(pPage.iKey == iOld);
  Debug.Assert(pPage.pCache == pCache);
  pcache1EnterMutex(pCache.pGroup);
  /* Unlink pPage from the chain for iOld. */
  h = (int)(iOld % pCache.nHash);
  pPrev = null;
  pp = pCache.apHash[h];
  while (pp != pPage)
  {
    pPrev = pp;
    pp = pp.pNext;
  }
  if (pPrev == null)
  {
    pCache.apHash[h] = pPage.pNext;
  }
  else
  {
    pPrev.pNext = pPage.pNext;
  }
  /* Insert pPage at the head of the chain for iNew. */
  h = (int)(iNew % pCache.nHash);
  pPage.iKey = iNew;
  pPage.pNext = pCache.apHash[h];
  pCache.apHash[h] = pPage;
  if (iNew > pCache.iMaxKey)
  {
    pCache.iMaxKey = iNew;
  }
  pcache1LeaveMutex(pCache.pGroup);
}
/* NOTE(review): if pPage was the pSynced hint, the hint is rewound along
** pDirtyPrev to the nearest earlier page that does not need a sync (or null).
** Head/tail pointers are patched when pPage is at either end of the list. */
/* ** Remove page pPage from the list of dirty pages. */ static void pcacheRemoveFromDirtyList(PgHdr pPage) { PCache p = pPage.pCache; Debug.Assert(pPage.pDirtyNext != null || pPage == p.pDirtyTail); Debug.Assert(pPage.pDirtyPrev != null || pPage == p.pDirty); /* Update the PCache1.pSynced variable if necessary. */ if (p.pSynced == pPage) { PgHdr pSynced = pPage.pDirtyPrev; while (pSynced != null && (pSynced.flags & PGHDR_NEED_SYNC) != 0) { pSynced = pSynced.pDirtyPrev; } p.pSynced = pSynced; } if (pPage.pDirtyNext != null) { pPage.pDirtyNext.pDirtyPrev = pPage.pDirtyPrev; } else { Debug.Assert(pPage == p.pDirtyTail); p.pDirtyTail = pPage.pDirtyPrev; } if (pPage.pDirtyPrev != null) { pPage.pDirtyPrev.pDirtyNext = pPage.pDirtyNext; } else { Debug.Assert(pPage == p.pDirty); p.pDirty = pPage.pDirtyNext; } pPage.pDirtyNext = null; pPage.pDirtyPrev = null; #if SQLITE_ENABLE_EXPENSIVE_ASSERT expensive_assert(pcacheCheckSynced(p)); #endif }
/* Sort the singly-linked dirty-page list pIn into pgno order using an
** iterative bottom-up merge sort: bucket a[i] holds a sorted run of 2^i
** pages, runs are merged via pcacheMergeDirtyList as they collide, and all
** buckets are merged at the end.  N_SORT_BUCKET buckets suffice because a
** list of 2^N_SORT_BUCKET pages is impossible (see comment in the body). */
private static PgHdr pcacheSortDirtyList(PgHdr pIn) { PgHdr[] a; PgHdr p; //a[N_SORT_BUCKET], p; int i; a = new PgHdr[N_SORT_BUCKET]; //memset(a, 0, sizeof(a)); while (pIn != null) { p = pIn; pIn = p.pDirty; p.pDirty = null; for (i = 0; ALWAYS(i < N_SORT_BUCKET - 1); i++) { if (a[i] == null) { a[i] = p; break; } else { p = pcacheMergeDirtyList(a[i], p); a[i] = null; } } if (NEVER(i == N_SORT_BUCKET - 1)) { /* To get here, there need to be 2^(N_SORT_BUCKET) elements in ** the input list. But that is impossible. */ a[i] = pcacheMergeDirtyList(a[i], p); } } p = a[0]; for (i = 1; i < N_SORT_BUCKET; i++) { p = pcacheMergeDirtyList(p, a[i]); } return(p); }
/* ** Decrement the reference count on a page. If the page is clean and the ** reference count drops to 0, then it is made eligible for recycling. */ static void sqlite3PcacheRelease(PgHdr p) { Debug.Assert(p.nRef > 0); p.nRef--; if (p.nRef == 0) { PCache pCache = p.pCache; pCache.nRef--; if ((p.flags & PGHDR_DIRTY) == 0) { pcacheUnpin(p); } else { /* Move the page to the head of the dirty list. */ pcacheRemoveFromDirtyList(p); pcacheAddToDirtyList(p); } } }
/* NOTE(review): this legacy variant expects the caller to already hold the
** pcache mutex (asserted) and, unlike the MutexEx-based variant elsewhere in
** this file, does not maintain nFreeSlot/bUnderPressure — presumably those
** fields did not exist in this revision; verify before reuse. */
/* ** Free an allocated buffer obtained from pcache1Alloc(). */ static void pcache1Free(ref PgHdr p) { Debug.Assert(sqlite3_mutex_held(pcache1.mutex)); if (p == null) { return; } if (p.CacheAllocated) //if ( p >= pcache1.pStart && p < pcache1.pEnd ) { PgFreeslot pSlot = new PgFreeslot(); sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_USED, -1); pSlot._PgHdr = p;// (PgFreeslot)p; pSlot.pNext = pcache1.pFree; pcache1.pFree = pSlot; } else { int iSize = p.pData.Length; //sqlite3MallocSize( p ); sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_OVERFLOW, -iSize); p = null; //sqlite3_free( ref p ); } }
/* NOTE(review): PGroup-based variant taking a bool flag.  The page is freed
** immediately when reuse is unlikely or the group is over its page budget;
** otherwise it is pushed onto the head of the PGroup LRU list. */
/* ** Implementation of the sqlite3_pcache.xUnpin method. ** ** Mark a page as unpinned (eligible for asynchronous recycling). */ static void pcache1Unpin(sqlite3_pcache p, PgHdr pPg, bool reuseUnlikely) { PCache1 pCache = (PCache1)p; PgHdr1 pPage = PAGE_TO_PGHDR1(pCache, pPg); PGroup pGroup = pCache.pGroup; Debug.Assert(pPage.pCache == pCache); pcache1EnterMutex(pGroup); /* It is an error to call this function if the page is already ** part of the PGroup LRU list. */ Debug.Assert(pPage.pLruPrev == null && pPage.pLruNext == null); Debug.Assert(pGroup.pLruHead != pPage && pGroup.pLruTail != pPage); if (reuseUnlikely || pGroup.nCurrentPage > pGroup.nMaxPage) { pcache1RemoveFromHash(pPage); pcache1FreePage(ref pPage); } else { /* Add the page to the PGroup LRU list. */ if (pGroup.pLruHead != null) { pGroup.pLruHead.pLruPrev = pPage; pPage.pLruNext = pGroup.pLruHead; pGroup.pLruHead = pPage; } else { pGroup.pLruTail = pPage; pGroup.pLruHead = pPage; } pCache.nRecyclable++; } pcache1LeaveMutex(pCache.pGroup); }
/* Smoke test for the pager: open a pager on the default VFS, take a shared
** lock, fetch page 1, begin a write transaction, overwrite the first ten
** bytes of the page, and commit in two phases.  Any unexpected return code
** raises a bare Exception.  NOTE(review): the fetched page is never unreffed
** and the write is not verified — this appears to be a minimal manual test,
** not a production path. */
private static void TestPager() { var vfs = FileEx.sqlite3_vfs_find(null); var pager = Open(vfs); if (pager == null) { throw new Exception(); } var rc = pager.SharedLock(); if (rc != RC.OK) { throw new Exception(); } // PgHdr p = null; rc = pager.Get(1, ref p, 0); if (rc != RC.OK) { throw new Exception(); } rc = pager.Begin(false, false); if (rc != RC.OK) { throw new Exception(); } Array.Copy(new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, p.Data, 10); Pager.Write(p); pager.CommitPhaseOne(null, false); pager.CommitPhaseTwo(); // if (pager != null) { pager.Close(); } }
/* NOTE(review): the page buffer comes from pcache1Alloc while the PgHdr1
** wrapper is always heap-allocated; purgeable caches also bump the PGroup's
** current-page count.  Unlike the C original, allocation failure is not
** possible here (new never returns null), hence the commented-out branches. */
/* ** Allocate a new page object initially associated with cache pCache. */ static PgHdr1 pcache1AllocPage(PCache1 pCache) { //int nByte = sizeof( PgHdr1 ) + pCache.szPage; PgHdr pPg = pcache1Alloc(pCache.szPage);//nByte ); PgHdr1 p = null; //if ( pPg !=null) { //PAGE_TO_PGHDR1( pCache, pPg ); p = new PgHdr1(); p.pCache = pCache; p.pPgHdr = pPg; if (pCache.bPurgeable) { pCache.pGroup.nCurrentPage++; } } //else //{ // p = 0; //} return(p); }
/*
** Malloc function used within this file to allocate space from the buffer
** configured using sqlite3_config(SQLITE_CONFIG_PAGECACHE) option. If no
** such buffer exists or there is no space left in it, this function falls
** back to sqlite3Malloc().
**
** Multiple threads can run this routine at the same time. Global variables
** in pcache1 need to be protected via mutex.
**
** Fix: the original read pcache1.pFree._PgHdr without first checking
** pcache1.pFree for null, so an exhausted free-slot list threw a
** NullReferenceException instead of falling through to the heap path.
*/
static PgHdr pcache1Alloc(int nByte)
{
  PgHdr p = null;
  Debug.Assert(sqlite3_mutex_notheld(pcache1.grp.mutex));
  sqlite3StatusSet(SQLITE_STATUS_PAGECACHE_SIZE, nByte);
  if (nByte <= pcache1.szSlot)
  {
    sqlite3_mutex_enter(pcache1.mutex);
    /* Guard against an empty free list before dereferencing it. */
    p = (pcache1.pFree != null) ? pcache1.pFree._PgHdr : null;
    if (p != null)
    {
      pcache1.pFree = pcache1.pFree.pNext;
      pcache1.nFreeSlot--;
      pcache1.bUnderPressure = pcache1.nFreeSlot < pcache1.nReserve;
      Debug.Assert(pcache1.nFreeSlot >= 0);
      sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_USED, 1);
    }
    sqlite3_mutex_leave(pcache1.mutex);
  }
  if (p == null)
  {
    /* Memory is not available in the SQLITE_CONFIG_PAGECACHE pool. Get
    ** it from sqlite3Malloc instead. */
    p = new PgHdr();// sqlite3Malloc( nByte );
    //if ( p != null )
    {
      int sz = nByte;//sqlite3MallocSize( p );
      sqlite3_mutex_enter(pcache1.mutex);
      sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_OVERFLOW, sz);
      sqlite3_mutex_leave(pcache1.mutex);
    }
    sqlite3MemdebugSetType(p, MEMTYPE_PCACHE);
  }
  return (p);
}
/* Free a page buffer, taking and releasing the pcache1 mutex around the
** underlying pcache1Free call. */
static void sqlite3PageFree( ref PgHdr p )
{
  pcache1EnterMutex();
  pcache1Free( ref p );
  pcache1LeaveMutex();
}
/*
** Increase the reference count of a supplied page by 1.
** The page must already be referenced (nRef > 0).
*/
static void sqlite3PcacheRef(PgHdr p)
{
  Debug.Assert(p.nRef > 0);
  p.nRef += 1;
}
/* NOTE(review): legacy variant of the online-backup step.  Copies up to
** nPage pages (all remaining pages when nPage < 0) from the source b-tree's
** pager to the destination, skipping the pending-byte page.  On completion
** it bumps the destination schema version, truncates/extends the destination
** image to account for differing page sizes, and commits in two phases.
** The later variant in this file adds WAL-mode and page-size guards that
** this one lacks — confirm which revision is actually compiled. */
/* ** Copy nPage pages from the source b-tree to the destination. */ public static int sqlite3_backup_step(sqlite3_backup p, int nPage) { int rc; sqlite3_mutex_enter(p.pSrcDb.mutex); sqlite3BtreeEnter(p.pSrc); if (p.pDestDb != null) { sqlite3_mutex_enter(p.pDestDb.mutex); } rc = p.rc; if (!isFatalError(rc)) { var pSrcPager = sqlite3BtreePager(p.pSrc); /* Source pager */ var pDestPager = sqlite3BtreePager(p.pDest); /* Dest pager */ int ii; /* Iterator variable */ uint nSrcPage = 0; /* Size of source db in pages */ var bCloseTrans = 0; /* True if src db requires unlocking */ /* If the source pager is currently in a write-transaction, return ** SQLITE_BUSY immediately. */ if (p.pDestDb != null && p.pSrc.pBt.inTransaction == TRANS_WRITE) { rc = SQLITE_BUSY; } else { rc = SQLITE_OK; } /* Lock the destination database, if it is not locked already. */ if (SQLITE_OK == rc && p.bDestLocked == 0 && SQLITE_OK == (rc = sqlite3BtreeBeginTrans(p.pDest, 2)) ) { p.bDestLocked = 1; sqlite3BtreeGetMeta(p.pDest, BTREE_SCHEMA_VERSION, ref p.iDestSchema); } /* If there is no open read-transaction on the source database, open ** one now. If a transaction is opened here, then it will be closed ** before this function exits. */ if (rc == SQLITE_OK && !sqlite3BtreeIsInReadTrans(p.pSrc)) { rc = sqlite3BtreeBeginTrans(p.pSrc, 0); bCloseTrans = 1; } /* Now that there is a read-lock on the source database, query the ** source pager for the number of pages in the database. 
*/ if (rc == SQLITE_OK) { rc = sqlite3PagerPagecount(pSrcPager, ref nSrcPage); } for (ii = 0; (nPage < 0 || ii < nPage) && p.iNext <= nSrcPage && 0 == rc; ii++) { var iSrcPg = p.iNext; /* Source page number */ if (iSrcPg != PENDING_BYTE_PAGE(p.pSrc.pBt)) { DbPage pSrcPg = null; /* Source page object */ rc = sqlite3PagerGet(pSrcPager, iSrcPg, ref pSrcPg); if (rc == SQLITE_OK) { rc = backupOnePage(p, iSrcPg, sqlite3PagerGetData(pSrcPg)); sqlite3PagerUnref(pSrcPg); } } p.iNext++; } if (rc == SQLITE_OK) { p.nPagecount = nSrcPage; p.nRemaining = nSrcPage + 1 - p.iNext; if (p.iNext > nSrcPage) { rc = SQLITE_DONE; } else if (0 == p.isAttached) { attachBackupObject(p); } } /* Update the schema version field in the destination database. This ** is to make sure that the schema-version really does change in ** the case where the source and destination databases have the ** same schema version. */ if (rc == SQLITE_DONE && (rc = sqlite3BtreeUpdateMeta(p.pDest, 1, p.iDestSchema + 1)) == SQLITE_OK ) { var nSrcPagesize = sqlite3BtreeGetPageSize(p.pSrc); var nDestPagesize = sqlite3BtreeGetPageSize(p.pDest); uint nDestTruncate; if (p.pDestDb != null) { sqlite3ResetInternalSchema(p.pDestDb, 0); } /* Set nDestTruncate to the final number of pages in the destination ** database. The complication here is that the destination page ** size may be different to the source page size. ** ** If the source page size is smaller than the destination page size, ** round up. In this case the call to sqlite3OsTruncate() below will ** fix the size of the file. However it is important to call ** sqlite3PagerTruncateImage() here so that any pages in the ** destination file that lie beyond the nDestTruncate page mark are ** journalled by PagerCommitPhaseOne() before they are destroyed ** by the file truncation. 
*/ if (nSrcPagesize < nDestPagesize) { var ratio = nDestPagesize / nSrcPagesize; nDestTruncate = (uint)((nSrcPage + ratio - 1) / ratio); if (nDestTruncate == (int)PENDING_BYTE_PAGE(p.pDest.pBt)) { nDestTruncate--; } } else { nDestTruncate = (uint)(nSrcPage * (nSrcPagesize / nDestPagesize)); } sqlite3PagerTruncateImage(pDestPager, nDestTruncate); if (nSrcPagesize < nDestPagesize) { /* If the source page-size is smaller than the destination page-size, ** two extra things may need to happen: ** ** * The destination may need to be truncated, and ** ** * Data stored on the pages immediately following the ** pending-byte page in the source database may need to be ** copied into the destination database. */ var iSize = (uint)nSrcPagesize * nSrcPage; var pFile = sqlite3PagerFile(pDestPager); Debug.Assert(pFile != null); Debug.Assert(nDestTruncate * nDestPagesize >= iSize || nDestTruncate == (int)(PENDING_BYTE_PAGE(p.pDest.pBt) - 1) && iSize >= PENDING_BYTE && iSize <= PENDING_BYTE + nDestPagesize); if (SQLITE_OK == (rc = sqlite3PagerCommitPhaseOne(pDestPager, null, true)) && SQLITE_OK == (rc = backupTruncateFile(pFile, (int)iSize)) && SQLITE_OK == (rc = sqlite3PagerSync(pDestPager)) ) { long iOff; long iEnd = MIN(PENDING_BYTE + nDestPagesize, iSize); for ( iOff = PENDING_BYTE + nSrcPagesize; rc == SQLITE_OK && iOff < iEnd; iOff += nSrcPagesize ) { PgHdr pSrcPg = null; var iSrcPg = (uint)(iOff / nSrcPagesize + 1); rc = sqlite3PagerGet(pSrcPager, iSrcPg, ref pSrcPg); if (rc == SQLITE_OK) { var zData = sqlite3PagerGetData(pSrcPg); rc = sqlite3OsWrite(pFile, zData, nSrcPagesize, iOff); } sqlite3PagerUnref(pSrcPg); } } } else { rc = sqlite3PagerCommitPhaseOne(pDestPager, null, false); } /* Finish committing the transaction to the destination database. */ if (SQLITE_OK == rc && SQLITE_OK == (rc = sqlite3BtreeCommitPhaseTwo(p.pDest)) ) { rc = SQLITE_DONE; } } /* If bCloseTrans is true, then this function opened a read transaction ** on the source database. 
Close the read transaction here. There is ** no need to check the return values of the btree methods here, as ** "committing" a read-only transaction cannot fail. */ if (bCloseTrans != 0) { #if !NDEBUG || SQLITE_COVERAGE_TEST //TESTONLY( int rc2 ); //TESTONLY( rc2 = ) sqlite3BtreeCommitPhaseOne(p.pSrc, 0); //TESTONLY( rc2 |= ) sqlite3BtreeCommitPhaseTwo(p.pSrc); int rc2; rc2 = sqlite3BtreeCommitPhaseOne(p.pSrc, ""); rc2 |= sqlite3BtreeCommitPhaseTwo(p.pSrc); Debug.Assert(rc2 == SQLITE_OK); #else sqlite3BtreeCommitPhaseOne(p.pSrc, null); sqlite3BtreeCommitPhaseTwo(p.pSrc); #endif } p.rc = rc; } if (p.pDestDb != null) { sqlite3_mutex_leave(p.pDestDb.mutex); } sqlite3BtreeLeave(p.pSrc); sqlite3_mutex_leave(p.pSrcDb.mutex); return(rc); }
/* Sort the dirty-page list pIn into pgno order via an iterative bottom-up
** merge sort: bucket a[i] holds a sorted run of 2^i pages and colliding runs
** are merged with pcacheMergeDirtyList; the buckets are merged at the end. */
static PgHdr pcacheSortDirtyList( PgHdr pIn ) { PgHdr[] a; PgHdr p;//a[N_SORT_BUCKET], p; int i; a = new PgHdr[N_SORT_BUCKET];//memset(a, 0, sizeof(a)); while ( pIn != null ) { p = pIn; pIn = p.pDirty; p.pDirty = null; for ( i = 0; ALWAYS( i < N_SORT_BUCKET - 1 ); i++ ) { if ( a[i] == null ) { a[i] = p; break; } else { p = pcacheMergeDirtyList( a[i], p ); a[i] = null; } } if ( NEVER( i == N_SORT_BUCKET - 1 ) ) { /* To get here, there need to be 2^(N_SORT_BUCKET) elements in ** the input list. But that is impossible. */ a[i] = pcacheMergeDirtyList( a[i], p ); } } p = a[0]; for ( i = 1; i < N_SORT_BUCKET; i++ ) { p = pcacheMergeDirtyList( p, a[i] ); } return p; }
/*
** Return the number of references to the page supplied as an argument.
*/
static int sqlite3PcachePageRefcount( PgHdr p )
{
  int refCount = p.nRef;
  return refCount;
}
/* NOTE(review): clears the cache's page-1 shortcut when dropping page 1, and
** asks the pluggable cache to unpin with discard=true so the page object is
** actually destroyed rather than recycled. */
/* ** Drop a page from the cache. There must be exactly one reference to the ** page. This function deletes that reference, so after it returns the ** page pointed to by p is invalid. */ static void sqlite3PcacheDrop( PgHdr p ) { PCache pCache; Debug.Assert( p.nRef == 1 ); if ( ( p.flags & PGHDR_DIRTY ) != 0 ) { pcacheRemoveFromDirtyList( p ); } pCache = p.pCache; pCache.nRef--; if ( p.pgno == 1 ) { pCache.pPage1 = null; } sqlite3GlobalConfig.pcache.xUnpin( pCache.pCache, p, true ); }
/* NOTE(review): clearing PGHDR_NEED_SYNC together with PGHDR_DIRTY here
** matches the upstream C implementation; an unreferenced page is unpinned
** immediately once clean. */
/* ** Make sure the page is marked as clean. If it isn't clean already, ** make it so. */ static void sqlite3PcacheMakeClean( PgHdr p ) { if ( ( p.flags & PGHDR_DIRTY ) != 0 ) { pcacheRemoveFromDirtyList( p ); p.flags &= ~( PGHDR_DIRTY | PGHDR_NEED_SYNC ); if ( p.nRef == 0 ) { pcacheUnpin( p ); } } }
/* Free a page buffer.  This variant delegates directly to pcache1Free,
** which performs its own locking. */
static void sqlite3PageFree(ref PgHdr p)
{
  pcache1Free(ref p);
}
/* ** Decrement the reference count on a page. If the page is clean and the ** reference count drops to 0, then it is made eligible for recycling. */ static void sqlite3PcacheRelease( PgHdr p ) { Debug.Assert( p.nRef > 0 ); p.nRef--; if ( p.nRef == 0 ) { PCache pCache = p.pCache; pCache.nRef--; if ( ( p.flags & PGHDR_DIRTY ) == 0 ) { pcacheUnpin( p ); } else { /* Move the page to the head of the dirty list. */ pcacheRemoveFromDirtyList( p ); pcacheAddToDirtyList( p ); } } }
/* NOTE(review): caller must hold the pcache1 mutex (asserted).  Slot pages
** are pushed back onto the free list; heap pages have their pData freed and
** the OVERFLOW statistic reduced, after which the ref parameter is nulled. */
/* ** Free an allocated buffer obtained from pcache1Alloc(). */ static void pcache1Free( ref PgHdr p ) { Debug.Assert( sqlite3_mutex_held( pcache1.mutex ) ); if ( p == null ) return; if ( p.CacheAllocated ) //if ( p >= pcache1.pStart && p < pcache1.pEnd ) { PgFreeslot pSlot = new PgFreeslot(); sqlite3StatusAdd( SQLITE_STATUS_PAGECACHE_USED, -1 ); pSlot._PgHdr = p;// (PgFreeslot)p; pSlot.pNext = pcache1.pFree; pcache1.pFree = pSlot; } else { int iSize = sqlite3MallocSize(p.pData); sqlite3StatusAdd( SQLITE_STATUS_PAGECACHE_OVERFLOW, -iSize ); sqlite3_free(ref p.pData); p = null; } }
/* NOTE(review): unlike the notheld-asserting variant elsewhere in this file,
** this revision requires the caller to hold the pcache1 mutex, and it drops
** the mutex around the heap allocation so a soft-heap-limit breach can
** reclaim memory from this pager-cache — exact statement order matters. */
/* ** Malloc function used within this file to allocate space from the buffer ** configured using sqlite3_config(SQLITE_CONFIG_PAGECACHE) option. If no ** such buffer exists or there is no space left in it, this function falls ** back to sqlite3Malloc(). */ static PgHdr pcache1Alloc( int nByte ) { PgHdr p; Debug.Assert( sqlite3_mutex_held( pcache1.mutex ) ); if ( nByte <= pcache1.szSlot && pcache1.pFree != null ) { Debug.Assert( pcache1.isInit != 0 ); p = pcache1.pFree._PgHdr; p.CacheAllocated = true; pcache1.pFree = pcache1.pFree.pNext; sqlite3StatusSet( SQLITE_STATUS_PAGECACHE_SIZE, nByte ); sqlite3StatusAdd( SQLITE_STATUS_PAGECACHE_USED, 1 ); } else { /* Allocate a new buffer using sqlite3Malloc. Before doing so, exit the ** global pcache mutex and unlock the pager-cache object pCache. This is ** so that if the attempt to allocate a new buffer causes the the ** configured soft-heap-limit to be breached, it will be possible to ** reclaim memory from this pager-cache. */ pcache1LeaveMutex(); p = new PgHdr();// p = sqlite3Malloc(nByte); p.CacheAllocated = false; pcache1EnterMutex(); // if( p !=null){ int sz = nByte;//int sz = sqlite3MallocSize(p); sqlite3StatusAdd( SQLITE_STATUS_PAGECACHE_OVERFLOW, sz ); } return p; }
/* C# replacement for the C macro
**   #define PAGE_TO_PGHDR1(c, p) (PgHdr1*)(((char*)p) + c->szPage)
** In this port the PgHdr1 wrapper is stored directly on the page object,
** so the cache argument is unused. */
static PgHdr1 PAGE_TO_PGHDR1( PCache1 c, PgHdr p )
{
  return p.pPgHdr1;
}
/*
** Implementation of the sqlite3_pcache.xRekey method.
**
** Move page pPg from hash key iOld to iNew inside pCache's hash table.
**
** The xRekey() interface is only used to move pages earlier in the
** database file (in order to move all free pages to the end of the
** file where they can be truncated off.) Hence, it is not possible
** for the new page number to be greater than the largest previously
** fetched page. But we retain the iMaxKey test in case xRekey()
** begins to be used in different ways in the future.
**
** Fix: the original unlink walked the chain until pp == pPage and then, for
** a non-head page, executed pp.pNext = pPage.pNext — a self-assignment that
** left the predecessor still pointing at pPage, so the page remained
** reachable under the old key.  The rewrite tracks the predecessor node and
** updates its link (or the bucket head) correctly.
*/
static void pcache1Rekey(
  sqlite3_pcache p,
  PgHdr pPg,
  u32 iOld,
  u32 iNew
)
{
  PCache1 pCache = p;
  PgHdr1 pPage = PAGE_TO_PGHDR1( pCache, pPg );
  PgHdr1 pp;
  PgHdr1 pPrev;   /* Node preceding pPage in its old hash chain, or null */
  u32 h;
  Debug.Assert( pPage.iKey == iOld );
  Debug.Assert( pPage.pCache == pCache );
  pcache1EnterMutex();
  /* Unlink pPage from the chain for iOld. */
  h = iOld % pCache.nHash;
  pPrev = null;
  pp = pCache.apHash[h];
  while ( pp != pPage )
  {
    pPrev = pp;
    pp = pp.pNext;
  }
  if ( pPrev == null )
    pCache.apHash[h] = pPage.pNext;
  else
    pPrev.pNext = pPage.pNext;
  /* Insert pPage at the head of the chain for iNew. */
  h = iNew % pCache.nHash;
  pPage.iKey = iNew;
  pPage.pNext = pCache.apHash[h];
  pCache.apHash[h] = pPage;
  if ( NEVER( iNew > pCache.iMaxKey ) )
  {
    pCache.iMaxKey = iNew;
  }
  pcache1LeaveMutex();
}
/*
** Return the number of references to the page supplied as an argument.
*/
static int sqlite3PcachePageRefcount(PgHdr p)
{
  return p.nRef;
}
/* No-op stand-in for the C macro
**   #define sqlite3WalFrames(u,v,w,x,y,z) 0
** used when write-ahead logging is compiled out; always reports success. */
static int sqlite3WalFrames(Wal u, int v, PgHdr w, Pgno x, int y, int z)
{
  return 0;
}
/* NOTE(review): despite the in-body comment about adding to the tail when
** reuseUnlikely is set, this revision frees such pages outright instead —
** only the "add to head" path of the LRU insert is implemented. */
/* ** Implementation of the sqlite3_pcache.xUnpin method. ** ** Mark a page as unpinned (eligible for asynchronous recycling). */ static void pcache1Unpin( sqlite3_pcache p, PgHdr pPg, int reuseUnlikely ) { PCache1 pCache = (PCache1)p; PgHdr1 pPage = PAGE_TO_PGHDR1( pCache, pPg ); Debug.Assert( pPage.pCache == pCache ); pcache1EnterMutex(); /* It is an error to call this function if the page is already ** part of the global LRU list. */ Debug.Assert( pPage.pLruPrev == null && pPage.pLruNext == null ); Debug.Assert( pcache1.pLruHead != pPage && pcache1.pLruTail != pPage ); if ( reuseUnlikely != 0 || pcache1.nCurrentPage > pcache1.nMaxPage ) { pcache1RemoveFromHash( pPage ); pcache1FreePage( ref pPage ); } else { /* Add the page to the global LRU list. Normally, the page is added to ** the head of the list (last page to be recycled). However, if the ** reuseUnlikely flag passed to this function is true, the page is added ** to the tail of the list (first page to be recycled). */ if ( pcache1.pLruHead != null ) { pcache1.pLruHead.pLruPrev = pPage; pPage.pLruNext = pcache1.pLruHead; pcache1.pLruHead = pPage; } else { pcache1.pLruTail = pPage; pcache1.pLruHead = pPage; } pCache.nRecyclable++; } pcache1LeaveMutex(); }
/* NOTE(review): the page must already be referenced (nRef > 0); this never
** resurrects an unpinned page. */
/* ** Increase the reference count of a supplied page by 1. */ static void sqlite3PcacheRef( PgHdr p ) { Debug.Assert( p.nRef > 0 ); p.nRef++; }
/* Map a page object to its PgHdr1 wrapper.  In this port the wrapper is
** stored on the page itself (property PgHdr1), so the cache argument is
** unused; the cast mirrors the pointer arithmetic of the original C macro. */
private static PgHdr1 PAGE_TO_PGHDR1(PCache1 c, PgHdr p)
{
  return (PgHdr1)p.PgHdr1;
}
/* NOTE(review): the PGHDR_DONT_WRITE flag is always cleared, even when the
** page is already dirty; the page is only added to the dirty list on the
** clean-to-dirty transition. */
/* ** Make sure the page is marked as dirty. If it isn't dirty already, ** make it so. */ static void sqlite3PcacheMakeDirty( PgHdr p ) { p.flags &= ~PGHDR_DONT_WRITE; Debug.Assert( p.nRef > 0 ); if ( 0 == ( p.flags & PGHDR_DIRTY ) ) { p.flags |= PGHDR_DIRTY; pcacheAddToDirtyList( p ); } }
/* NOTE(review): the pointer-map page is released on every exit path, and the
** entry type is validated against the 1..5 range before SQLITE_OK is
** returned; unlike the C original, the ref out-parameters always exist so
** the null tests were dropped (see in-body comments). */
/* ** Read an entry from the pointer map. ** ** This routine retrieves the pointer map entry for page 'key', writing ** the type and parent page number to pEType and pPgno respectively. ** An error code is returned if something goes wrong, otherwise SQLITE_OK. */ static int ptrmapGet( BtShared pBt, Pgno key, ref u8 pEType, ref Pgno pPgno ) { PgHdr pDbPage = new PgHdr();/* The pointer map page */ int iPtrmap; /* Pointer map page index */ u8[] pPtrmap; /* Pointer map page data */ int offset; /* Offset of entry in pointer map */ int rc; Debug.Assert( sqlite3_mutex_held( pBt.mutex ) ); iPtrmap = (int)PTRMAP_PAGENO( pBt, key ); rc = sqlite3PagerGet( pBt.pPager, (u32)iPtrmap, ref pDbPage ); if ( rc != 0 ) { return rc; } pPtrmap = sqlite3PagerGetData( pDbPage ); offset = (int)PTRMAP_PTROFFSET( (u32)iPtrmap, key ); if ( offset < 0 ) { sqlite3PagerUnref( pDbPage ); return SQLITE_CORRUPT_BKPT(); } Debug.Assert( offset <= (int)pBt.usableSize - 5 ); // Under C# pEType will always exist. No need to test; // //Debug.Assert( pEType != 0 ); pEType = pPtrmap[offset]; // Under C# pPgno will always exist. No need to test; // //if ( pPgno != 0 ) pPgno = sqlite3Get4byte( pPtrmap, offset + 1 ); sqlite3PagerUnref( pDbPage ); if ( pEType < 1 || pEType > 5 ) return SQLITE_CORRUPT_BKPT(); return SQLITE_OK; }
/* NOTE(review): after rekeying via the pluggable cache, a dirty page that
** needs a sync is moved to the head of the dirty list so the pSynced
** bookkeeping stays consistent with its new position. */
/* ** Change the page number of page p to newPgno. */ static void sqlite3PcacheMove( PgHdr p, Pgno newPgno ) { PCache pCache = p.pCache; Debug.Assert( p.nRef > 0 ); Debug.Assert( newPgno > 0 ); sqlite3GlobalConfig.pcache.xRekey( pCache.pCache, p, p.pgno, newPgno ); p.pgno = newPgno; if ( ( p.flags & PGHDR_DIRTY ) != 0 && ( p.flags & PGHDR_NEED_SYNC ) != 0 ) { pcacheRemoveFromDirtyList( p ); pcacheAddToDirtyList( p ); } }
// No-op stand-in for the C macro `#define sqlite3WalFrames(u,v,w,x,y,z) 0`
// used when write-ahead logging is compiled out; always reports success.
//# define sqlite3WalFrames(u,v,w,x,y,z) 0 static int sqlite3WalFrames(Wal u, int v, PgHdr w, Pgno x, int y, int z) { return 0; }
/* NOTE(review): the entry is rewritten only when it actually differs from
** (eType, parent), avoiding an unnecessary sqlite3PagerWrite; the map page is
** released through the ptrmap_exit label on every path after a successful get. */
/* ** Write an entry into the pointer map. ** ** This routine updates the pointer map entry for page number 'key' ** so that it maps to type 'eType' and parent page number 'pgno'. ** ** If pRC is initially non-zero (non-SQLITE_OK) then this routine is ** a no-op. If an error occurs, the appropriate error code is written ** into pRC. */ static void ptrmapPut( BtShared pBt, Pgno key, u8 eType, Pgno parent, ref int pRC ) { PgHdr pDbPage = new PgHdr(); /* The pointer map page */ u8[] pPtrmap; /* The pointer map data */ Pgno iPtrmap; /* The pointer map page number */ int offset; /* Offset in pointer map page */ int rc; /* Return code from subfunctions */ if ( pRC != 0 ) return; Debug.Assert( sqlite3_mutex_held( pBt.mutex ) ); /* The master-journal page number must never be used as a pointer map page */ Debug.Assert( false == PTRMAP_ISPAGE( pBt, PENDING_BYTE_PAGE( pBt ) ) ); Debug.Assert( pBt.autoVacuum ); if ( key == 0 ) { pRC = SQLITE_CORRUPT_BKPT(); return; } iPtrmap = PTRMAP_PAGENO( pBt, key ); rc = sqlite3PagerGet( pBt.pPager, iPtrmap, ref pDbPage ); if ( rc != SQLITE_OK ) { pRC = rc; return; } offset = (int)PTRMAP_PTROFFSET( iPtrmap, key ); if ( offset < 0 ) { pRC = SQLITE_CORRUPT_BKPT(); goto ptrmap_exit; } Debug.Assert( offset <= (int)pBt.usableSize - 5 ); pPtrmap = sqlite3PagerGetData( pDbPage ); if ( eType != pPtrmap[offset] || sqlite3Get4byte( pPtrmap, offset + 1 ) != parent ) { TRACE( "PTRMAP_UPDATE: %d->(%d,%d)\n", key, eType, parent ); pRC = rc = sqlite3PagerWrite( pDbPage ); if ( rc == SQLITE_OK ) { pPtrmap[offset] = eType; sqlite3Put4byte( pPtrmap, offset + 1, parent ); } } ptrmap_exit: sqlite3PagerUnref( pDbPage ); }
/* NOTE(review): WAL-aware variant of the online-backup step.  In addition to
** the page-copy loop this revision refuses the backup (SQLITE_READONLY) when
** the destination is in WAL journal mode with a different page size, uses
** sqlite3BtreeLastPage for the source size, maps SQLITE_IOERR_NOMEM to
** SQLITE_NOMEM, and commits phase one before the pending-byte-region copy
** so a power failure can be recovered from the destination journal. */
/* ** Copy nPage pages from the source b-tree to the destination. */ static public int sqlite3_backup_step(sqlite3_backup p, int nPage) { int rc; int destMode; /* Destination journal mode */ int pgszSrc = 0; /* Source page size */ int pgszDest = 0; /* Destination page size */ sqlite3_mutex_enter(p.pSrcDb.mutex); sqlite3BtreeEnter(p.pSrc); if (p.pDestDb != null) { sqlite3_mutex_enter(p.pDestDb.mutex); } rc = p.rc; if (!isFatalError(rc)) { Pager pSrcPager = sqlite3BtreePager(p.pSrc); /* Source pager */ Pager pDestPager = sqlite3BtreePager(p.pDest); /* Dest pager */ int ii; /* Iterator variable */ Pgno nSrcPage = 0; /* Size of source db in pages */ int bCloseTrans = 0; /* True if src db requires unlocking */ /* If the source pager is currently in a write-transaction, return ** SQLITE_BUSY immediately. */ if (p.pDestDb != null && p.pSrc.pBt.inTransaction == TRANS_WRITE) { rc = SQLITE_BUSY; } else { rc = SQLITE_OK; } /* Lock the destination database, if it is not locked already. */ if (SQLITE_OK == rc && p.bDestLocked == 0 && SQLITE_OK == (rc = sqlite3BtreeBeginTrans(p.pDest, 2)) ) { p.bDestLocked = 1; sqlite3BtreeGetMeta(p.pDest, BTREE_SCHEMA_VERSION, ref p.iDestSchema); } /* If there is no open read-transaction on the source database, open ** one now. If a transaction is opened here, then it will be closed ** before this function exits. 
*/ if (rc == SQLITE_OK && !sqlite3BtreeIsInReadTrans(p.pSrc)) { rc = sqlite3BtreeBeginTrans(p.pSrc, 0); bCloseTrans = 1; } /* Do not allow backup if the destination database is in WAL mode ** and the page sizes are different between source and destination */ pgszSrc = sqlite3BtreeGetPageSize(p.pSrc); pgszDest = sqlite3BtreeGetPageSize(p.pDest); destMode = sqlite3PagerGetJournalMode(sqlite3BtreePager(p.pDest)); if (SQLITE_OK == rc && destMode == PAGER_JOURNALMODE_WAL && pgszSrc != pgszDest) { rc = SQLITE_READONLY; } /* Now that there is a read-lock on the source database, query the ** source pager for the number of pages in the database. */ nSrcPage = sqlite3BtreeLastPage(p.pSrc); Debug.Assert(nSrcPage >= 0); for (ii = 0; (nPage < 0 || ii < nPage) && p.iNext <= nSrcPage && 0 == rc; ii++) { Pgno iSrcPg = p.iNext; /* Source page number */ if (iSrcPg != PENDING_BYTE_PAGE(p.pSrc.pBt)) { DbPage pSrcPg = null; /* Source page object */ rc = sqlite3PagerGet(pSrcPager, (u32)iSrcPg, ref pSrcPg); if (rc == SQLITE_OK) { rc = backupOnePage(p, iSrcPg, sqlite3PagerGetData(pSrcPg)); sqlite3PagerUnref(pSrcPg); } } p.iNext++; } if (rc == SQLITE_OK) { p.nPagecount = nSrcPage; p.nRemaining = (nSrcPage + 1 - p.iNext); if (p.iNext > nSrcPage) { rc = SQLITE_DONE; } else if (0 == p.isAttached) { attachBackupObject(p); } } /* Update the schema version field in the destination database. This ** is to make sure that the schema-version really does change in ** the case where the source and destination databases have the ** same schema version. */ if (rc == SQLITE_DONE && (rc = sqlite3BtreeUpdateMeta(p.pDest, 1, p.iDestSchema + 1)) == SQLITE_OK ) { Pgno nDestTruncate; if (p.pDestDb != null) { sqlite3ResetInternalSchema(p.pDestDb, -1); } /* Set nDestTruncate to the final number of pages in the destination ** database. The complication here is that the destination page ** size may be different to the source page size. 
** ** If the source page size is smaller than the destination page size, ** round up. In this case the call to sqlite3OsTruncate() below will ** fix the size of the file. However it is important to call ** sqlite3PagerTruncateImage() here so that any pages in the ** destination file that lie beyond the nDestTruncate page mark are ** journalled by PagerCommitPhaseOne() before they are destroyed ** by the file truncation. */ Debug.Assert(pgszSrc == sqlite3BtreeGetPageSize(p.pSrc)); Debug.Assert(pgszDest == sqlite3BtreeGetPageSize(p.pDest)); if (pgszSrc < pgszDest) { int ratio = pgszDest / pgszSrc; nDestTruncate = (Pgno)((nSrcPage + ratio - 1) / ratio); if (nDestTruncate == (int)PENDING_BYTE_PAGE(p.pDest.pBt)) { nDestTruncate--; } } else { nDestTruncate = (Pgno)(nSrcPage * (pgszSrc / pgszDest)); } sqlite3PagerTruncateImage(pDestPager, nDestTruncate); if (pgszSrc < pgszDest) { /* If the source page-size is smaller than the destination page-size, ** two extra things may need to happen: ** ** * The destination may need to be truncated, and ** ** * Data stored on the pages immediately following the ** pending-byte page in the source database may need to be ** copied into the destination database. */ int iSize = (int)(pgszSrc * nSrcPage); sqlite3_file pFile = sqlite3PagerFile(pDestPager); i64 iOff; i64 iEnd; Debug.Assert(pFile != null); Debug.Assert((i64)nDestTruncate * (i64)pgszDest >= iSize || ( nDestTruncate == (int)(PENDING_BYTE_PAGE(p.pDest.pBt) - 1) && iSize >= PENDING_BYTE && iSize <= PENDING_BYTE + pgszDest )); /* This call ensures that all data required to recreate the original ** database has been stored in the journal for pDestPager and the ** journal synced to disk. So at this point we may safely modify ** the database file in any way, knowing that if a power failure ** occurs, the original database will be reconstructed from the ** journal file. 
*/ rc = sqlite3PagerCommitPhaseOne(pDestPager, null, true); /* Write the extra pages and truncate the database file as required. */ iEnd = MIN(PENDING_BYTE + pgszDest, iSize); for ( iOff = PENDING_BYTE + pgszSrc; rc == SQLITE_OK && iOff < iEnd; iOff += pgszSrc ) { PgHdr pSrcPg = null; u32 iSrcPg = (u32)((iOff / pgszSrc) + 1); rc = sqlite3PagerGet(pSrcPager, iSrcPg, ref pSrcPg); if (rc == SQLITE_OK) { byte[] zData = sqlite3PagerGetData(pSrcPg); rc = sqlite3OsWrite(pFile, zData, pgszSrc, iOff); } sqlite3PagerUnref(pSrcPg); } if (rc == SQLITE_OK) { rc = backupTruncateFile(pFile, (int)iSize); } /* Sync the database file to disk. */ if (rc == SQLITE_OK) { rc = sqlite3PagerSync(pDestPager); } } else { rc = sqlite3PagerCommitPhaseOne(pDestPager, null, false); } /* Finish committing the transaction to the destination database. */ if (SQLITE_OK == rc && SQLITE_OK == (rc = sqlite3BtreeCommitPhaseTwo(p.pDest, 0)) ) { rc = SQLITE_DONE; } } /* If bCloseTrans is true, then this function opened a read transaction ** on the source database. Close the read transaction here. There is ** no need to check the return values of the btree methods here, as ** "committing" a read-only transaction cannot fail. */ if (bCloseTrans != 0) { #if !NDEBUG || SQLITE_COVERAGE_TEST //TESTONLY( int rc2 ); //TESTONLY( rc2 = ) sqlite3BtreeCommitPhaseOne(p.pSrc, 0); //TESTONLY( rc2 |= ) sqlite3BtreeCommitPhaseTwo(p.pSrc); int rc2; rc2 = sqlite3BtreeCommitPhaseOne(p.pSrc, string.Empty); rc2 |= sqlite3BtreeCommitPhaseTwo(p.pSrc, 0); Debug.Assert(rc2 == SQLITE_OK); #else sqlite3BtreeCommitPhaseOne(p.pSrc, null); sqlite3BtreeCommitPhaseTwo(p.pSrc, 0); #endif } if (rc == SQLITE_IOERR_NOMEM) { rc = SQLITE_NOMEM; } p.rc = rc; } if (p.pDestDb != null) { sqlite3_mutex_leave(p.pDestDb.mutex); } sqlite3BtreeLeave(p.pSrc); sqlite3_mutex_leave(p.pSrcDb.mutex); return(rc); }
/*
** Release a page buffer back to the page-cache allocator.
** Wraps pcache1Free() in the static page-cache mutex so it may be
** called without the caller holding any pcache lock.
*/
static void sqlite3PageFree(ref PgHdr p) { pcache1EnterMutex(); pcache1Free(ref p); pcache1LeaveMutex(); }
/*
** Implementation of the sqlite3_pcache.xRekey method: move the page
** wrapper for p2 from the hash chain for oldKey to the chain for newKey.
**
** BUGFIX: the previous version advanced a cursor until it reached the
** page itself and then, for a non-head page, executed the self-assignment
** pp.pNext = pPage.pNext (pp == pPage), leaving the predecessor node still
** pointing at the page — the page was never unlinked from its old chain.
** We now either patch the bucket head directly or walk to the predecessor
** and splice around the page.
*/
public void xRekey(PgHdr p2, Pgno oldKey, Pgno newKey)
{
    var pPage = PAGE_TO_PGHDR1(this, p2);
    Debug.Assert(pPage.iKey == oldKey);
    Debug.Assert(pPage.pCache == this);
    pcache1EnterMutex(pGroup);
    /* Unlink pPage from the hash chain for its old key. */
    var h = (int)(oldKey % nHash);
    if (apHash[h] == pPage)
    {
        apHash[h] = pPage.pNext;
    }
    else
    {
        var pPrev = apHash[h];
        while (pPrev.pNext != pPage) pPrev = pPrev.pNext;
        pPrev.pNext = pPage.pNext;
    }
    /* Insert at the head of the chain for the new key. */
    h = (int)(newKey % nHash);
    pPage.iKey = newKey;
    pPage.pNext = apHash[h];
    apHash[h] = pPage;
    if (newKey > iMaxKey) iMaxKey = newKey;
    pcache1LeaveMutex(pGroup);
}
/* sqlite3_rekey
** Given a database, this will reencrypt the database using a new key.
** There are two possible modes of operation. The first is rekeying
** an existing database that was not previously encrypted. The second
** is to change the key on an existing database.
**
** The proposed logic for this function follows:
** 1. Determine if there is already a key present
** 2. If there is NOT already a key present, create one and attach a codec (key would be null)
** 3. Initialize a ctx.rekey parameter of the codec
**
** Note: this will require modifications to the sqlite3Codec to support rekey
**
** Returns SQLITE_OK when db and pKey are non-null (even if the underlying
** commit failed and was rolled back — only the trace output reveals that),
** SQLITE_ERROR when either argument is null.
*/
public static int sqlite3_rekey(sqlite3 db, string pKey, int nKey)
{
  CODEC_TRACE("sqlite3_rekey: entered db=%d pKey=%s, nKey=%d\n", db, pKey, nKey);
  //activate_openssl();
  if (db != null && pKey != null)
  {
    Db pDb = db.aDb[0];  /* rekey operates on the main database only */
    CODEC_TRACE("sqlite3_rekey: database pDb=%d\n", pDb);
    if (pDb.pBt != null)
    {
      codec_ctx ctx = null;
      int rc;
      Pgno page_count = 0;
      Pgno pgno;
      PgHdr page = null;
      Pager pPager = pDb.pBt.pBt.pPager;
      sqlite3pager_get_codec(pDb.pBt.pBt.pPager, ref ctx);
      if (ctx == null)
      {
        CODEC_TRACE("sqlite3_rekey: no codec attached to db, attaching now\n");
        /* there was no codec attached to this database,so attach one now with a null password */
        sqlite3CodecAttach(db, 0, pKey, nKey);
        sqlite3pager_get_codec(pDb.pBt.pBt.pPager, ref ctx);
        /* prepare this setup as if it had already been initialized: seed the
        ** read-side IV with the standard file header and zero the key sizes
        ** so reads pass data through unencrypted. */
        Buffer.BlockCopy(Encoding.UTF8.GetBytes(SQLITE_FILE_HEADER), 0, ctx.read_ctx.iv, 0, FILE_HEADER_SZ);
        ctx.read_ctx.key_sz = ctx.read_ctx.iv_sz = ctx.read_ctx.pass_sz = 0;
      }
      /* Disabled upstream logic for handling an IV-size change via vacuum;
      ** kept for reference. */
      //if ( ctx.read_ctx.iv_sz != ctx.write_ctx.iv_sz )
      //{
      //  string error = "";
      //  CODEC_TRACE( "sqlite3_rekey: updating page size for iv_sz change from %d to %d\n", ctx.read_ctx.iv_sz, ctx.write_ctx.iv_sz );
      //  db.nextPagesize = sqlite3BtreeGetPageSize( pDb.pBt );
      //  pDb.pBt.pBt.pageSizeFixed = false; /* required for sqlite3BtreeSetPageSize to modify pagesize setting */
      //  sqlite3BtreeSetPageSize( pDb.pBt, db.nextPagesize, MAX_IV_LENGTH, 0 );
      //  sqlite3RunVacuum( ref error, db );
      //}
      codec_set_pass_key(db, 0, pKey, nKey, 1);
      ctx.mode_rekey = 1;
      /* do stuff here to rewrite the database
      ** 1. Create a transaction on the database
      ** 2. Iterate through each page, reading it and then writing it.
      ** 3. If that goes ok then commit and put ctx.rekey into ctx.key
      **    note: don't deallocate rekey since it may be used in a subsequent iteration */
      rc = sqlite3BtreeBeginTrans(pDb.pBt, 1); /* begin write transaction */
      sqlite3PagerPagecount(pPager, out page_count);
      for (pgno = 1; rc == SQLITE_OK && pgno <= page_count; pgno++)
      { /* pgno's start at 1 see pager.c:pagerAcquire */
        if (0 == sqlite3pager_is_mj_pgno(pPager, pgno))
        { /* skip this page (see pager.c:pagerAcquire for reasoning) */
          rc = sqlite3PagerGet(pPager, pgno, ref page);
          if (rc == SQLITE_OK)
          { /* write page see pager_incr_changecounter for example */
            rc = sqlite3PagerWrite(page);
            //printf("sqlite3PagerWrite(%d)\n", pgno);
            if (rc == SQLITE_OK)
            {
              /* NOTE(review): the page reference is released only on the
              ** success path; a failed PagerWrite appears to leave the ref
              ** held — confirm whether rollback below reclaims it. */
              sqlite3PagerUnref(page);
            }
          }
        }
      }
      /* if commit was successful commit and copy the rekey data to current key, else rollback to release locks */
      if (rc == SQLITE_OK)
      {
        CODEC_TRACE("sqlite3_rekey: committing\n");
        db.nextPagesize = sqlite3BtreeGetPageSize(pDb.pBt);
        rc = sqlite3BtreeCommit(pDb.pBt);
        if (ctx != null)
        {
          cipher_ctx_copy(ctx.read_ctx, ctx.write_ctx);
        }
      }
      else
      {
        CODEC_TRACE("sqlite3_rekey: rollback\n");
        sqlite3BtreeRollback(pDb.pBt);
      }
      ctx.mode_rekey = 0;
    }
    return(SQLITE_OK);
  }
  return(SQLITE_ERROR);
}
/*
** Implementation of the sqlite3_pcache.xUnpin method.
**
** If the page is being discarded, or if the group is over its page
** budget, the page is removed from the hash table and freed.  Otherwise
** it is pushed onto the head of the PGroup LRU list so it can later be
** recycled.
*/
public void xUnpin(PgHdr p2, bool discard)
{
    var pPage = PAGE_TO_PGHDR1(this, p2);
    Debug.Assert(pPage.pCache == this);
    var grp = this.pGroup;
    pcache1EnterMutex(grp);

    /* It is an error to call this function if the page is already part of
    ** the PGroup LRU list. */
    Debug.Assert(pPage.pLruPrev == null && pPage.pLruNext == null);
    Debug.Assert(grp.pLruHead != pPage && grp.pLruTail != pPage);

    bool evict = discard || grp.nCurrentPage > grp.nMaxPage;
    if (evict)
    {
        pcache1RemoveFromHash(pPage);
        pcache1FreePage(ref pPage);
    }
    else
    {
        /* Link the page in at the head of the LRU list. */
        var oldHead = grp.pLruHead;
        if (oldHead == null)
        {
            grp.pLruTail = pPage;
        }
        else
        {
            oldHead.pLruPrev = pPage;
            pPage.pLruNext = oldHead;
        }
        grp.pLruHead = pPage;
        nRecyclable++;
    }

    pcache1LeaveMutex(grp);
}
//#define PAGE_TO_PGHDR1(c, p) (PgHdr1)(((char)p) + c.szPage)
/*
** Map a pluggable-cache page to its PgHdr1 wrapper.
** The native C macro (above) locates the header via pointer arithmetic past
** the page buffer; the managed port instead stores a back-reference on the
** page itself.  Parameter c is unused here but retained for signature
** parity with the C macro.
*/
static PgHdr1 PAGE_TO_PGHDR1(PCache1 c, PgHdr p) { return(p.pPgHdr1); }
/*
** Remove page pPage from the list of dirty pages maintained on its PCache.
** The page must currently be linked on that list.  On return both of the
** page's dirty-list pointers are cleared.
*/
static void pcacheRemoveFromDirtyList( PgHdr pPage )
{
  PCache p = pPage.pCache;
  /* The page must be linked: a null pDirtyNext means it is the tail and a
  ** null pDirtyPrev means it is the head. */
  Debug.Assert( pPage.pDirtyNext != null || pPage == p.pDirtyTail );
  Debug.Assert( pPage.pDirtyPrev != null || pPage == p.pDirty );
  /* Update the PCache1.pSynced variable if necessary: walk backwards from
  ** the page being removed to the nearest preceding entry that does not
  ** carry PGHDR_NEED_SYNC (possibly null). */
  if ( p.pSynced == pPage )
  {
    PgHdr pSynced = pPage.pDirtyPrev;
    while ( pSynced != null && ( pSynced.flags & PGHDR_NEED_SYNC ) != 0 )
    {
      pSynced = pSynced.pDirtyPrev;
    }
    p.pSynced = pSynced;
  }
  /* Splice the page out of the doubly-linked list, patching the list head
  ** and tail when the page sits at either end. */
  if ( pPage.pDirtyNext != null )
  {
    pPage.pDirtyNext.pDirtyPrev = pPage.pDirtyPrev;
  }
  else
  {
    Debug.Assert( pPage == p.pDirtyTail );
    p.pDirtyTail = pPage.pDirtyPrev;
  }
  if ( pPage.pDirtyPrev != null )
  {
    pPage.pDirtyPrev.pDirtyNext = pPage.pDirtyNext;
  }
  else
  {
    Debug.Assert( pPage == p.pDirty );
    p.pDirty = pPage.pDirtyNext;
  }
  pPage.pDirtyNext = null;
  pPage.pDirtyPrev = null;
#if SQLITE_ENABLE_EXPENSIVE_ASSERT
  expensive_assert( pcacheCheckSynced(p) );
#endif
}
/*
** Wrapper around the pluggable cache's xUnpin method.  If the cache is
** being used for an in-memory database (non-purgeable), this function
** is a no-op.
*/
static void pcacheUnpin( PgHdr p )
{
  PCache pOwner = p.pCache;
  if ( !pOwner.bPurgeable )
  {
    return;
  }
  /* Drop the cached reference to page 1 before handing it back. */
  if ( p.pgno == 1 )
  {
    pOwner.pPage1 = null;
  }
  sqlite3GlobalConfig.pcache.xUnpin( pOwner.pCache, p, false );
}
/*
** Try to obtain a page from the cache.
**
** Parameters:
**   pCache     - Obtain the page from this cache
**   pgno       - Page number to obtain (must be > 0)
**   createFlag - If true, create page if it does not exist already
**   ppPage     - Receives the page, or null
**
** Returns SQLITE_OK on success (including a null page when createFlag is
** 0), SQLITE_NOMEM when allocation fails, or the error from the xStress
** spill callback.
**
** FIXES relative to the previous version:
**  * The null check on xCreate()'s result had been commented out, so an
**    allocation failure would invoke xCachesize() on a null cache.  The
**    check is restored (matching the other copy of this routine in this
**    file).
**  * The SQLITE_LOG_CACHE_SPILL block used C pointer syntax (pPg->pgno)
**    that cannot compile in C# when the symbol is defined; rewritten.
*/
static int sqlite3PcacheFetch(
PCache pCache,   /* Obtain the page from this cache */
u32 pgno,        /* Page number to obtain */
int createFlag,  /* If true, create page if it does not exist already */
ref PgHdr ppPage /* Write the page here */
)
{
  PgHdr pPage = null;
  int eCreate;
  Debug.Assert( pCache != null );
  Debug.Assert( createFlag == 1 || createFlag == 0 );
  Debug.Assert( pgno > 0 );
  /* If the pluggable cache (sqlite3_pcache*) has not been allocated,
  ** allocate it now.
  */
  if ( null == pCache.pCache && createFlag != 0 )
  {
    sqlite3_pcache p;
    int nByte;
    nByte = pCache.szPage + pCache.szExtra + 0;// sizeof( PgHdr );
    p = sqlite3GlobalConfig.pcache.xCreate( nByte, pCache.bPurgeable );
    if ( null == p )
    {
      return SQLITE_NOMEM;
    }
    sqlite3GlobalConfig.pcache.xCachesize( p, pCache.nMax );
    pCache.pCache = p;
  }
  /* eCreate is 0 (don't create), 1 (create, but spill a dirty page first)
  ** or 2 (create unconditionally: non-purgeable cache or no dirty pages). */
  eCreate = createFlag * ( 1 + ( ( !pCache.bPurgeable || null == pCache.pDirty ) ? 1 : 0 ) );
  if ( pCache.pCache != null )
  {
    pPage = sqlite3GlobalConfig.pcache.xFetch( pCache.pCache, pgno, eCreate );
  }
  if ( null == pPage && eCreate == 1 )
  {
    PgHdr pPg;
    /* Find a dirty page to write-out and recycle. First try to find a
    ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC
    ** cleared), but if that is not possible settle for any other
    ** unreferenced dirty page.
    */
#if SQLITE_ENABLE_EXPENSIVE_ASSERT
    expensive_assert( pcacheCheckSynced(pCache) );
#endif
    for ( pPg = pCache.pSynced;
          pPg != null && ( pPg.nRef != 0 || ( pPg.flags & PGHDR_NEED_SYNC ) != 0 );
          pPg = pPg.pDirtyPrev )
      ;
    pCache.pSynced = pPg;
    if ( null == pPg )
    {
      for ( pPg = pCache.pDirtyTail; pPg != null && pPg.nRef != 0; pPg = pPg.pDirtyPrev )
        ;
    }
    if ( pPg != null )
    {
      int rc;
#if SQLITE_LOG_CACHE_SPILL
      sqlite3_log( SQLITE_FULL,
                   "spill page %d making room for %d - cache used: %d/%d",
                   pPg.pgno, pgno,
                   sqlite3GlobalConfig.pcache.xPagecount( pCache.pCache ), pCache.nMax );
#endif
      rc = pCache.xStress( pCache.pStress, pPg );
      if ( rc != SQLITE_OK && rc != SQLITE_BUSY )
      {
        return rc;
      }
    }
    pPage = sqlite3GlobalConfig.pcache.xFetch( pCache.pCache, pgno, 2 );
  }
  if ( pPage != null )
  {
    if ( null == pPage.pData )
    {
      /* Freshly created page: allocate its data buffer and bind it to this
      ** cache.  (The C version carves pData/pExtra out of one allocation.) */
      pPage.pData = sqlite3Malloc( pCache.szPage );
      pPage.pCache = pCache;
      pPage.pgno = pgno;
    }
    Debug.Assert( pPage.pCache == pCache );
    Debug.Assert( pPage.pgno == pgno );
    if ( 0 == pPage.nRef )
    {
      pCache.nRef++;
    }
    pPage.nRef++;
    if ( pgno == 1 )
    {
      pCache.pPage1 = pPage;
    }
  }
  ppPage = pPage;
  return ( pPage == null && eCreate != 0 ) ? SQLITE_NOMEM : SQLITE_OK;
}
/*
** Implementation of the sqlite3_pcache.xRekey method: move page pPg from
** the hash chain for key iOld to the chain for key iNew, updating iMaxKey.
**
** BUGFIX: the previous version walked a cursor pp until pp == pPage and
** then, for a non-head page, executed pp.pNext = pPage.pNext — a
** self-assignment (pp and pPage were the same node) that left the
** predecessor still pointing at the page, so the page was never removed
** from its old hash chain.  The C original splices via a pointer-to-pointer;
** in C# we either patch the bucket head or walk to the predecessor node.
*/
static void pcache1Rekey(
sqlite3_pcache p,
PgHdr pPg,
u32 iOld,
u32 iNew
)
{
  PCache1 pCache = p;
  PgHdr1 pPage = PAGE_TO_PGHDR1( pCache, pPg );
  u32 h;
  Debug.Assert( pPage.iKey == iOld );
  Debug.Assert( pPage.pCache == pCache );
  pcache1EnterMutex();
  /* Unlink pPage from the chain for its old key. */
  h = iOld % pCache.nHash;
  if ( pCache.apHash[h] == pPage )
  {
    pCache.apHash[h] = pPage.pNext;
  }
  else
  {
    PgHdr1 pPrev = pCache.apHash[h];
    while ( pPrev.pNext != pPage )
    {
      pPrev = pPrev.pNext;
    }
    pPrev.pNext = pPage.pNext;
  }
  /* Insert at the head of the chain for the new key. */
  h = iNew % pCache.nHash;
  pPage.iKey = iNew;
  pPage.pNext = pCache.apHash[h];
  pCache.apHash[h] = pPage;
  if ( iNew > pCache.iMaxKey )
  {
    pCache.iMaxKey = iNew;
  }
  pcache1LeaveMutex();
}
/*
** Try to obtain a page from the cache.
**
** Parameters:
**   pCache     - Obtain the page from this cache
**   pgno       - Page number to obtain (must be > 0)
**   createFlag - If true, create page if it does not exist already
**   ppPage     - Receives the fetched page, or null
**
** Returns SQLITE_OK on success (ppPage may still be null when createFlag
** is 0), SQLITE_NOMEM when a required allocation fails, or the error code
** from the xStress spill callback.
*/
static int sqlite3PcacheFetch(
PCache pCache, /* Obtain the page from this cache */
u32 pgno, /* Page number to obtain */
int createFlag, /* If true, create page if it does not exist already */
ref PgHdr ppPage /* Write the page here */
)
{
  PgHdr pPage = null;
  int eCreate;
  Debug.Assert(pCache != null);
  Debug.Assert(createFlag == 1 || createFlag == 0);
  Debug.Assert(pgno > 0);
  /* If the pluggable cache (sqlite3_pcache*) has not been allocated,
  ** allocate it now. */
  if (null == pCache.pCache && createFlag != 0)
  {
    sqlite3_pcache p;
    int nByte;
    nByte = pCache.szPage + pCache.szExtra + 0;// sizeof( PgHdr );
    p = sqlite3GlobalConfig.pcache.xCreate(nByte, pCache.bPurgeable);
    if (null == p)
    {
      return(SQLITE_NOMEM);
    }
    sqlite3GlobalConfig.pcache.xCachesize(p, pCache.nMax);
    pCache.pCache = p;
  }
  /* eCreate is 0 (no create), 1 (create, but try to spill a dirty page
  ** first) or 2 (create unconditionally: non-purgeable cache or no dirty
  ** pages to spill). */
  eCreate = createFlag * (1 + ((!pCache.bPurgeable || null == pCache.pDirty) ? 1 : 0));
  if (pCache.pCache != null)
  {
    pPage = sqlite3GlobalConfig.pcache.xFetch(pCache.pCache, pgno, eCreate);
  }
  if (null == pPage && eCreate == 1)
  {
    PgHdr pPg;
    /* Find a dirty page to write-out and recycle. First try to find a
    ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC
    ** cleared), but if that is not possible settle for any other
    ** unreferenced dirty page.
    */
#if SQLITE_ENABLE_EXPENSIVE_ASSERT
    expensive_assert(pcacheCheckSynced(pCache));
#endif
    for (pPg = pCache.pSynced;
         pPg != null && (pPg.nRef != 0 || (pPg.flags & PGHDR_NEED_SYNC) != 0);
         pPg = pPg.pDirtyPrev
    )
    {
      ;
    }
    pCache.pSynced = pPg;
    if (null == pPg)
    {
      for (pPg = pCache.pDirtyTail; pPg != null && pPg.nRef != 0; pPg = pPg.pDirtyPrev)
      {
        ;
      }
    }
    if (pPg != null)
    {
      int rc;
      /* Spill the chosen dirty page to make room; SQLITE_BUSY from the
      ** stress callback is tolerated, other errors abort the fetch. */
      rc = pCache.xStress(pCache.pStress, pPg);
      if (rc != SQLITE_OK && rc != SQLITE_BUSY)
      {
        return(rc);
      }
    }
    /* Retry the fetch with unconditional create now that a page may have
    ** been recycled. */
    pPage = sqlite3GlobalConfig.pcache.xFetch(pCache.pCache, pgno, 2);
  }
  if (pPage != null)
  {
    if (null == pPage.pData)
    {
      /* Freshly created page: allocate its data buffer and bind it to this
      ** cache.  (The C version carves pData/pExtra from one allocation.) */
      // memset(pPage, 0, sizeof(PgHdr));
      pPage.pData = sqlite3Malloc(pCache.szPage);// pPage->pData = (void*)&pPage[1];
      //pPage->pExtra = (void*)&((char*)pPage->pData)[pCache->szPage];
      //memset(pPage->pExtra, 0, pCache->szExtra);
      pPage.pCache = pCache;
      pPage.pgno = pgno;
    }
    Debug.Assert(pPage.pCache == pCache);
    Debug.Assert(pPage.pgno == pgno);
    //assert(pPage->pData == (void*)&pPage[1]);
    //assert(pPage->pExtra == (void*)&((char*)&pPage[1])[pCache->szPage]);
    /* First reference to this page bumps the cache-wide ref count. */
    if (0 == pPage.nRef)
    {
      pCache.nRef++;
    }
    pPage.nRef++;
    if (pgno == 1)
    {
      pCache.pPage1 = pPage;
    }
  }
  ppPage = pPage;
  return((pPage == null && eCreate != 0) ? SQLITE_NOMEM : SQLITE_OK);
}