// Make sure the page is marked as clean. If it isn't clean already, make it so.
public static void MakeClean(PgHdr p)
{
    if ((p.Flags & PgHdr.PGHDR.DIRTY) != 0)
    {
        RemoveFromDirtyList(p);
        p.Flags &= ~(PgHdr.PGHDR.DIRTY | PgHdr.PGHDR.NEED_SYNC);
        if (p.Refs == 0)
            Unpin(p);
    }
}
// Make sure the page is marked as dirty. If it isn't dirty already, make it so.
public static void MakeDirty(PgHdr p)
{
    p.Flags &= ~PgHdr.PGHDR.DONT_WRITE;
    Debug.Assert(p.Refs > 0);
    if ((p.Flags & PgHdr.PGHDR.DIRTY) == 0)
    {
        p.Flags |= PgHdr.PGHDR.DIRTY;
        AddToDirtyList(p);
    }
}
// Drop a page from the cache. There must be exactly one reference to the page.
// The page is discarded without being written, even if it is dirty.
public static void Drop(PgHdr p)
{
    Debug.Assert(p.Refs == 1);
    if ((p.Flags & PgHdr.PGHDR.DIRTY) != 0)
        RemoveFromDirtyList(p);
    var cache = p.Cache;
    cache.Refs--;
    if (p.ID == 1)
        cache.Page1 = null;
    cache.Cache.Unpin(p.Page, true);
}
// Change the page number of page p to newID.
public static void Move(PgHdr p, Pid newID)
{
    PCache cache = p.Cache;
    Debug.Assert(p.Refs > 0);
    Debug.Assert(newID > 0);
    cache.Cache.Rekey(p.Page, p.ID, newID);
    p.ID = newID;
    if ((p.Flags & PgHdr.PGHDR.DIRTY) != 0 && (p.Flags & PgHdr.PGHDR.NEED_SYNC) != 0)
    {
        RemoveFromDirtyList(p);
        AddToDirtyList(p);
    }
}
public static void Release(PgHdr p)
{
    Debug.Assert(p.Refs > 0);
    p.Refs--;
    if (p.Refs == 0)
    {
        var cache = p.Cache;
        cache.Refs--;
        if ((p.Flags & PgHdr.PGHDR.DIRTY) == 0)
            Unpin(p);
        else
        {
            // Move the page to the head of the dirty list.
            RemoveFromDirtyList(p);
            AddToDirtyList(p);
        }
    }
}
private static void RemoveFromDirtyList(PgHdr page)
{
    var p = page.Cache;
    Debug.Assert(page.DirtyNext != null || page == p.DirtyTail);
    Debug.Assert(page.DirtyPrev != null || page == p.Dirty);

    // Update the PCache1.pSynced variable if necessary.
    if (p.Synced == page)
    {
        var synced = page.DirtyPrev;
        while (synced != null && (synced.Flags & PgHdr.PGHDR.NEED_SYNC) != 0)
            synced = synced.DirtyPrev;
        p.Synced = synced;
    }

    if (page.DirtyNext != null)
        page.DirtyNext.DirtyPrev = page.DirtyPrev;
    else
    {
        Debug.Assert(page == p.DirtyTail);
        p.DirtyTail = page.DirtyPrev;
    }
    if (page.DirtyPrev != null)
        page.DirtyPrev.DirtyNext = page.DirtyNext;
    else
    {
        Debug.Assert(page == p.Dirty);
        p.Dirty = page.DirtyNext;
    }
    page.DirtyNext = null;
    page.DirtyPrev = null;

#if EXPENSIVE_ASSERT
    Debug.Assert(CheckSynced(p));
#endif
}
private static PgHdr SortDirtyList(PgHdr @in)
{
    var a = new PgHdr[N_SORT_BUCKET];
    PgHdr p;
    int i;
    while (@in != null)
    {
        p = @in;
        @in = p.Dirty;
        p.Dirty = null;
        for (i = 0; C._ALWAYS(i < N_SORT_BUCKET - 1); i++)
        {
            if (a[i] == null)
            {
                a[i] = p;
                break;
            }
            else
            {
                p = MergeDirtyList(a[i], p);
                a[i] = null;
            }
        }
        if (C._NEVER(i == N_SORT_BUCKET - 1))
        {
            // To get here, there need to be 2^(N_SORT_BUCKET) elements in the input list. But that is impossible.
            a[i] = MergeDirtyList(a[i], p);
        }
    }
    p = a[0];
    for (i = 1; i < N_SORT_BUCKET; i++)
        p = MergeDirtyList(p, a[i]);
    return p;
}
private static void AddToDirtyList(PgHdr page)
{
    var p = page.Cache;
    Debug.Assert(page.DirtyNext == null && page.DirtyPrev == null && p.Dirty != page);

    page.DirtyNext = p.Dirty;
    if (page.DirtyNext != null)
    {
        Debug.Assert(page.DirtyNext.DirtyPrev == null);
        page.DirtyNext.DirtyPrev = page;
    }
    p.Dirty = page;
    if (p.DirtyTail == null)
        p.DirtyTail = page;
    if (p.Synced == null && (page.Flags & PgHdr.PGHDR.NEED_SYNC) == 0)
        p.Synced = page;

#if EXPENSIVE_ASSERT
    Debug.Assert(CheckSynced(p));
#endif
}
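// Note on the Synced pointer maintained above and in RemoveFromDirtyList: the dirty list keeps
// the most recently dirtied page at PCache.Dirty and the oldest at PCache.DirtyTail. Synced is
// a hint pointing at a dirty page near the tail that does not carry PGHDR.NEED_SYNC, so Fetch
// can start its spill scan there instead of walking the whole list from DirtyTail; every page
// between DirtyTail and Synced is either pinned or still needs a journal sync (this is what the
// EXPENSIVE_ASSERT CheckSynced() check verifies).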
private static void Unpin(PgHdr p)
{
    var cache = p.Cache;
    if (cache.Purgeable)
    {
        if (p.ID == 1)
            cache.Page1 = null;
        cache.Cache.Unpin(p.Page, false);
    }
}
private static PgHdr SortDirtyList(PgHdr @in)
{
    var a = new PgHdr[N_SORT_BUCKET];
    PgHdr p;
    int i;
    while (@in != null)
    {
        p = @in;
        @in = p.Dirty;
        p.Dirty = null;
        for (i = 0; SysEx.ALWAYS(i < N_SORT_BUCKET - 1); i++)
        {
            if (a[i] == null)
            {
                a[i] = p;
                break;
            }
            else
            {
                p = MergeDirtyList(a[i], p);
                a[i] = null;
            }
        }
        if (SysEx.NEVER(i == N_SORT_BUCKET - 1))
        {
            // To get here, there need to be 2^(N_SORT_BUCKET) elements in the input list. But that is impossible.
            a[i] = MergeDirtyList(a[i], p);
        }
    }
    p = a[0];
    for (i = 1; i < N_SORT_BUCKET; i++)
        p = MergeDirtyList(p, a[i]);
    return p;
}
public static int get_PageRefs(PgHdr p) { return p.Refs; }
private static PgHdr MergeDirtyList(PgHdr a, PgHdr b)
{
    var result = new PgHdr();
    var tail = result;
    while (a != null && b != null)
    {
        if (a.ID < b.ID)
        {
            tail.Dirty = a;
            tail = a;
            a = a.Dirty;
        }
        else
        {
            tail.Dirty = b;
            tail = b;
            b = b.Dirty;
        }
    }
    if (a != null)
        tail.Dirty = a;
    else if (b != null)
        tail.Dirty = b;
    else
        tail.Dirty = null;
    return result.Dirty;
}
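// SortDirtyList and MergeDirtyList together implement an iterative merge sort over the singly
// linked Dirty chain: bucket i holds an already-sorted run of 2^i pages, and inserting a new
// page either fills an empty bucket or merges with the run already there and carries upward.
// Below is a minimal sketch of the same technique on a plain linked list of ints, for
// illustration only; MergeSortSketch, IntNode, Merge and SortList are hypothetical names and
// are not part of the port.
static class MergeSortSketch
{
    class IntNode { public int Value; public IntNode Next; }

    static IntNode Merge(IntNode a, IntNode b)
    {
        var head = new IntNode(); // dummy head node
        var tail = head;
        while (a != null && b != null)
        {
            if (a.Value < b.Value) { tail.Next = a; tail = a; a = a.Next; }
            else { tail.Next = b; tail = b; b = b.Next; }
        }
        tail.Next = (a != null ? a : b); // append whichever run is left over
        return head.Next;
    }

    static IntNode SortList(IntNode list, int buckets)
    {
        var a = new IntNode[buckets]; // a[i] holds a sorted run of 2^i nodes, or null
        while (list != null)
        {
            var p = list;
            list = p.Next;
            p.Next = null;
            int i;
            for (i = 0; i < buckets - 1; i++)
            {
                if (a[i] == null) { a[i] = p; break; } // empty slot: park the run here
                p = Merge(a[i], p);                    // collision: merge and carry upward
                a[i] = null;
            }
            if (i == buckets - 1)
                a[i] = Merge(a[i], p); // only reachable with 2^buckets or more nodes
        }
        var result = a[0];
        for (int i = 1; i < buckets; i++)
            result = Merge(result, a[i]);
        return result;
    }
}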
//public static implicit operator bool(PgHdr b) { return (b != null); }

public void memset()
{
    //Page = null;
    Data = null;
    Extra = null;
    Dirty = null;
    Pager = null;
    ID = 0;
#if CHECK_PAGES
    PageHash = 0;
#endif
    Flags = 0;
    Refs = 0;
    Cache = null;
    DirtyNext = null;
    DirtyPrev = null;
}
internal RC Frames(int v, PgHdr w, Pid x, int y, int z) { return 0; }
public RC Fetch(Pid id, bool createFlag, out PgHdr pageOut)
{
    Debug.Assert(id > 0);

    // If the pluggable cache (sqlite3_pcache*) has not been allocated, allocate it now.
    if (Cache == null && createFlag)
    {
        var p = _pcache.Create(SizePage, SizeExtra + 0, Purgeable);
        p.Cachesize(get_CacheSize());
        Cache = p;
    }

    ICachePage page = null;
    var create = (createFlag ? 1 : 0) * (1 + ((!Purgeable || Dirty == null) ? 1 : 0));
    if (Cache != null)
        page = Cache.Fetch(id, create > 0);

    if (page == null && create == 1)
    {
        // Find a dirty page to write-out and recycle. First try to find a page that does not require a journal-sync (one with PGHDR_NEED_SYNC
        // cleared), but if that is not possible settle for any other unreferenced dirty page.
#if EXPENSIVE_ASSERT
        CheckSynced(this);
#endif
        PgHdr pg;
        for (pg = Synced; pg != null && (pg.Refs != 0 || (pg.Flags & PgHdr.PGHDR.NEED_SYNC) != 0); pg = pg.DirtyPrev) ;
        Synced = pg;
        if (pg == null)
            for (pg = DirtyTail; pg != null && pg.Refs != 0; pg = pg.DirtyPrev) ;
        if (pg != null)
        {
#if LOG_CACHE_SPILL
            SysEx.Log(RC.FULL, "spill page %d making room for %d - cache used: %d/%d", pg.ID, id, _pcache.Pagecount(Cache), NumberOfCachePages(this));
#endif
            var rc = Stress(StressArg, pg);
            if (rc != RC.OK && rc != RC.BUSY)
            {
                pageOut = null;
                return rc;
            }
        }
        page = Cache.Fetch(id, true);
    }

    PgHdr pgHdr = null;
    if (page != null)
    {
        //pgHdr = page.Extra;
        if (page.Data == null)
        {
            //page.Page = page;
            page.Data = SysEx.Alloc(SizePage);
            //page.Extra = this;
            page.Cache = this;
            page.ID = id;
        }
        Debug.Assert(page.Cache == Cache);
        Debug.Assert(page.ID == id);
        //Debug.Assert(page.Data == page.Buffer);
        //Debug.Assert(page.Extra == this);
        if (page.Refs == 0)
            Refs++;
        page.Refs++;
        if (id == 1)
            Page1 = pgHdr;
    }
    pageOut = pgHdr;
    return (pgHdr == null && create != 0 ? RC.NOMEM : RC.OK);
}
public PgHdr Synced; // Last synced page in dirty page list

#endregion Fields

#region Methods

public void memset()
{
    Dirty = DirtyTail = null;
    Synced = null;
    Refs = 0;
}
static void checkList(IntegrityCk check, bool isFreeList, Pid pageID, int length, string context)
{
    int expected = length;
    Pid firstID = pageID;
    while (length-- > 0 && check.MaxErrors != 0)
    {
        if (pageID < 1)
        {
            checkAppendMsg(check, context, "%d of %d pages missing from overflow list starting at %d", length + 1, expected, firstID);
            break;
        }
        if (checkRef(check, pageID, context))
            break;
        PgHdr ovflPage = new PgHdr();
        if (check.Pager.Acquire((Pid)pageID, ref ovflPage, false) != RC.OK)
        {
            checkAppendMsg(check, context, "failed to get page %d", pageID);
            break;
        }
        byte[] ovflData = Pager.GetData(ovflPage);
        if (isFreeList)
        {
            int n = (int)ConvertEx.Get4(ovflData, 4);
#if !OMIT_AUTOVACUUM
            if (check.Bt.AutoVacuum)
                checkPtrmap(check, (uint)pageID, PTRMAP.FREEPAGE, 0, context);
#endif
            if (n > (int)check.Bt.UsableSize / 4 - 2)
            {
                checkAppendMsg(check, context, "freelist leaf count too big on page %d", pageID);
                length--;
            }
            else
            {
                for (int i = 0; i < n; i++)
                {
                    Pid freePageID = ConvertEx.Get4(ovflData, 8 + i * 4);
#if !OMIT_AUTOVACUUM
                    if (check.Bt.AutoVacuum)
                        checkPtrmap(check, freePageID, PTRMAP.FREEPAGE, 0, context);
#endif
                    checkRef(check, freePageID, context);
                }
                length -= n;
            }
        }
#if !OMIT_AUTOVACUUM
        else
        {
            // If this database supports auto-vacuum and iPage is not the last page in this overflow list, check that the pointer-map entry for
            // the following page matches iPage.
            if (check.Bt.AutoVacuum && length > 0)
            {
                int i = (int)ConvertEx.Get4(ovflData);
                checkPtrmap(check, (uint)i, PTRMAP.OVERFLOW2, (uint)pageID, context);
            }
        }
#endif
        pageID = (Pid)ConvertEx.Get4(ovflData);
        Pager.Unref(ovflPage);
    }
}
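// For reference, the freelist trunk layout that checkList reads above follows the SQLite file
// format: bytes 0-3 of a trunk page hold the page number of the next trunk page, bytes 4-7 hold
// the count of leaf page numbers stored on this trunk, and the leaf page numbers themselves
// start at byte 8 as 4-byte big-endian integers. A minimal sketch of the same reads, assuming a
// trunk page image in a hypothetical 'trunkData' buffer:
//     Pid nextTrunk  = (Pid)ConvertEx.Get4(trunkData);     // offset 0: next trunk page
//     uint leafCount = ConvertEx.Get4(trunkData, 4);       // offset 4: number of leaves here
//     Pid firstLeaf  = (Pid)ConvertEx.Get4(trunkData, 8);  // offset 8: first leaf page number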
public PgHdr Page1; // Reference to page 1

public void memset()
{
    Dirty = DirtyTail = null;
    Synced = null;
    Refs = 0;
}
public static void PageFree(ref PgHdr p) { PCache1.Free(ref p); }
public RC Fetch(Pid id, bool createFlag, out PgHdr pageOut)
{
    Debug.Assert(id > 0);

    // If the pluggable cache (sqlite3_pcache*) has not been allocated, allocate it now.
    if (Cache == null && createFlag)
    {
        var p = _pcache.Create(SizePage, SizeExtra + 0, Purgeable);
        p.Cachesize(get_CacheSize());
        Cache = p;
    }

    ICachePage page = null;
    var create = (createFlag ? 1 : 0) * (1 + ((!Purgeable || Dirty == null) ? 1 : 0));
    if (Cache != null)
        page = Cache.Fetch(id, create > 0);

    if (page == null && create == 1)
    {
        // Find a dirty page to write-out and recycle. First try to find a page that does not require a journal-sync (one with PGHDR_NEED_SYNC
        // cleared), but if that is not possible settle for any other unreferenced dirty page.
#if EXPENSIVE_ASSERT
        CheckSynced(this);
#endif
        PgHdr pg;
        for (pg = Synced; pg != null && (pg.Refs != 0 || (pg.Flags & PgHdr.PGHDR.NEED_SYNC) != 0); pg = pg.DirtyPrev) ;
        Synced = pg;
        if (pg == null)
        {
            for (pg = DirtyTail; pg != null && pg.Refs != 0; pg = pg.DirtyPrev) ;
        }
        if (pg != null)
        {
#if LOG_CACHE_SPILL
            SysEx.Log(RC.FULL, "spill page %d making room for %d - cache used: %d/%d", pg.ID, id, _pcache.Pagecount(Cache), NumberOfCachePages(this));
#endif
            var rc = Stress(StressArg, pg);
            if (rc != RC.OK && rc != RC.BUSY)
            {
                pageOut = null;
                return rc;
            }
        }
        page = Cache.Fetch(id, true);
    }

    PgHdr pgHdr = null;
    if (page != null)
    {
        //pgHdr = page.Extra;
        if (page.Data == null)
        {
            //page.Page = page;
            page.Data = C._alloc(SizePage);
            //page.Extra = this;
            page.Cache = this;
            page.ID = id;
        }
        Debug.Assert(page.Cache == Cache);
        Debug.Assert(page.ID == id);
        //Debug.Assert(page.Data == page.Buffer);
        //Debug.Assert(page.Extra == this);
        if (page.Refs == 0)
            Refs++;
        page.Refs++;
        if (id == 1)
            Page1 = pgHdr;
    }
    pageOut = pgHdr;
    return (pgHdr == null && create != 0 ? RC.NOMEM : RC.OK);
}
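// The 'create' value computed at the top of Fetch folds three cases into one integer:
// 0 = lookup only; 1 = create, but first try to spill an unreferenced dirty page (purgeable
// cache that currently has dirty pages); 2 = create a new page outright (non-purgeable cache,
// or nothing dirty to spill). A minimal sketch of the same decision as a standalone helper;
// CreateMode is a hypothetical name used only for this illustration:
//     static int CreateMode(bool createFlag, bool purgeable, bool hasDirtyPages)
//     {
//         if (!createFlag) return 0;                     // cache lookup only
//         return (!purgeable || !hasDirtyPages) ? 2 : 1; // 2 = allocate, 1 = spill-then-allocate
//     }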
public static void Ref(PgHdr p) { Debug.Assert(p.Refs > 0); p.Refs++; }
static RC accessPayload(BtCursor cur, uint offset, uint amount, byte[] buf, int op)
{
    MemPage page = cur.Pages[cur.ID]; // Btree page of current entry
    Debug.Assert(page != null);
    Debug.Assert(cur.State == CURSOR.VALID);
    Debug.Assert(cur.Idxs[cur.ID] < page.Cells);
    Debug.Assert(cursorHoldsMutex(cur));
    getCellInfo(cur);
    var payload = cur.Info.Cell; //cur.Info.Cell + cur.Info.Header;
    var key = (uint)(page.IntKey ? 0 : (int)cur.Info.Key);
    BtShared bt = cur.Bt; // Btree this cursor belongs to

    if (C._NEVER(offset + amount > key + cur.Info.Data) || cur.Info.Local > bt.UsableSize)
        return SysEx.CORRUPT_BKPT(); // Trying to read or write past the end of the data is an error

    // Check if data must be read/written to/from the btree page itself.
    var idx = 0U;
    var rc = RC.OK;
    var buf_ = 0U;
    if (offset < cur.Info.Local)
    {
        int a = (int)amount;
        if (a + offset > cur.Info.Local)
            a = (int)(cur.Info.Local - offset);
        rc = copyPayload(payload, (uint)(offset + cur.Info.Cell_ + cur.Info.Header), buf, buf_, (uint)a, op, page.DBPage);
        offset = 0;
        buf_ += (uint)a;
        amount -= (uint)a;
    }
    else
        offset -= cur.Info.Local;

    if (rc == RC.OK && amount > 0)
    {
        var ovflSize = (uint)(bt.UsableSize - 4); // Bytes content per ovfl page
        Pid nextPage = ConvertEx.Get4(payload, cur.Info.Local + cur.Info.Cell_ + cur.Info.Header);

#if !OMIT_INCRBLOB
        // If the isIncrblobHandle flag is set and the BtCursor.aOverflow[] has not been allocated, allocate it now. The array is sized at
        // one entry for each overflow page in the overflow chain. The page number of the first overflow page is stored in aOverflow[0],
        // etc. A value of 0 in the aOverflow[] array means "not yet known" (the cache is lazily populated).
        if (cur.IsIncrblobHandle && cur.Overflows == null)
        {
            uint ovfl = (cur.Info.Payload - cur.Info.Local + ovflSize - 1) / ovflSize;
            cur.Overflows = new Pid[ovfl];
            // nOvfl is always positive. If it were zero, fetchPayload would have been used instead of this routine.
            if (C._ALWAYS(ovfl != 0) && cur.Overflows == null)
                rc = RC.NOMEM;
        }

        // If the overflow page-list cache has been allocated and the entry for the first required overflow page is valid, skip
        // directly to it.
        if (cur.Overflows != null && cur.Overflows[offset / ovflSize] != 0)
        {
            idx = (offset / ovflSize);
            nextPage = cur.Overflows[idx];
            offset = (offset % ovflSize);
        }
#endif

        for (; rc == RC.OK && amount > 0 && nextPage != 0; idx++)
        {
#if !OMIT_INCRBLOB
            // If required, populate the overflow page-list cache.
            if (cur.Overflows != null)
            {
                Debug.Assert(cur.Overflows[idx] == 0 || cur.Overflows[idx] == nextPage);
                cur.Overflows[idx] = nextPage;
            }
#endif
            MemPage dummy = null;
            if (offset >= ovflSize)
            {
                // The only reason to read this page is to obtain the page number for the next page in the overflow chain. The page
                // data is not required. So first try to lookup the overflow page-list cache, if any, then fall back to the getOverflowPage() function.
#if !OMIT_INCRBLOB
                if (cur.Overflows != null && cur.Overflows[idx + 1] != 0)
                    nextPage = cur.Overflows[idx + 1];
                else
#endif
                    rc = getOverflowPage(bt, nextPage, out dummy, out nextPage);
                offset -= ovflSize;
            }
            else
            {
                // Need to read this page properly. It contains some of the range of data that is being read (eOp==0) or written (eOp!=0).
                int a = (int)amount;
                if (a + offset > ovflSize)
                    a = (int)(ovflSize - offset);
#if DIRECT_OVERFLOW_READ
                // If all the following are true:
                //
                //   1) this is a read operation, and
                //   2) data is required from the start of this overflow page, and
                //   3) the database is file-backed, and
                //   4) there is no open write-transaction, and
                //   5) the database is not a WAL database,
                //
                // then data can be read directly from the database file into the output buffer, bypassing the page-cache altogether. This speeds
                // up loading large records that span many overflow pages.
                VFile fd;
                if (op == 0 &&                                       // (1)
                    offset == 0 &&                                   // (2)
                    bt.InTransaction == TRANS.READ &&                // (4)
                    (fd = bt.Pager.File()).Methods != null &&        // (3)
                    bt.Page1.Data[19] == 0x01)                       // (5)
                {
                    var save = new byte[4];
                    var writeOffset = (int)buf_ - 4;
                    Buffer.BlockCopy(buf, writeOffset, save, 0, 4);
                    rc = fd.Read(buf, a + 4, writeOffset + (long)bt.PageSize * (nextPage - 1));
                    nextPage = ConvertEx.Get4(buf, writeOffset);
                    Buffer.BlockCopy(save, 0, buf, writeOffset, 4);
                }
                else
#endif
                {
                    var dbPage = new PgHdr();
                    rc = bt.Pager.Acquire(nextPage, ref dbPage, false);
                    if (rc == RC.OK)
                    {
                        payload = Pager.GetData(dbPage);
                        nextPage = ConvertEx.Get4(payload);
                        rc = copyPayload(payload, offset + 4, buf, buf_, (uint)a, op, dbPage);
                        Pager.Unref(dbPage);
                        offset = 0;
                    }
                }
                amount -= (uint)a;
                buf_ += (uint)a;
            }
        }
    }

    if (rc == RC.OK && amount > 0)
        return SysEx.CORRUPT_BKPT();
    return rc;
}
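// Worked example for the overflow-chain arithmetic used in accessPayload above (assumed
// numbers, for illustration only): each overflow page carries UsableSize - 4 payload bytes,
// because its first 4 bytes hold the next overflow page number. With a usable page size of
// 1024, a 10000-byte payload of which 700 bytes fit locally in the cell needs
// ceil((10000 - 700) / 1020) = 10 overflow pages - the same rounding-up formula
// (payload - local + ovflSize - 1) / ovflSize that sizes the BtCursor.Overflows cache.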
public RC Step(int pages)
{
    MutexEx.Enter(SrcCtx.Mutex);
    Src.Enter();
    if (DestCtx != null)
        MutexEx.Enter(DestCtx.Mutex);

    RC rc = RC_;
    if (!IsFatalError(rc))
    {
        Pager srcPager = Src.get_Pager(); // Source pager
        Pager destPager = Dest.get_Pager(); // Dest pager
        Pid srcPage = 0; // Size of source db in pages
        bool closeTrans = false; // True if src db requires unlocking

        // If the source pager is currently in a write-transaction, return SQLITE_BUSY immediately.
        rc = (DestCtx != null && Src.Bt.InTransaction == TRANS.WRITE ? RC.BUSY : RC.OK);

        // Lock the destination database, if it is not locked already.
        if (rc == RC.OK && !DestLocked && (rc = Dest.BeginTrans(2)) == RC.OK)
        {
            DestLocked = true;
            Dest.GetMeta(Btree.META.SCHEMA_VERSION, ref DestSchema);
        }

        // If there is no open read-transaction on the source database, open one now. If a transaction is opened here, then it will be closed
        // before this function exits.
        if (rc == RC.OK && !Src.IsInReadTrans())
        {
            rc = Src.BeginTrans(0);
            closeTrans = true;
        }

        // Do not allow backup if the destination database is in WAL mode and the page sizes are different between source and destination
        int pgszSrc = Src.GetPageSize(); // Source page size
        int pgszDest = Dest.GetPageSize(); // Destination page size
        IPager.JOURNALMODE destMode = Dest.get_Pager().GetJournalMode(); // Destination journal mode
        if (rc == RC.OK && destMode == IPager.JOURNALMODE.WAL && pgszSrc != pgszDest)
            rc = RC.READONLY;

        // Now that there is a read-lock on the source database, query the source pager for the number of pages in the database.
        srcPage = Src.LastPage();
        Debug.Assert(srcPage >= 0);
        for (int ii = 0; (pages < 0 || ii < pages) && NextId <= (Pid)srcPage && rc == RC.OK; ii++)
        {
            Pid srcPg = NextId; // Source page number
            if (srcPg != Btree.PENDING_BYTE_PAGE(Src.Bt))
            {
                IPage srcPgAsObj = null; // Source page object
                rc = srcPager.Acquire(srcPg, ref srcPgAsObj, false);
                if (rc == RC.OK)
                {
                    rc = BackupOnePage(this, srcPg, Pager.GetData(srcPgAsObj), false);
                    Pager.Unref(srcPgAsObj);
                }
            }
            NextId++;
        }
        if (rc == RC.OK)
        {
            Pagecount = srcPage;
            Remaining = (srcPage + 1 - NextId);
            if (NextId > srcPage)
                rc = RC.DONE;
            else if (!IsAttached)
                AttachBackupObject(this);
        }

        // Update the schema version field in the destination database. This is to make sure that the schema-version really does change in
        // the case where the source and destination databases have the same schema version.
        if (rc == RC.DONE)
        {
            if (srcPage == 0)
            {
                rc = Dest.NewDb();
                srcPage = 1;
            }
            if (rc == RC.OK || rc == RC.DONE)
                rc = Dest.UpdateMeta(Btree.META.SCHEMA_VERSION, DestSchema + 1);
            if (rc == RC.OK)
            {
                if (DestCtx != null)
                    Main.ResetAllSchemasOfConnection(DestCtx);
                if (destMode == IPager.JOURNALMODE.WAL)
                    rc = Dest.SetVersion(2);
            }
            if (rc == RC.OK)
            {
                // Set nDestTruncate to the final number of pages in the destination database. The complication here is that the destination page
                // size may be different to the source page size.
                //
                // If the source page size is smaller than the destination page size, round up. In this case the call to sqlite3OsTruncate() below will
                // fix the size of the file. However it is important to call sqlite3PagerTruncateImage() here so that any pages in the
                // destination file that lie beyond the nDestTruncate page mark are journalled by PagerCommitPhaseOne() before they are destroyed
                // by the file truncation.
                Debug.Assert(pgszSrc == Src.GetPageSize());
                Debug.Assert(pgszDest == Dest.GetPageSize());
                Pid destTruncate;
                if (pgszSrc < pgszDest)
                {
                    int ratio = pgszDest / pgszSrc;
                    destTruncate = (Pid)((srcPage + ratio - 1) / ratio);
                    if (destTruncate == Btree.PENDING_BYTE_PAGE(Dest.Bt))
                        destTruncate--;
                }
                else
                    destTruncate = (Pid)(srcPage * (pgszSrc / pgszDest));
                Debug.Assert(destTruncate > 0);

                if (pgszSrc < pgszDest)
                {
                    // If the source page-size is smaller than the destination page-size, two extra things may need to happen:
                    //
                    //   * The destination may need to be truncated, and
                    //
                    //   * Data stored on the pages immediately following the pending-byte page in the source database may need to be
                    //     copied into the destination database.
                    int size = (int)(pgszSrc * srcPage);
                    VFile file = destPager.get_File();
                    Debug.Assert(file != null);
                    Debug.Assert((long)destTruncate * (long)pgszDest >= size ||
                        (destTruncate == (int)(Btree.PENDING_BYTE_PAGE(Dest.Bt) - 1) && size >= VFile.PENDING_BYTE && size <= VFile.PENDING_BYTE + pgszDest));

                    // This block ensures that all data required to recreate the original database has been stored in the journal for pDestPager and the
                    // journal synced to disk. So at this point we may safely modify the database file in any way, knowing that if a power failure
                    // occurs, the original database will be reconstructed from the journal file.
                    uint dstPage;
                    destPager.Pages(out dstPage);
                    for (Pid pg = destTruncate; rc == RC.OK && pg <= (Pid)dstPage; pg++)
                    {
                        if (pg != Btree.PENDING_BYTE_PAGE(Dest.Bt))
                        {
                            IPage pgAsObj;
                            rc = destPager.Acquire(pg, ref pgAsObj, false);
                            if (rc == RC.OK)
                            {
                                rc = Pager.Write(pgAsObj);
                                Pager.Unref(pgAsObj);
                            }
                        }
                    }
                    if (rc == RC.OK)
                        rc = destPager.CommitPhaseOne(null, true);

                    // Write the extra pages and truncate the database file as required.
                    long end = Math.Min(VFile.PENDING_BYTE + pgszDest, size);
                    for (long off = VFile.PENDING_BYTE + pgszSrc; rc == RC.OK && off < end; off += pgszSrc)
                    {
                        Pid srcPg = (Pid)((off / pgszSrc) + 1);
                        PgHdr srcPgAsObj = null;
                        rc = srcPager.Acquire(srcPg, ref srcPgAsObj, false);
                        if (rc == RC.OK)
                        {
                            byte[] data = Pager.GetData(srcPgAsObj);
                            rc = file.Write(data, pgszSrc, off);
                        }
                        Pager.Unref(srcPgAsObj);
                    }
                    if (rc == RC.OK)
                        rc = BackupTruncateFile(file, (int)size);

                    // Sync the database file to disk.
                    if (rc == RC.OK)
                        rc = destPager.Sync();
                }
                else
                {
                    destPager.TruncateImage(destTruncate);
                    rc = destPager.CommitPhaseOne(null, false);
                }

                // Finish committing the transaction to the destination database.
                if (rc == RC.OK && (rc = Dest.CommitPhaseTwo(false)) == RC.OK)
                    rc = RC.DONE;
            }
        }

        // If bCloseTrans is true, then this function opened a read transaction on the source database. Close the read transaction here. There is
        // no need to check the return values of the btree methods here, as "committing" a read-only transaction cannot fail.
        if (closeTrans)
        {
#if !DEBUG || COVERAGE_TEST
            RC rc2 = Src.CommitPhaseOne(null);
            rc2 |= Src.CommitPhaseTwo(false);
            Debug.Assert(rc2 == RC.OK);
#else
            Src.CommitPhaseOne(null);
            Src.CommitPhaseTwo(false);
#endif
        }

        if (rc == RC.IOERR_NOMEM)
            rc = RC.NOMEM;
        RC_ = rc;
    }
    if (DestCtx != null)
        MutexEx.Leave(DestCtx.Mutex);
    Src.Leave();
    MutexEx.Leave(SrcCtx.Mutex);
    return rc;
}
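// Worked example for the destTruncate computation in Step above (assumed numbers, for
// illustration only): with pgszSrc = 1024, pgszDest = 4096 and a 10-page source database,
// the destination needs ceil(10 / 4) = 3 pages, which is what (srcPage + ratio - 1) / ratio
// produces with ratio = pgszDest / pgszSrc = 4. In the opposite direction, pgszSrc = 4096 and
// pgszDest = 1024 gives srcPage * (pgszSrc / pgszDest) = 10 * 4 = 40 destination pages exactly.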