// Verify that the in-memory schema of every attached database still matches the
// schema cookie stored in the database file. If a mismatch is detected the cached
// schema for that database is discarded and Parse.RC is set to RC.SCHEMA so the
// statement gets recompiled against the fresh schema.
//
// FIX: the schema-cookie local was previously declared without an initializer and
// then passed by ref, which is a definite-assignment error (CS0165) in C#.
static void SchemaIsValid(Parse parse)
{
    Context ctx = parse.Ctx;
    Debug.Assert(parse.CheckSchema != 0);
    Debug.Assert(MutexEx.Held(ctx.Mutex));

    for (int db = 0; db < ctx.DBs.length; db++)
    {
        Btree bt = ctx.DBs[db].Bt; // Btree database to read cookie from
        if (bt == null)
            continue;

        // If there is not already a read-only (or read-write) transaction opened
        // on the b-tree database, open one now. If a transaction is opened, it
        // will be closed immediately after reading the meta-value.
        bool openedTransaction = false; // True if a transaction is opened
        if (!bt.IsInReadTrans())
        {
            RC rc = bt.BeginTrans(0);
            if (rc == RC.NOMEM || rc == RC.IOERR_NOMEM)
                ctx.MallocFailed = true;
            if (rc != RC.OK)
                return;
            openedTransaction = true;
        }

        // Read the schema cookie from the database. If it does not match the
        // value stored as part of the in-memory schema representation, set
        // Parse.RC to SQLITE_SCHEMA.
        uint cookie = 0; // initialized: passed by ref below
        bt.GetMeta(Btree.META.SCHEMA_VERSION, ref cookie);
        Debug.Assert(Btree.SchemaMutexHeld(ctx, db, null));
        if (cookie != ctx.DBs[db].Schema.SchemaCookie)
        {
            ResetOneSchema(ctx, db);
            parse.RC = RC.SCHEMA;
        }

        // Close the transaction, if one was opened.
        if (openedTransaction)
            bt.Commit();
    }
}
// Copy up to 'pages' pages from the source database to the destination
// (a negative 'pages' means copy everything). Returns RC.OK when more pages
// remain, RC.DONE when the backup has finished, or an error code.
//
// FIXES applied in this revision:
//  * 'srcPage == null' compared a value-type Pid against null (always false);
//    the intended test is 'srcPage == 0' (empty source database).
//  * loop condition used 'rc == 0' instead of 'rc == RC.OK'.
//  * '#if !DEBUG' was inverted: the rc2/Debug.Assert branch only compiled in
//    release builds where Debug.Assert is a no-op, so it never checked anything.
//  * 'IPage pgAsObj' was passed by ref while unassigned (CS0165).
public RC Step(int pages)
{
    MutexEx.Enter(SrcCtx.Mutex);
    Src.Enter();
    if (DestCtx != null)
        MutexEx.Enter(DestCtx.Mutex);

    RC rc = RC_;
    if (!IsFatalError(rc))
    {
        Pager srcPager = Src.get_Pager();   // Source pager
        Pager destPager = Dest.get_Pager(); // Dest pager
        Pid srcPage = 0;                    // Size of source db in pages
        bool closeTrans = false;            // True if src db requires unlocking

        // If the source pager is currently in a write-transaction, return
        // SQLITE_BUSY immediately.
        rc = (DestCtx != null && Src.Bt.InTransaction == TRANS.WRITE ? RC.BUSY : RC.OK);

        // Lock the destination database, if it is not locked already.
        if (rc == RC.OK && !DestLocked && (rc = Dest.BeginTrans(2)) == RC.OK)
        {
            DestLocked = true;
            Dest.GetMeta(Btree.META.SCHEMA_VERSION, ref DestSchema);
        }

        // If there is no open read-transaction on the source database, open
        // one now. If a transaction is opened here, then it will be closed
        // before this function exits.
        if (rc == RC.OK && !Src.IsInReadTrans())
        {
            rc = Src.BeginTrans(0);
            closeTrans = true;
        }

        // Do not allow backup if the destination database is in WAL mode and
        // the page sizes are different between source and destination.
        int pgszSrc = Src.GetPageSize();   // Source page size
        int pgszDest = Dest.GetPageSize(); // Destination page size
        IPager.JOURNALMODE destMode = Dest.get_Pager().GetJournalMode(); // Destination journal mode
        if (rc == RC.OK && destMode == IPager.JOURNALMODE.WAL && pgszSrc != pgszDest)
            rc = RC.READONLY;

        // Now that there is a read-lock on the source database, query the
        // source pager for the number of pages in the database.
        srcPage = Src.LastPage();
        Debug.Assert(srcPage >= 0);

        for (int ii = 0; (pages < 0 || ii < pages) && NextId <= (Pid)srcPage && rc == RC.OK; ii++)
        {
            Pid srcPg = NextId; // Source page number
            if (srcPg != Btree.PENDING_BYTE_PAGE(Src.Bt))
            {
                IPage srcPgAsObj = null; // Source page object
                rc = srcPager.Acquire(srcPg, ref srcPgAsObj, false);
                if (rc == RC.OK)
                {
                    // NOTE(review): 'p' is not declared in this method; it
                    // presumably refers to this backup object - confirm.
                    rc = BackupOnePage(p, srcPg, Pager.GetData(srcPgAsObj), false);
                    Pager.Unref(srcPgAsObj);
                }
            }
            NextId++;
        }
        if (rc == RC.OK)
        {
            Pagecount = srcPage;
            Remaining = (srcPage + 1 - NextId);
            if (NextId > srcPage)
                rc = RC.DONE;
            else if (!IsAttached)
                AttachBackupObject(p);
        }

        // Update the schema version field in the destination database. This is
        // to make sure that the schema-version really does change in the case
        // where the source and destination databases have the same schema version.
        if (rc == RC.DONE)
        {
            if (srcPage == 0) // empty source db: initialize the destination
            {
                rc = Dest.NewDb();
                srcPage = 1;
            }
            if (rc == RC.OK || rc == RC.DONE)
                rc = Dest.UpdateMeta(Btree.META.SCHEMA_VERSION, DestSchema + 1);
            if (rc == RC.OK)
            {
                if (DestCtx != null)
                    Main.ResetAllSchemasOfConnection(DestCtx);
                if (destMode == IPager.JOURNALMODE.WAL)
                    rc = Dest.SetVersion(2);
            }
            if (rc == RC.OK)
            {
                // Set nDestTruncate to the final number of pages in the destination
                // database. The complication here is that the destination page size
                // may be different to the source page size.
                //
                // If the source page size is smaller than the destination page size,
                // round up. In this case the call to sqlite3OsTruncate() below will
                // fix the size of the file. However it is important to call
                // sqlite3PagerTruncateImage() here so that any pages in the
                // destination file that lie beyond the nDestTruncate page mark are
                // journalled by PagerCommitPhaseOne() before they are destroyed by
                // the file truncation.
                Debug.Assert(pgszSrc == Src.GetPageSize());
                Debug.Assert(pgszDest == Dest.GetPageSize());
                Pid destTruncate;
                if (pgszSrc < pgszDest)
                {
                    int ratio = pgszDest / pgszSrc;
                    destTruncate = (Pid)((srcPage + ratio - 1) / ratio);
                    if (destTruncate == Btree.PENDING_BYTE_PAGE(Dest.Bt))
                        destTruncate--;
                }
                else
                    destTruncate = (Pid)(srcPage * (pgszSrc / pgszDest));
                Debug.Assert(destTruncate > 0);

                if (pgszSrc < pgszDest)
                {
                    // If the source page-size is smaller than the destination
                    // page-size, two extra things may need to happen:
                    //
                    //   * The destination may need to be truncated, and
                    //
                    //   * Data stored on the pages immediately following the
                    //     pending-byte page in the source database may need to be
                    //     copied into the destination database.
                    int size = (int)(pgszSrc * srcPage);
                    VFile file = destPager.get_File();
                    Debug.Assert(file != null);
                    Debug.Assert((long)destTruncate * (long)pgszDest >= size ||
                        (destTruncate == (int)(Btree.PENDING_BYTE_PAGE(Dest.Bt) - 1) &&
                         size >= VFile.PENDING_BYTE && size <= VFile.PENDING_BYTE + pgszDest));

                    // This block ensures that all data required to recreate the
                    // original database has been stored in the journal for pDestPager
                    // and the journal synced to disk. So at this point we may safely
                    // modify the database file in any way, knowing that if a power
                    // failure occurs, the original database will be reconstructed
                    // from the journal file.
                    uint dstPage;
                    destPager.Pages(out dstPage);
                    for (Pid pg = destTruncate; rc == RC.OK && pg <= (Pid)dstPage; pg++)
                    {
                        if (pg != Btree.PENDING_BYTE_PAGE(Dest.Bt))
                        {
                            IPage pgAsObj = null; // initialized: passed by ref below
                            rc = destPager.Acquire(pg, ref pgAsObj, false);
                            if (rc == RC.OK)
                            {
                                rc = Pager.Write(pgAsObj);
                                Pager.Unref(pgAsObj);
                            }
                        }
                    }
                    if (rc == RC.OK)
                        rc = destPager.CommitPhaseOne(null, true);

                    // Write the extra pages and truncate the database file as required.
                    long end = Math.Min(VFile.PENDING_BYTE + pgszDest, size);
                    for (long off = VFile.PENDING_BYTE + pgszSrc; rc == RC.OK && off < end; off += pgszSrc)
                    {
                        Pid srcPg = (Pid)((off / pgszSrc) + 1);
                        PgHdr srcPgAsObj = null;
                        rc = srcPager.Acquire(srcPg, ref srcPgAsObj, false);
                        if (rc == RC.OK)
                        {
                            byte[] data = Pager.GetData(srcPgAsObj);
                            rc = file.Write(data, pgszSrc, off);
                        }
                        Pager.Unref(srcPgAsObj);
                    }
                    if (rc == RC.OK)
                        rc = BackupTruncateFile(file, (int)size);

                    // Sync the database file to disk.
                    if (rc == RC.OK)
                        rc = destPager.Sync();
                }
                else
                {
                    destPager.TruncateImage(destTruncate);
                    rc = destPager.CommitPhaseOne(null, false);
                }

                // Finish committing the transaction to the destination database.
                if (rc == RC.OK && (rc = Dest.CommitPhaseTwo(false)) == RC.OK)
                    rc = RC.DONE;
            }
        }

        // If bCloseTrans is true, then this function opened a read transaction
        // on the source database. Close the read transaction here. There is no
        // need to check the return values of the btree methods here, as
        // "committing" a read-only transaction cannot fail.
        if (closeTrans)
        {
#if DEBUG || COVERAGE_TEST
            RC rc2 = Src.CommitPhaseOne(null);
            rc2 |= Src.CommitPhaseTwo(false);
            Debug.Assert(rc2 == RC.OK);
#else
            Src.CommitPhaseOne(null);
            Src.CommitPhaseTwo(false);
#endif
        }

        if (rc == RC.IOERR_NOMEM)
            rc = RC.NOMEM;
        RC_ = rc;
    }

    if (DestCtx != null)
        MutexEx.Leave(DestCtx.Mutex);
    Src.Leave();
    MutexEx.Leave(SrcCtx.Mutex);
    return rc;
}
// Erase the b-tree whose root page is 'tableID' from the database file and free
// all its pages. In auto-vacuum mode the page with the largest root-page number
// is moved into the vacated slot and its old number is reported via 'movedID'
// (0 if no page was moved) so the caller can fix up the schema.
//
// FIX: the comment "Set the new 'max-root-page' value..." had lost its '//'
// markers on its continuation line, leaving bare prose tokens in the code path
// (a compile error). The comment is restored below.
static RC btreeDropTable(Btree p, Pid tableID, ref int movedID)
{
    BtShared bt = p.Bt;
    Debug.Assert(p.HoldsMutex());
    Debug.Assert(p.InTrans == TRANS.WRITE);

    // It is illegal to drop a table if any cursors are open on the database.
    // This is because in auto-vacuum mode the backend may need to move another
    // root-page to fill a gap left by the deleted root page. If an open cursor
    // was using this page a problem would occur.
    //
    // This error is caught long before control reaches this point.
    if (C._NEVER(bt.Cursor != null))
    {
        BContext.ConnectionBlocked(p.Ctx, bt.Cursor.Btree.Ctx);
        return RC.LOCKED_SHAREDCACHE;
    }

    MemPage page = null;
    RC rc = btreeGetPage(bt, (Pid)tableID, ref page, false);
    if (rc != RC.OK)
        return rc;
    int dummy0 = 0;
    rc = p.ClearTable((int)tableID, ref dummy0);
    if (rc != RC.OK)
    {
        releasePage(page);
        return rc;
    }

    movedID = 0;
    if (tableID > 1)
    {
#if OMIT_AUTOVACUUM
        freePage(page, ref rc);
        releasePage(page);
#else
        if (bt.AutoVacuum)
        {
            Pid maxRootID = 0;
            p.GetMeta(META.LARGEST_ROOT_PAGE, ref maxRootID);
            if (tableID == maxRootID)
            {
                // If the table being dropped is the table with the largest
                // root-page number in the database, put the root page on the
                // free list.
                freePage(page, ref rc);
                releasePage(page);
                if (rc != RC.OK)
                    return rc;
            }
            else
            {
                // The table being dropped does not have the largest root-page
                // number in the database. So move the page that does into the
                // gap left by the deleted root-page.
                releasePage(page);
                MemPage move = new MemPage();
                rc = btreeGetPage(bt, maxRootID, ref move, false);
                if (rc != RC.OK)
                    return rc;
                rc = relocatePage(bt, move, PTRMAP.ROOTPAGE, 0, tableID, false);
                releasePage(move);
                if (rc != RC.OK)
                    return rc;
                move = null;
                rc = btreeGetPage(bt, maxRootID, ref move, false);
                freePage(move, ref rc);
                releasePage(move);
                if (rc != RC.OK)
                    return rc;
                movedID = (int)maxRootID;
            }

            // Set the new 'max-root-page' value in the database header. This is
            // the old value less one, less one more if that happens to be a
            // root-page number, less one again if that is the PENDING_BYTE_PAGE.
            maxRootID--;
            while (maxRootID == PENDING_BYTE_PAGE(bt) || PTRMAP_ISPAGE(bt, maxRootID))
                maxRootID--;
            Debug.Assert(maxRootID != PENDING_BYTE_PAGE(bt));
            rc = p.UpdateMeta(META.LARGEST_ROOT_PAGE, maxRootID);
        }
        else
        {
            freePage(page, ref rc);
            releasePage(page);
        }
#endif
    }
    else
    {
        // If sqlite3BtreeDropTable was called on page 1. This really never
        // should happen except in a corrupt database.
        zeroPage(page, PTF_INTKEY | PTF_LEAF);
        releasePage(page);
    }
    return rc;
}
// Allocate a new root page for a b-tree and report its page number through
// 'tableID'. In auto-vacuum mode the root page must be placed immediately
// after the previous largest root page, which may require relocating the page
// that currently occupies that slot. 'createTabFlags' selects an intkey
// (table) or zerodata (index) root page.
static RC btreeCreateTable(Btree p, ref int tableID, int createTabFlags)
{
    BtShared bt = p.Bt;
    Debug.Assert(p.HoldsMutex());
    Debug.Assert(bt.InTransaction == TRANS.WRITE);
    Debug.Assert((bt.BtsFlags & BTS.READ_ONLY) == 0);

    RC rc;
    MemPage rootPage = new MemPage();
    Pid rootPgno = 0;
#if OMIT_AUTOVACUUM
    rc = allocateBtreePage(bt, ref rootPage, ref rootPgno, 1, BTALLOC.ANY);
    if (rc != RC.OK)
        return rc;
#else
    if (bt.AutoVacuum)
    {
        // Creating a new table may probably require moving an existing database
        // to make room for the new tables root page. In case this page turns
        // out to be an overflow page, delete all overflow page-map caches held
        // by open cursors.
        invalidateAllOverflowCache(bt);

        // Read the value of meta[3] from the database to determine where the
        // root page of the new table should go. meta[3] is the largest
        // root-page created so far, so the new root-page is (meta[3]+1).
        p.GetMeta(META.LARGEST_ROOT_PAGE, ref rootPgno);
        rootPgno++;

        // The new root-page may not be allocated on a pointer-map page, or the
        // PENDING_BYTE page.
        while (rootPgno == PTRMAP_PAGENO(bt, rootPgno) || rootPgno == PENDING_BYTE_PAGE(bt))
            rootPgno++;
        Debug.Assert(rootPgno >= 3);

        // Allocate a page. The page that currently resides at pgnoRoot will be
        // moved to the allocated page (unless the allocated page happens to
        // reside at pgnoRoot).
        Pid movePgno = 0;                     // Move a page here to make room for the root-page
        MemPage movePage = new MemPage();     // The page to move to.
        rc = allocateBtreePage(bt, ref movePage, ref movePgno, rootPgno, BTALLOC.EXACT);
        if (rc != RC.OK)
            return rc;

        if (movePgno != rootPgno)
        {
            releasePage(movePage);

            // Move the page currently at pgnoRoot to pgnoMove.
            rc = btreeGetPage(bt, rootPgno, ref rootPage, false);
            if (rc != RC.OK)
                return rc;

            // pgnoRoot is the page that will be used for the root-page of the
            // new table (assuming an error did not occur). But we were
            // allocated pgnoMove. If required (i.e. if it was not allocated by
            // extending the file), the current page at position pgnoMove is
            // already journaled.
            PTRMAP ptrType = 0;
            Pid ptrPage = 0;
            rc = ptrmapGet(bt, rootPgno, ref ptrType, ref ptrPage);
            if (ptrType == PTRMAP.ROOTPAGE || ptrType == PTRMAP.FREEPAGE)
                rc = SysEx.CORRUPT_BKPT();
            if (rc != RC.OK)
            {
                releasePage(rootPage);
                return rc;
            }
            Debug.Assert(ptrType != PTRMAP.ROOTPAGE);
            Debug.Assert(ptrType != PTRMAP.FREEPAGE);
            rc = relocatePage(bt, rootPage, ptrType, ptrPage, movePgno, false);
            releasePage(rootPage);

            // Obtain the page at pgnoRoot.
            if (rc != RC.OK)
                return rc;
            rc = btreeGetPage(bt, rootPgno, ref rootPage, false);
            if (rc != RC.OK)
                return rc;
            rc = Pager.Write(rootPage.DBPage);
            if (rc != RC.OK)
            {
                releasePage(rootPage);
                return rc;
            }
        }
        else
            rootPage = movePage;

        // Update the pointer-map and meta-data with the new root-page number.
        ptrmapPut(bt, rootPgno, PTRMAP.ROOTPAGE, 0, ref rc);
        if (rc != RC.OK)
        {
            releasePage(rootPage);
            return rc;
        }

        // When the new root page was allocated, page 1 was made writable in
        // order either to increase the database filesize, or to decrement the
        // freelist count. Hence, the sqlite3BtreeUpdateMeta() call cannot fail.
        Debug.Assert(Pager.Iswriteable(bt.Page1.DBPage));
        rc = p.UpdateMeta(META.LARGEST_ROOT_PAGE, rootPgno);
        if (C._NEVER(rc != RC.OK))
        {
            releasePage(rootPage);
            return rc;
        }
    }
    else
    {
        rc = allocateBtreePage(bt, ref rootPage, ref rootPgno, 1, BTALLOC.ANY);
        if (rc != RC.OK)
            return rc;
    }
#endif

    Debug.Assert(Pager.Iswriteable(rootPage.DBPage));
    int ptfFlags; // Page-type flags for the root page of new table
    if ((createTabFlags & BTREE_INTKEY) != 0)
        ptfFlags = PTF_INTKEY | PTF_LEAFDATA | PTF_LEAF;
    else
        ptfFlags = PTF_ZERODATA | PTF_LEAF;
    zeroPage(rootPage, ptfFlags);
    Pager.Unref(rootPage.DBPage);
    Debug.Assert((bt.OpenFlags & OPEN.SINGLE) == 0 || rootPgno == 2);
    tableID = (int)rootPgno;
    return RC.OK;
}