// Disconnect every VTable object attached to 'table' from its database
// connection, except the one (if any) belonging to connection 'ctx', which
// is kept as the sole entry on table.VTables and returned to the caller.
// VTables owned by other connections are pushed onto their owning
// connection's Disconnect list, to be released later (see UnlockList()).
static VTable VTableDisconnectAll(Context ctx, Table table)
{
    // Assert that the mutex (if any) associated with the BtShared database that contains table p is held by the caller. See header comments
    // above function sqlite3VtabUnlockList() for an explanation of why this makes it safe to access the sqlite3.pDisconnect list of any
    // database connection that may have an entry in the p->pVTable list.
    Debug.Assert(ctx == null || Btree.SchemaMutexHeld(ctx, 0, table.Schema));
    VTable r = null;               // The VTable owned by ctx, if one is found
    VTable vtable = table.VTables;
    table.VTables = null;          // Rebuild the list: it will hold at most r
    while (vtable != null)
    {
        VTable next = vtable.Next;
        Context ctx2 = vtable.Ctx;
        Debug.Assert(ctx2 != null);
        if (ctx2 == ctx)
        {
            // This entry belongs to the caller's connection: keep it attached.
            r = vtable;
            table.VTables = r;
            r.Next = null;
        }
        else
        {
            // Defer the disconnect: push onto the owning connection's list.
            vtable.Next = ctx2.Disconnect;
            ctx2.Disconnect = vtable;
        }
        vtable = next;
    }
    Debug.Assert(ctx == null || r != null);
    return (r);
}
// Move the blob handle's underlying prepared statement to the row with
// rowid 'row' and position the b-tree cursor on the requested blob column.
// On success p.Offset/p.Bytes/p.Cursor describe the blob. On failure the
// statement is finalized, p.Stmt is cleared, and an error message
// (allocated from p.Ctx) is returned via errOut.
static RC BlobSeekToRow(Incrblob p, long row, out string errOut)
{
    string err = null; // Error message
    Vdbe v = p.Stmt;

    // Set the value of the SQL statements only variable to integer iRow. This is done directly instead of using sqlite3_bind_int64() to avoid
    // triggering asserts related to mutexes.
    Debug.Assert((v.Vars[0].Flags & MEM.Int) != 0);
    v.Vars[0].u.I = row;

    RC rc = v.Step();
    if (rc == RC.ROW)
    {
        uint type = v.Cursors[0].Types[p.Col];
        if (type < 12)
        {
            // Serial types below 12 are NULL, integer or real - none of which can be opened as a blob.
            err = C._mtagprintf(p.Ctx, "cannot open value of type %s", type == 0 ? "null" : type == 7 ? "real" : "integer");
            rc = RC.ERROR;
            Vdbe.Finalize(v);
            p.Stmt = null;
        }
        else
        {
            // Record where the blob starts and its size, and pre-load the overflow page list for the open cursor.
            p.Offset = v.Cursors[0].Offsets[p.Col];
            p.Bytes = SerialTypeLen(type);
            p.Cursor = v.Cursors[0].Cursor;
            p.Cursor.EnterCursor();
            Btree.CacheOverflow(p.Cursor);
            p.Cursor.LeaveCursor();
        }
    }
    if (rc == RC.ROW)
    {
        rc = RC.OK;
    }
    else if (p.Stmt != null)
    {
        // Step() completed without landing on a row: finalize and report why.
        rc = Vdbe.Finalize(p.Stmt);
        p.Stmt = null;
        if (rc == RC.OK)
        {
            err = C._mtagprintf(p.Ctx, "no such rowid: %lld", row);
            rc = RC.ERROR;
        }
        else
        {
            err = C._mtagprintf(p.Ctx, "%s", DataEx.Errmsg(p.Ctx));
        }
    }
    Debug.Assert(rc != RC.OK || err == null);
    Debug.Assert(rc != RC.ROW && rc != RC.DONE);
    errOut = err;
    return (rc);
}
// Implementation of the DROP TRIGGER statement. Searches every attached
// database (TEMP before MAIN) for the named trigger and, if found, hands it
// to DropTriggerPtr() to generate the drop code. A non-zero noErr
// (DROP TRIGGER IF EXISTS) suppresses the "no such trigger" error.
public static void DropTrigger(Parse parse, SrcList name, int noErr)
{
    Context ctx = parse.Ctx;
    if (ctx.MallocFailed || parse.ReadSchema(parse) != RC.OK)
    {
        goto drop_trigger_cleanup;
    }
    Debug.Assert(name.Srcs == 1);
    string dbName = name.Ids[0].Database; // Explicit database qualifier, or null
    string nameAsString = name.Ids[0].Name;
    int nameLength = nameAsString.Length;
    Debug.Assert(dbName != null || Btree.HoldsAllMutexes(ctx));
    Trigger trigger = null;
    for (int i = E.OMIT_TEMPDB; i < ctx.DBs.length; i++)
    {
        int j = (i < 2 ? i ^ 1 : i); // Search TEMP before MAIN
        if (dbName != null && !string.Equals(ctx.DBs[j].Name, dbName, StringComparison.InvariantCultureIgnoreCase))
        {
            continue;
        }
        Debug.Assert(Btree.SchemaMutexHeld(ctx, j, null));
        trigger = ctx.DBs[j].Schema.TriggerHash.Find(nameAsString, nameLength, (Trigger)null);
        if (trigger != null)
        {
            break;
        }
    }
    if (trigger == null)
    {
        if (noErr == 0)
        {
            parse.ErrorMsg("no such trigger: %S", name, 0);
        }
        else
        {
            // IF EXISTS: no error, but still verify the named schema cookie.
            parse.CodeVerifyNamedSchema(dbName);
        }
        parse.CheckSchema = true;
        goto drop_trigger_cleanup;
    }
    DropTriggerPtr(parse, trigger);

drop_trigger_cleanup:
    SrcListDelete(ctx, ref name);
}
// Verify that the schema cookie of every attached database matches the
// in-memory schema representation. If a cookie is stale, the cached schema
// for that database is reset and Parse.RC is set to RC.SCHEMA so the
// statement gets re-prepared. Only called after Parse.CheckSchema was set.
//
// Fixes over the previous revision:
//  - 'parse.CheckSchema != 0' compared a bool against an int, which is
//    inconsistent with 'parse.CheckSchema = true' assignments elsewhere in
//    this file; asserted the bool directly.
//  - 'cookie' is initialized before being passed by ref (C# requires
//    definite assignment for ref arguments).
static void SchemaIsValid(Parse parse)
{
    Context ctx = parse.Ctx;
    Debug.Assert(parse.CheckSchema);
    Debug.Assert(MutexEx.Held(ctx.Mutex));
    for (int db = 0; db < ctx.DBs.length; db++)
    {
        bool openedTransaction = false; // True if a transaction is opened
        Btree bt = ctx.DBs[db].Bt;      // Btree database to read cookie from
        if (bt == null)
        {
            continue;
        }

        // If there is not already a read-only (or read-write) transaction opened on the b-tree database, open one now. If a transaction is
        // opened, it will be closed immediately after reading the meta-value.
        if (!bt.IsInReadTrans())
        {
            RC rc = bt.BeginTrans(0);
            if (rc == RC.NOMEM || rc == RC.IOERR_NOMEM)
            {
                ctx.MallocFailed = true;
            }
            if (rc != RC.OK)
            {
                return;
            }
            openedTransaction = true;
        }

        // Read the schema cookie from the database. If it does not match the value stored as part of the in-memory schema representation,
        // set Parse.RC to SQLITE_SCHEMA.
        uint cookie = 0;
        bt.GetMeta(Btree.META.SCHEMA_VERSION, ref cookie);
        Debug.Assert(Btree.SchemaMutexHeld(ctx, db, null));
        if (cookie != ctx.DBs[db].Schema.SchemaCookie)
        {
            ResetOneSchema(ctx, db);
            parse.RC = RC.SCHEMA;
        }

        // Close the transaction, if one was opened.
        if (openedTransaction)
        {
            bt.Commit();
        }
    }
}
// Disconnect the VTable object belonging to connection 'ctx' from the
// virtual table 'table': unlink it from the singly-linked list rooted at
// table.VTables and release its reference via Unlock().
//
// Fix over the previous revision: the old code copied the matching node
// into a local, advanced that local to .Next, and called Unlock() on the
// *next* node (a NullReferenceException when the match was last) - and it
// never removed the matching entry from table.VTables at all. This version
// tracks the previous node so the match is properly unlinked, then unlocked.
public static void Disconnect(Context ctx, Table table)
{
    Debug.Assert(E.IsVirtual(table));
    Debug.Assert(Btree.HoldsAllMutexes(ctx));
    Debug.Assert(MutexEx.Held(ctx.Mutex));
    VTable prev = null; // Node before 'vtable', or null while at the head
    for (VTable vtable = table.VTables; vtable != null; prev = vtable, vtable = vtable.Next)
    {
        if (vtable.Ctx == ctx)
        {
            // Unlink the matching entry from the list, then release it.
            if (prev == null)
            {
                table.VTables = vtable.Next;
            }
            else
            {
                prev.Next = vtable.Next;
            }
            vtable.Unlock();
            break;
        }
    }
}
// Release every VTable that has been queued for disconnection on
// ctx.Disconnect. The list is detached from the connection up front so that
// re-entrant calls observe an empty list; if anything was pending, all
// prepared statements are expired first because they may still reference
// one of the vtables being released.
public static void UnlockList(Context ctx)
{
    Debug.Assert(Btree.HoldsAllMutexes(ctx));
    Debug.Assert(MutexEx.Held(ctx.Mutex));

    VTable head = ctx.Disconnect;
    ctx.Disconnect = null;
    if (head == null)
        return;

    Vdbe.ExpirePreparedStatements(ctx);
    while (head != null)
    {
        VTable pending = head;
        head = head.Next;
        pending.Unlock();
    }
}
// Return the Schema associated with b-tree 'bt', or a standalone Schema
// when bt is null. A brand-new Schema (recognized by FileFormat == 0) has
// its hash tables initialized and its text encoding defaulted to UTF-8.
// A null result marks an allocation failure on the connection.
public static Schema SchemaGet(Context ctx, Btree bt)
{
    Schema schema;
    if (bt != null)
        schema = bt.Schema(-1, SchemaClear);
    else
        schema = new Schema();

    if (schema == null)
    {
        ctx.MallocFailed = true;
    }
    else if (schema.FileFormat == 0)
    {
        // First use of this Schema object: set up its lookup tables.
        schema.TableHash.Init();
        schema.IndexHash.Init();
        schema.TriggerHash.Init();
        schema.FKeyHash.Init();
        schema.Encode = TEXTENCODE.UTF8;
    }
    return schema;
}
// Invoke the connection's write-ahead-log hook once for each attached
// database whose WAL received new frames, passing the database name and
// the number of frames. Returns the first non-OK result from the hook.
static RC DoWalCallbacks(Context ctx)
{
    RC rc = RC.OK;
#if !OMIT_WAL
    for (int i = 0; i < ctx.DBs.length; i++)
    {
        Btree bt = ctx.DBs[i].Bt;
        if (bt == null)
            continue;
        int entrys = sqlite3PagerWalCallback(bt.get_Pager());
        if (rc == RC.OK && entrys > 0 && ctx.WalCallback != null)
            rc = ctx.WalCallback(ctx.WalArg, ctx, ctx.DBs[i].Name, entrys);
    }
#endif
    return rc;
}
// Wrapper around Prepare_() that holds the connection mutex and all b-tree
// mutexes for the duration of statement compilation. If the first attempt
// fails with RC.SCHEMA (stale schema cookie), the half-built statement is
// finalized and compilation is retried once against the reloaded schema.
public static RC LockAndPrepare(Context ctx, string sql, int bytes, bool isPrepareV2, Vdbe reprepare, ref Vdbe stmtOut, ref string tailOut)
{
    if (!sqlite3SafetyCheckOk(ctx))
    {
        stmtOut = null;
        tailOut = null;
        return (SysEx.MISUSE_BKPT());
    }
    MutexEx.Enter(ctx.Mutex);
    Btree.EnterAll(ctx);
    RC rc = Prepare_(ctx, sql, bytes, isPrepareV2, reprepare, ref stmtOut, ref tailOut);
    if (rc == RC.SCHEMA)
    {
        // The schema changed underneath us: discard and recompile once.
        stmtOut.Finalize();
        rc = Prepare_(ctx, sql, bytes, isPrepareV2, reprepare, ref stmtOut, ref tailOut);
    }
    Btree.LeaveAll(ctx);
    MutexEx.Leave(ctx.Mutex);
    Debug.Assert(rc == RC.OK || stmtOut == null);
    return (rc);
}
// Copy 'amount' bytes starting at 'offset' from the entry the b-tree cursor
// 'cur' points at into 'mem' as a BLOB. When the requested range lies
// entirely within the local (non-overflow) portion of the page, the bytes
// are copied directly; otherwise mem is grown and Btree.Key()/Data() is
// used to assemble the bytes, including overflow pages.
// NOTE(review): an identical MemFromBtree definition appears later in this
// file - presumably they belong to different build configurations or source
// files joined by extraction; confirm and remove one if not.
public static RC MemFromBtree(Btree.BtCursor cur, int offset, int amount, bool key, Mem mem)
{
    RC rc = RC.OK;
    Debug.Assert(Btree.CursorIsValid(cur));

    // Note: the calls to BtreeKeyFetch() and DataFetch() below assert() that both the BtShared and database handle mutexes are held.
    Debug.Assert((mem.Flags & MEM.RowSet) == 0);
    int available = 0; // Number of bytes available on the local btree page
    uint dummy1;
    byte[] data = (byte[])(key ? Btree.KeyFetch(cur, ref available, out dummy1) : Btree.DataFetch(cur, ref available, out dummy1)); // Data from the btree layer
    Debug.Assert(data != null);
    if (offset + amount <= available && (mem.Flags & MEM.Dyn) == 0)
    {
        // Fast path: the whole requested range is on the local page.
        MemRelease(mem);
        mem.Z_ = C._alloc(amount);
        Buffer.BlockCopy(data, offset, mem.Z_, 0, amount); //: mem->Z = &data[offset];
        mem.Flags = MEM.Blob | MEM.Ephem;
    }
    else if ((rc = MemGrow(mem, amount + 2, false)) == RC.OK)
    {
        // Slow path: read through the b-tree layer, which follows overflow pages.
        mem.Flags = MEM.Blob | MEM.Dyn | MEM.Term;
        mem.Encode = 0;
        mem.Type = TYPE.BLOB;
        mem.Z = null;
        mem.Z_ = C._alloc(amount);
        rc = (key ? Btree.Key(cur, (uint)offset, (uint)amount, mem.Z_) : Btree.Data(cur, (uint)offset, (uint)amount, mem.Z_));
        //: mem->Z[amount] = 0;
        //: mem->Z[amount + 1] = 0;
        if (rc != RC.OK)
        {
            MemRelease(mem);
        }
    }
    mem.N = amount;
    return (rc);
}
// (shared-cache builds) Return true if the lock required for Btree 'btree'
// to access the b-tree rooted at page 'root' with lock level 'lockType' is
// demonstrably held, or if no lock is needed. Intended for use inside
// Debug.Assert() statements only.
static bool hasSharedCacheTableLock(Btree btree, Pid root, bool isIndex, LOCK lockType)
{
    // If this database is not shareable, or if the client is reading and has the read-uncommitted flag set, then no lock is required.
    // Return true immediately.
    if (!btree.Sharable_ || (lockType == LOCK.READ && (btree.Ctx.Flags & BContext.FLAG.ReadUncommitted) != 0))
        return true;

    // If the client is reading or writing an index and the schema is not loaded, then it is too difficult to actually check to see if
    // the correct locks are held. So do not bother - just return true. This case does not come up very often anyhow.
    var schema = btree.Bt.Schema;
    if (isIndex && (schema == null || (schema.Flags & SCHEMA_.SchemaLoaded) == 0))
        return true;

    // Figure out the root-page that the lock should be held on. For table b-trees, this is just the root page of the b-tree being read or
    // written. For index b-trees, it is the root page of the associated table.
    Pid table = 0;
    if (isIndex)
        // NOTE(review): the index-to-table root mapping has not been ported;
        // the commented-out scan below is the intended logic. Until then an
        // assert on an index path will throw instead of checking the lock.
        throw new NotImplementedException();
    //for (var p = sqliteHashFirst(schema.IdxHash); p != null; p = sqliteHashNext(p))
    //{
    //    var idx = (Index)sqliteHashData(p);
    //    if (idx.TID == (int)root)
    //        table = idx.Table.TID;
    //}
    else
        table = root;

    // Search for the required lock. Either a write-lock on root-page iTab, a write-lock on the schema table, or (if the client is reading) a
    // read-lock on iTab will suffice. Return 1 if any of these are found.
    for (var lock_ = btree.Bt.Lock; lock_ != null; lock_ = lock_.Next)
        if (lock_.Btree == btree &&
            (lock_.Table == table || (lock_.Lock == LOCK.WRITE && lock_.Table == 1)) &&
            lock_.Lock >= lockType)
            return true;

    // Failed to find the required lock.
    return false;
}
// Free all foreign-key objects attached to 'table'. Each FKey is unlinked
// from the schema-wide FKeyHash (which maps referenced-table names to the
// chain of FKeys that reference them), its implicitly-created action
// triggers are deleted, and the FKey itself is freed.
public static void FKDelete(Context ctx, Table table)
{
    Debug.Assert(ctx == null || Btree.SchemaMutexHeld(ctx, 0, table.Schema));
    FKey next; // Copy of pFKey.pNextFrom
    for (FKey fkey = table.FKeys; fkey != null; fkey = next)
    {
        // Remove the FK from the fkeyHash hash table.
        //: if (!ctx || ctx->BytesFreed == 0)
        {
            if (fkey.PrevTo != null)
            {
                fkey.PrevTo.NextTo = fkey.NextTo;
            }
            else
            {
                // fkey headed its hash bucket: re-point the bucket at the next
                // entry (or drop the bucket entirely when there is none).
                FKey p = fkey.NextTo;
                string z = (p != null ? fkey.NextTo.To : fkey.To);
                table.Schema.FKeyHash.Insert(z, z.Length, p);
            }
            if (fkey.NextTo != null)
            {
                fkey.NextTo.PrevTo = fkey.PrevTo;
            }
        }

        // EV: R-30323-21917 Each foreign key constraint in SQLite is classified as either immediate or deferred.
        Debug.Assert(fkey.IsDeferred == false || fkey.IsDeferred == true);

        // Delete any triggers created to implement actions for this FK.
#if !OMIT_TRIGGER
        FKTriggerDelete(ctx, fkey.Triggers[0]);
        FKTriggerDelete(ctx, fkey.Triggers[1]);
#endif
        next = fkey.NextFrom;
        C._tagfree(ctx, ref fkey);
    }
}
// Return the list of triggers that fire on 'table', including any TEMP
// triggers (which live in the TEMP schema, DBs[1], rather than the table's
// own schema). Matching TEMP triggers are linked in front of the table's
// own trigger list via their Next fields.
public static Trigger List(Parse parse, Table table)
{
    Schema tmpSchema = parse.Ctx.DBs[1].Schema; // TEMP database schema
    Trigger list = null; // List of triggers to return
    if (parse.DisableTriggers)
    {
        return (null);
    }
    if (tmpSchema != table.Schema)
    {
        // Scan the TEMP schema for triggers that target this table.
        // NOTE(review): the iDb argument is 0 although tmpSchema is DBs[1]'s
        // schema - presumably SchemaMutexHeld ignores the index when a schema
        // object is supplied; confirm against its implementation.
        Debug.Assert(Btree.SchemaMutexHeld(parse.Ctx, 0, tmpSchema));
        for (HashElem p = tmpSchema.TriggerHash.First; p != null; p = p.Next)
        {
            Trigger trig = (Trigger)p.Data;
            if (trig.TabSchema == table.Schema && string.Equals(trig.Table, table.Name, StringComparison.InvariantCultureIgnoreCase))
            {
                // Prepend this TEMP trigger; the first match links to the
                // table's own triggers so the whole chain stays reachable.
                trig.Next = (list != null ? list : table.Triggers);
                list = trig;
            }
        }
    }
    return (list != null ? list : table.Triggers);
}
// Remove the trigger named 'name' from the trigger hash of database 'db'
// and delete it. If the trigger fires on a table in the same database it is
// also unlinked from that table's Triggers list. Marks the connection's
// schema as internally changed.
public static void UnlinkAndDeleteTrigger(Context ctx, int db, string name)
{
    Debug.Assert(Btree.SchemaMutexHeld(ctx, db, null));
    // Inserting a null data pointer removes the hash entry and returns the old value.
    Trigger trigger = ctx.DBs[db].Schema.TriggerHash.Insert(name, name.Length, (Trigger)null);
    if (C._ALWAYS(trigger != null))
    {
        if (trigger.Schema == trigger.TabSchema)
        {
            Table table = TableOfTrigger(trigger);
            //: Trigger** pp;
            //: for (pp = &table->Triggers; *pp != trigger; pp = &((*pp)->Next)) ;
            //: *pp = (*pp)->Next;
            // Unlink 'trigger' from the singly-linked table.Triggers list
            // (C# port of the pointer-to-pointer walk above).
            if (table.Triggers == trigger)
            {
                table.Triggers = trigger.Next;
            }
            else
            {
                Trigger cc = table.Triggers;
                while (cc != null)
                {
                    if (cc.Next == trigger)
                    {
                        cc.Next = cc.Next.Next;
                        break;
                    }
                    cc = cc.Next;
                }
                Debug.Assert(cc != null); // The trigger must be on the list
            }
        }
        DeleteTrigger(ctx, ref trigger);
        ctx.Flags |= Context.FLAG.InternChanges;
    }
}
// Create a new b-tree table in the database file and return its root page
// number via tableID. createTabFlags selects an intkey (rowid table) or
// zerodata (index-style) root page. In auto-vacuum mode the new root page
// must be placed directly after the current largest root page, which may
// require relocating an existing page out of the way; otherwise any free
// page will do. Requires an open write transaction.
static RC btreeCreateTable(Btree p, ref int tableID, int createTabFlags)
{
    BtShared bt = p.Bt;
    Debug.Assert(p.HoldsMutex());
    Debug.Assert(bt.InTransaction == TRANS.WRITE);
    Debug.Assert((bt.BtsFlags & BTS.READ_ONLY) == 0);
    RC rc;
    MemPage root = new MemPage();
    Pid rootID = 0;
#if OMIT_AUTOVACUUM
    rc = allocateBtreePage(bt, ref root, ref rootID, 1, BTALLOC.ANY);
    if (rc != RC.OK)
        return rc;
#else
    if (bt.AutoVacuum)
    {
        // Creating a new table may probably require moving an existing database to make room for the new tables root page. In case this page turns
        // out to be an overflow page, delete all overflow page-map caches held by open cursors.
        invalidateAllOverflowCache(bt);

        // Read the value of meta[3] from the database to determine where the root page of the new table should go. meta[3] is the largest root-page
        // created so far, so the new root-page is (meta[3]+1).
        p.GetMeta(META.LARGEST_ROOT_PAGE, ref rootID);
        rootID++;

        // The new root-page may not be allocated on a pointer-map page, or the PENDING_BYTE page.
        while (rootID == PTRMAP_PAGENO(bt, rootID) || rootID == PENDING_BYTE_PAGE(bt))
            rootID++;
        Debug.Assert(rootID >= 3);

        // Allocate a page. The page that currently resides at pgnoRoot will be moved to the allocated page (unless the allocated page happens
        // to reside at pgnoRoot).
        Pid moveID = 0; // Move a page here to make room for the root-page
        MemPage pageMove = new MemPage(); // The page to move to.
        rc = allocateBtreePage(bt, ref pageMove, ref moveID, rootID, BTALLOC.EXACT);
        if (rc != RC.OK)
            return rc;

        if (moveID != rootID)
        {
            releasePage(pageMove);

            // Move the page currently at pgnoRoot to pgnoMove.
            rc = btreeGetPage(bt, rootID, ref root, false);
            if (rc != RC.OK)
                return rc;

            // pgnoRoot is the page that will be used for the root-page of the new table (assuming an error did not occur). But we were
            // allocated pgnoMove. If required (i.e. if it was not allocated by extending the file), the current page at position pgnoMove
            // is already journaled.
            PTRMAP type = 0;
            Pid ptrPageID = 0;
            rc = ptrmapGet(bt, rootID, ref type, ref ptrPageID);
            if (type == PTRMAP.ROOTPAGE || type == PTRMAP.FREEPAGE)
                rc = SysEx.CORRUPT_BKPT();
            if (rc != RC.OK)
            {
                releasePage(root);
                return rc;
            }
            Debug.Assert(type != PTRMAP.ROOTPAGE);
            Debug.Assert(type != PTRMAP.FREEPAGE);
            rc = relocatePage(bt, root, type, ptrPageID, moveID, false);
            releasePage(root);

            // Obtain the page at pgnoRoot
            if (rc != RC.OK)
                return rc;
            rc = btreeGetPage(bt, rootID, ref root, false);
            if (rc != RC.OK)
                return rc;
            rc = Pager.Write(root.DBPage);
            if (rc != RC.OK)
            {
                releasePage(root);
                return rc;
            }
        }
        else
            root = pageMove;

        // Update the pointer-map and meta-data with the new root-page number.
        ptrmapPut(bt, rootID, PTRMAP.ROOTPAGE, 0, ref rc);
        if (rc != RC.OK)
        {
            releasePage(root);
            return rc;
        }

        // When the new root page was allocated, page 1 was made writable in order either to increase the database filesize, or to decrement the
        // freelist count. Hence, the sqlite3BtreeUpdateMeta() call cannot fail.
        Debug.Assert(Pager.Iswriteable(bt.Page1.DBPage));
        rc = p.UpdateMeta(META.LARGEST_ROOT_PAGE, rootID);
        if (C._NEVER(rc != RC.OK))
        {
            releasePage(root);
            return rc;
        }
    }
    else
    {
        rc = allocateBtreePage(bt, ref root, ref rootID, 1, BTALLOC.ANY);
        if (rc != RC.OK)
            return rc;
    }
#endif
    Debug.Assert(Pager.Iswriteable(root.DBPage));
    int ptfFlags; // Page-type flags for the root page of new table
    if ((createTabFlags & BTREE_INTKEY) != 0)
        ptfFlags = PTF_INTKEY | PTF_LEAFDATA | PTF_LEAF;
    else
        ptfFlags = PTF_ZERODATA | PTF_LEAF;
    zeroPage(root, ptfFlags);
    Pager.Unref(root.DBPage);
    Debug.Assert((bt.OpenFlags & OPEN.SINGLE) == 0 || rootID == 2);
    tableID = (int)rootID;
    return RC.OK;
}
// Stub overload of hasSharedCacheTableLock() - presumably for builds
// without shared-cache support, where the required lock is trivially held;
// always reports true. Used only inside Debug.Assert() statements.
static bool hasSharedCacheTableLock(Btree a, Pid b, int c, int d)
{
    return true;
}
// No-op stub - presumably the non-shared-cache build, in which there are
// no shared-cache table locks to clear; confirm against the build flags.
static void clearAllSharedCacheTableLocks(Btree a) { }
// No-op stub - presumably the build variant where incremental-blob cursors
// are not tracked (OMIT_INCRBLOB), so there is nothing to invalidate;
// confirm against the build flags.
static void invalidateIncrblobCursors(Btree x, long y, bool z) { }
// (shared cache) Record or upgrade a lock held by Btree 'p' on the table
// whose root page is 'table'. The caller must already have confirmed via
// querySharedCacheTableLock() that no conflicting lock exists.
static RC setSharedCacheTableLock(Btree p, Pid table, LOCK lock_)
{
    Debug.Assert(p.HoldsMutex());
    Debug.Assert(lock_ == LOCK.READ || lock_ == LOCK.WRITE);
    Debug.Assert(p.Ctx != null);

    // A connection with the read-uncommitted flag set will never try to obtain a read-lock using this function. The only read-lock obtained
    // by a connection in read-uncommitted mode is on the sqlite_master table, and that lock is obtained in BtreeBeginTrans().
    Debug.Assert((p.Ctx.Flags & BContext.FLAG.ReadUncommitted) == 0 || lock_ == LOCK.WRITE);

    // This function should only be called on a sharable b-tree after it has been determined that no other b-tree holds a conflicting lock.
    var bt = p.Bt;
    Debug.Assert(p.Sharable_);
    Debug.Assert(RC.OK == querySharedCacheTableLock(p, table, lock_));

    // First search the list for an existing lock on this table.
    BtLock newLock = null;
    for (var iter = bt.Lock; iter != null; iter = iter.Next)
        if (iter.Table == table && iter.Btree == p)
        {
            newLock = iter;
            break;
        }

    // If the above search did not find a BtLock struct associating Btree p with table iTable, allocate one and link it into the list.
    if (newLock == null)
    {
        newLock = new BtLock();
        newLock.Table = table;
        newLock.Btree = p;
        newLock.Next = bt.Lock;
        bt.Lock = newLock;
    }

    // Set the BtLock.eLock variable to the maximum of the current lock and the requested lock. This means if a write-lock was already held
    // and a read-lock requested, we don't incorrectly downgrade the lock.
    Debug.Assert(LOCK.WRITE > LOCK.READ);
    if (lock_ > newLock.Lock)
        newLock.Lock = lock_;

    return RC.OK;
}
// (shared cache) Determine whether Btree 'p' may obtain a lock of type
// 'lockType' (READ or WRITE) on the table rooted at page 'table'. Returns
// RC.OK when the lock may be taken, or RC.LOCKED_SHAREDCACHE when another
// connection on the same shared cache holds a conflicting lock (the
// blocking connection is reported via ConnectionBlocked()).
static RC querySharedCacheTableLock(Btree p, Pid table, LOCK lockType)
{
    Debug.Assert(p.HoldsMutex());
    Debug.Assert(lockType == LOCK.READ || lockType == LOCK.WRITE);
    Debug.Assert(p.Ctx != null);
    Debug.Assert((p.Ctx.Flags & BContext.FLAG.ReadUncommitted) == 0 || lockType == LOCK.WRITE || table == 1);

    // If requesting a write-lock, then the Btree must have an open write transaction on this file. And, obviously, for this to be so there
    // must be an open write transaction on the file itself.
    var bt = p.Bt;
    Debug.Assert(lockType == LOCK.READ || (p == bt.Writer && p.InTrans == TRANS.WRITE));
    Debug.Assert(lockType == LOCK.READ || bt.InTransaction == TRANS.WRITE);

    // This routine is a no-op if the shared-cache is not enabled
    if (!p.Sharable_)
        return RC.OK;

    // If some other connection is holding an exclusive lock, the requested lock may not be obtained.
    if (bt.Writer != p && (bt.BtsFlags & BTS.EXCLUSIVE) != 0)
    {
        BContext.ConnectionBlocked(p.Ctx, bt.Writer.Ctx);
        return RC.LOCKED_SHAREDCACHE;
    }

    for (var iter = bt.Lock; iter != null; iter = iter.Next)
    {
        // The condition (pIter->eLock!=eLock) in the following if(...) statement is a simplification of:
        //
        //   (eLock==WRITE_LOCK || pIter->eLock==WRITE_LOCK)
        //
        // since we know that if eLock==WRITE_LOCK, then no other connection may hold a WRITE_LOCK on any table in this file (since there can
        // only be a single writer).
        Debug.Assert(iter.Lock == LOCK.READ || iter.Lock == LOCK.WRITE);
        Debug.Assert(lockType == LOCK.READ || iter.Btree == p || iter.Lock == LOCK.READ);
        if (iter.Btree != p && iter.Table == table && iter.Lock != lockType)
        {
            BContext.ConnectionBlocked(p.Ctx, iter.Btree.Ctx);
            if (lockType == LOCK.WRITE)
            {
                // A write-lock request must come from the designated writer;
                // mark the shared cache as having a writer waiting.
                Debug.Assert(p == bt.Writer);
                bt.BtsFlags |= BTS.PENDING;
            }
            return RC.LOCKED_SHAREDCACHE;
        }
    }
    return RC.OK;
}
// Build a Trigger object for a CREATE TRIGGER statement up to (but not
// including) the BEGIN keyword; FinishTrigger() completes the job later.
// On success the partially-built trigger is stored in parse.NewTrigger.
//
// Fixes over the previous revision:
//  - 'ctx.Init.Busy == null' compared a bool against null; replaced with
//    '!ctx.Init.Busy', matching every other use of Init.Busy in this file.
//  - trigger.Name was assigned the raw Token 'name' instead of the resolved
//    string; it now receives nameAsString, whose ownership is transferred
//    so the cleanup path no longer frees the live name.
//  - The second authorizer call lacked '!= 0' (needed in a bool '||'
//    expression) and passed 0 where a string argument belongs; now 'null'.
//  - Expr.Dup() was given the database *index* (db) where the connection
//    belongs; it now receives ctx, matching the IdListDup call beside it.
//  - A redundant 'tableName == null' re-check (already performed above,
//    flagged "//?") was dropped.
public static void BeginTrigger(Parse parse, Token name1, Token name2, TK trTm, TK op, IdList columns, SrcList tableName, Expr when, bool isTemp, int noErr)
{
    Context ctx = parse.Ctx; // The database connection
    Debug.Assert(name1 != null); // pName1.z might be NULL, but not pName1 itself
    Debug.Assert(name2 != null);
    Debug.Assert(op == TK.INSERT || op == TK.UPDATE || op == TK.DELETE);
    Debug.Assert(op > 0 && op < (TK)0xff);
    Trigger trigger = null; // The new trigger
    int db; // The database to store the trigger in
    Token name = null; // The unqualified db name
    string nameAsString = null; // Trigger name as a string (declared here so trigger_cleanup may free it)

    if (isTemp)
    {
        // If TEMP was specified, then the trigger name may not be qualified.
        if (name2.length > 0)
        {
            parse.ErrorMsg("temporary trigger may not have qualified name");
            goto trigger_cleanup;
        }
        db = 1;
        name = name1;
    }
    else
    {
        // Figure out the db that the trigger will be created in
        db = parse.TwoPartName(name1, name2, ref name);
        if (db < 0)
        {
            goto trigger_cleanup;
        }
    }
    if (tableName == null || ctx.MallocFailed)
    {
        goto trigger_cleanup;
    }

    // A long-standing parser bug is that this syntax was allowed:
    //    CREATE TRIGGER attached.demo AFTER INSERT ON attached.tab ....
    //                   ^^^^^^^^
    // To maintain backwards compatibility, ignore the database name on pTableName if we are reparsing out of SQLITE_MASTER.
    if (ctx.Init.Busy && db != 1)
    {
        C._tagfree(ctx, ref tableName.Ids[0].Database);
        tableName.Ids[0].Database = null;
    }

    // If the trigger name was unqualified, and the table is a temp table, then set iDb to 1 to create the trigger in the temporary database.
    // If SrcListLookup() returns null, indicating the table does not exist, the error is caught by the block below.
    Table table = Delete.SrcListLookup(parse, tableName); // Table that the trigger fires off of
    if (!ctx.Init.Busy && name2.length == 0 && table != null && table.Schema == ctx.DBs[1].Schema)
    {
        db = 1;
    }

    // Ensure the table name matches database name and that the table exists
    if (ctx.MallocFailed)
    {
        goto trigger_cleanup;
    }
    Debug.Assert(tableName.Srcs == 1);
    DbFixer sFix = new DbFixer(); // State vector for the DB fixer
    if (sFix.FixInit(parse, db, "trigger", name) && sFix.FixSrcList(tableName))
    {
        goto trigger_cleanup;
    }
    table = Delete.SrcListLookup(parse, tableName);
    if (table == null)
    {
        // The table does not exist.
        if (ctx.Init.DB == 1)
        {
            // Ticket #3810.
            // Normally, whenever a table is dropped, all associated triggers are dropped too. But if a TEMP trigger is created on a non-TEMP table
            // and the table is dropped by a different database connection, the trigger is not visible to the database connection that does the
            // drop so the trigger cannot be dropped. This results in an "orphaned trigger" - a trigger whose associated table is missing.
            ctx.Init.OrphanTrigger = true;
        }
        goto trigger_cleanup;
    }
    if (E.IsVirtual(table))
    {
        parse.ErrorMsg("cannot create triggers on virtual tables");
        goto trigger_cleanup;
    }

    // Check that the trigger name is not reserved and that no trigger of the specified name exists
    nameAsString = Parse.NameFromToken(ctx, name);
    if (nameAsString == null || parse.CheckObjectName(nameAsString) != RC.OK)
    {
        goto trigger_cleanup;
    }
    Debug.Assert(Btree.SchemaMutexHeld(ctx, db, null));
    if (ctx.DBs[db].Schema.TriggerHash.Find(nameAsString, nameAsString.Length, (Trigger)null) != null)
    {
        if (noErr == 0)
        {
            parse.ErrorMsg("trigger %T already exists", name);
        }
        else
        {
            // CREATE TRIGGER IF NOT EXISTS: no error, but verify the schema.
            Debug.Assert(!ctx.Init.Busy);
            parse.CodeVerifySchema(db);
        }
        goto trigger_cleanup;
    }

    // Do not create a trigger on a system table
    if (table.Name.StartsWith("sqlite_", StringComparison.InvariantCultureIgnoreCase))
    {
        parse.ErrorMsg("cannot create trigger on system table");
        parse.Errs++;
        goto trigger_cleanup;
    }

    // INSTEAD of triggers are only for views and views only support INSTEAD of triggers.
    if (table.Select != null && trTm != TK.INSTEAD)
    {
        parse.ErrorMsg("cannot create %s trigger on view: %S", (trTm == TK.BEFORE ? "BEFORE" : "AFTER"), tableName, 0);
        goto trigger_cleanup;
    }
    if (table.Select == null && trTm == TK.INSTEAD)
    {
        parse.ErrorMsg("cannot create INSTEAD OF trigger on table: %S", tableName, 0);
        goto trigger_cleanup;
    }

#if !OMIT_AUTHORIZATION
    {
        int tabDb = Prepare.SchemaToIndex(ctx, table.Schema); // Index of the database holding pTab
        AUTH code = AUTH.CREATE_TRIGGER;
        string dbName = ctx.DBs[tabDb].Name;
        string dbTrigName = (isTemp ? ctx.DBs[1].Name : dbName);
        if (tabDb == 1 || isTemp)
        {
            code = AUTH.CREATE_TEMP_TRIGGER;
        }
        if (Auth.Check(parse, code, nameAsString, table.Name, dbTrigName) != 0 || Auth.Check(parse, AUTH.INSERT, E.SCHEMA_TABLE(tabDb), null, dbName) != 0)
        {
            goto trigger_cleanup;
        }
    }
#endif

    // INSTEAD OF triggers can only appear on views and BEFORE triggers cannot appear on views. So we might as well translate every
    // INSTEAD OF trigger into a BEFORE trigger. It simplifies code elsewhere.
    if (trTm == TK.INSTEAD)
    {
        trTm = TK.BEFORE;
    }

    // Build the Trigger object
    trigger = new Trigger(); //: (Trigger *)_tagalloc(ctx, sizeof(Trigger), true);
    if (trigger == null)
    {
        goto trigger_cleanup;
    }
    trigger.Name = nameAsString;
    nameAsString = null; // Ownership transferred to the trigger; do not free below.
    trigger.Table = tableName.Ids[0].Name; //: _tagstrdup(ctx, tableName->Ids[0].Name);
    trigger.Schema = ctx.DBs[db].Schema;
    trigger.TabSchema = table.Schema;
    trigger.OP = op;
    trigger.TRtm = (trTm == TK.BEFORE ? TRIGGER.BEFORE : TRIGGER.AFTER);
    trigger.When = Expr.Dup(ctx, when, E.EXPRDUP_REDUCE);
    trigger.Columns = Expr.IdListDup(ctx, columns);
    Debug.Assert(parse.NewTrigger == null);
    parse.NewTrigger = trigger;

trigger_cleanup:
    C._tagfree(ctx, ref nameAsString);
    Expr.SrcListDelete(ctx, ref tableName);
    Expr.IdListDelete(ctx, ref columns);
    Expr.Delete(ctx, ref when);
    if (parse.NewTrigger == null)
    {
        DeleteTrigger(ctx, ref trigger);
    }
    else
    {
        Debug.Assert(parse.NewTrigger == trigger);
    }
}
// Stub of Btree.HoldsMutex() - presumably the non-threadsafe build, where
// the b-tree mutex is never contended and "held" is trivially true. Used
// only inside Debug.Assert() statements; confirm against the THREADSAFE
// variant.
static bool HoldsMutex(Btree b)
{
    return (true);
}
// Complete a CREATE TRIGGER statement once the BEGIN...END body has been
// parsed: attach the step list to the trigger, run the DB fixer over it,
// then either write the trigger's sqlite_master row (normal execution) or
// register the trigger in the in-memory schema (when reparsing the schema
// out of sqlite_master). 'all' spans the whole CREATE TRIGGER text.
//
// Fix over the previous revision: the two ctx.Init.Busy conditions were
// inverted relative to their own comments (and to the upstream SQLite
// sqlite3FinishTrigger) - the sqlite_master INSERT must run when NOT
// initializing, and the schema-hash insert when initializing. They have
// been swapped back.
public static void FinishTrigger(Parse parse, TriggerStep stepList, Token all)
{
    Trigger trig = parse.NewTrigger; // Trigger being finished
    Context ctx = parse.Ctx; // The database
    Token nameToken = new Token(); // Trigger name for error reporting

    parse.NewTrigger = null;
    if (C._NEVER(parse.Errs != 0) || trig == null)
    {
        goto triggerfinish_cleanup;
    }
    string name = trig.Name; // Name of trigger
    int db = Prepare.SchemaToIndex(parse.Ctx, trig.Schema); // Database containing the trigger
    trig.StepList = stepList;
    while (stepList != null)
    {
        // Point every step back at its owning trigger. On the success path
        // this leaves the local stepList null, so the cleanup below is a no-op.
        stepList.Trig = trig;
        stepList = stepList.Next;
    }
    nameToken.data = trig.Name;
    nameToken.length = (uint)nameToken.data.Length;
    DbFixer sFix = new DbFixer(); // Fixer object
    if (sFix.FixInit(parse, db, "trigger", nameToken) && sFix.FixTriggerStep(trig.StepList))
    {
        goto triggerfinish_cleanup;
    }

    // if we are not initializing, build the sqlite_master entry
    if (!ctx.Init.Busy)
    {
        // Make an entry in the sqlite_master table
        Vdbe v = parse.GetVdbe();
        if (v == null)
        {
            goto triggerfinish_cleanup;
        }
        parse.BeginWriteOperation(0, db);
        string z = all.data.Substring(0, (int)all.length); //: _tagstrndup(ctx, (char *)all->data, all->length);
        parse.NestedParse("INSERT INTO %Q.%s VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')", ctx.DBs[db].Name, E.SCHEMA_TABLE(db), name, trig.Table, z);
        C._tagfree(ctx, ref z);
        parse.ChangeCookie(db);
        v.AddParseSchemaOp(db, C._mtagprintf(ctx, "type='trigger' AND name='%q'", name));
    }

    // When reparsing the schema, register the trigger in the in-memory hash
    // and, if it fires on a table in the same database, link it to the table.
    if (ctx.Init.Busy)
    {
        Trigger link = trig;
        Debug.Assert(Btree.SchemaMutexHeld(ctx, db, null));
        trig = ctx.DBs[db].Schema.TriggerHash.Insert(name, name.Length, trig);
        if (trig != null)
        {
            // Insert returned the argument: allocation failed inside the hash.
            ctx.MallocFailed = true;
        }
        else if (link.Schema == link.TabSchema)
        {
            int tableLength = link.Table.Length;
            Table table = (Table)link.TabSchema.TableHash.Find(link.Table, tableLength, (Table)null);
            Debug.Assert(table != null);
            link.Next = table.Triggers;
            table.Triggers = link;
        }
    }

triggerfinish_cleanup:
    DeleteTrigger(ctx, ref trig);
    Debug.Assert(parse.NewTrigger == null);
    DeleteTriggerStep(ctx, ref stepList);
}
// No-op variant of btreeIntegrity() - presumably compiled when debug
// checking is disabled. NOTE(review): a checked variant with the same
// signature appears immediately below; confirm the two are guarded by
// mutually exclusive build configurations, otherwise this is a duplicate
// definition.
static void btreeIntegrity(Btree p) { }
// Debug-build sanity checks tying a Btree handle's transaction state to its
// shared BtShared state: the shared side must have an open transaction
// whenever any handle does, and must never be in a weaker state than this
// handle. NOTE(review): a no-op variant with the same signature appears
// immediately above; confirm mutually exclusive build guards.
static void btreeIntegrity(Btree p)
{
    Debug.Assert(p.Bt.InTransaction != TRANS.NONE || p.Bt.Transactions == 0);
    Debug.Assert(p.Bt.InTransaction >= p.InTrans);
}
// Complete parsing of a CREATE VIRTUAL TABLE statement. When executed for
// the first time, the full statement text is written into the table's
// pre-allocated sqlite_master slot and OP_VCreate code is emitted; when
// re-read out of sqlite_master during schema load, the Table object is
// simply registered in the in-memory schema (xConnect is deferred until
// first use).
public static void FinishParse(Parse parse, Token end)
{
    Table table = parse.NewTable; // The table being constructed
    Context ctx = parse.Ctx; // The database connection
    if (table == null)
    {
        return;
    }
    AddArgumentToVtab(parse);
    parse.Arg.data = null;
    if (table.ModuleArgs.length < 1)
    {
        return;
    }

    // If the CREATE VIRTUAL TABLE statement is being entered for the first time (in other words if the virtual table is actually being
    // created now instead of just being read out of sqlite_master) then do additional initialization work and store the statement text
    // in the sqlite_master table.
    if (!ctx.Init.Busy)
    {
        // Compute the complete text of the CREATE VIRTUAL TABLE statement
        if (end != null)
        {
            parse.NameToken.length = (uint)parse.NameToken.data.Length; //: (int)(end->data - parse->NameToken) + end->length;
        }
        string stmt = C._mtagprintf(ctx, "CREATE VIRTUAL TABLE %T", parse.NameToken); //.Z.Substring(0, parse.NameToken.length));

        // A slot for the record has already been allocated in the SQLITE_MASTER table. We just need to update that slot with all
        // the information we've collected.
        //
        // The VM register number pParse->regRowid holds the rowid of an entry in the sqlite_master table that was created for this vtab
        // by sqlite3StartTable().
        int db = Prepare.SchemaToIndex(ctx, table.Schema);
        parse.NestedParse("UPDATE %Q.%s SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q WHERE rowid=#%d", ctx.DBs[db].Name, E.SCHEMA_TABLE(db), table.Name, table.Name, stmt, parse.RegRowid);
        C._tagfree(ctx, ref stmt);
        Vdbe v = parse.GetVdbe();
        parse.ChangeCookie(db);
        v.AddOp2(OP.Expire, 0, 0);
        string where_ = C._mtagprintf(ctx, "name='%q' AND type='table'", table.Name);
        v.AddParseSchemaOp(db, where_);
        v.AddOp4(OP.VCreate, db, 0, 0, table.Name, (Vdbe.P4T)table.Name.Length + 1);
    }
    // If we are rereading the sqlite_master table create the in-memory record of the table. The xConnect() method is not called until
    // the first time the virtual table is used in an SQL statement. This allows a schema that contains virtual tables to be loaded before
    // the required virtual table implementations are registered.
    else
    {
        Schema schema = table.Schema;
        string name = table.Name;
        int nameLength = name.Length;
        Debug.Assert(Btree.SchemaMutexHeld(ctx, 0, schema));
        Table oldTable = schema.TableHash.Insert(name, nameLength, table);
        if (oldTable != null)
        {
            ctx.MallocFailed = true;
            Debug.Assert(table == oldTable); // Malloc must have failed inside HashInsert()
            return;
        }
        parse.NewTable = null;
    }
}
// No-op stub of Btree.Leave() - presumably the non-threadsafe or
// non-shared-cache build, where there is no b-tree mutex to release;
// confirm against the THREADSAFE variant.
static void Leave(Btree b) { }
// Copy 'amount' bytes starting at 'offset' from the entry the b-tree cursor
// 'cur' points at into 'mem' as a BLOB; local-page bytes are copied
// directly, otherwise the b-tree layer reads through overflow pages.
// NOTE(review): an identical MemFromBtree definition appears earlier in
// this file - presumably they belong to different build configurations or
// source files joined by extraction; confirm and remove one if not.
public static RC MemFromBtree(Btree.BtCursor cur, int offset, int amount, bool key, Mem mem)
{
    RC rc = RC.OK;
    Debug.Assert(Btree.CursorIsValid(cur));

    // Note: the calls to BtreeKeyFetch() and DataFetch() below assert() that both the BtShared and database handle mutexes are held.
    Debug.Assert((mem.Flags & MEM.RowSet) == 0);
    int available = 0; // Number of bytes available on the local btree page
    uint dummy1;
    byte[] data = (byte[])(key ? Btree.KeyFetch(cur, ref available, out dummy1) : Btree.DataFetch(cur, ref available, out dummy1)); // Data from the btree layer
    Debug.Assert(data != null);
    if (offset + amount <= available && (mem.Flags & MEM.Dyn) == 0)
    {
        // Fast path: the whole requested range is on the local page.
        MemRelease(mem);
        mem.Z_ = C._alloc(amount);
        Buffer.BlockCopy(data, offset, mem.Z_, 0, amount); //: mem->Z = &data[offset];
        mem.Flags = MEM.Blob | MEM.Ephem;
    }
    else if ((rc = MemGrow(mem, amount + 2, false)) == RC.OK)
    {
        // Slow path: read through the b-tree layer, which follows overflow pages.
        mem.Flags = MEM.Blob | MEM.Dyn | MEM.Term;
        mem.Encode = 0;
        mem.Type = TYPE.BLOB;
        mem.Z = null;
        mem.Z_ = C._alloc(amount);
        rc = (key ? Btree.Key(cur, (uint)offset, (uint)amount, mem.Z_) : Btree.Data(cur, (uint)offset, (uint)amount, mem.Z_));
        //: mem->Z[amount] = 0;
        //: mem->Z[amount + 1] = 0;
        if (rc != RC.OK)
            MemRelease(mem);
    }
    mem.N = amount;
    return rc;
}
// Bookkeeping performed when a transaction on Btree handle 'p' ends. If
// other statements on the same connection are still active, the handle is
// merely downgraded to a read transaction; otherwise its shared-cache
// locks are dropped, the shared transaction count is decremented (clearing
// BtShared.InTransaction when it hits zero), and the pager is unlocked if
// this was the last open transaction on the shared b-tree.
static void btreeEndTransaction(Btree p)
{
    var bt = p.Bt;
    Debug.Assert(p.HoldsMutex());

#if !OMIT_AUTOVACUUM
    bt.DoTruncate = false;
#endif
    btreeClearHasContent(bt);
    if (p.InTrans > TRANS.NONE && p.Ctx.ActiveVdbeCnt > 1)
    {
        // If there are other active statements that belong to this database handle, downgrade to a read-only transaction. The other statements
        // may still be reading from the database.
        downgradeAllSharedCacheTableLocks(p);
        p.InTrans = TRANS.READ;
    }
    else
    {
        // If the handle had any kind of transaction open, decrement the transaction count of the shared btree. If the transaction count
        // reaches 0, set the shared state to TRANS_NONE. The unlockBtreeIfUnused() call below will unlock the pager.
        if (p.InTrans != TRANS.NONE)
        {
            clearAllSharedCacheTableLocks(p);
            bt.Transactions--;
            if (bt.Transactions == 0)
                bt.InTransaction = TRANS.NONE;
        }

        // Set the current transaction state to TRANS_NONE and unlock the pager if this call closed the only read or write transaction.
        p.InTrans = TRANS.NONE;
        unlockBtreeIfUnused(bt);
    }
    btreeIntegrity(p);
}
// Open a database file and return a Btree handle through "btree".
//
//   vfs      - VFS to use for this b-tree
//   filename - database file name; null/empty for an ephemeral temp database
//   ctx      - the owning database connection
//   btree    - receives the new Btree handle on success (null on failure)
//   flags    - OPEN.* options; must fit in 8 bits
//   vfsFlags - flags passed through to VSystem.Open
//
// FIX: the guard around adding a new BtShared to the shared-cache list read
// "#if !SHARED_CACHE && !OMIT_DISKIO". Everywhere else this file tests
// OMIT_SHARED_CACHE (see the guards above and below), so !SHARED_CACHE was
// always true and the list insertion compiled even in shared-cache-omitted
// builds. Corrected to !OMIT_SHARED_CACHE to match the other guards.
public static RC Open(VSystem vfs, string filename, BContext ctx, ref Btree btree, OPEN flags, VSystem.OPEN vfsFlags)
{
    // True if opening an ephemeral, temporary database
    bool tempDB = string.IsNullOrEmpty(filename);
    // True for an in-memory database (explicit ":memory:", temp-in-memory, or OPEN.MEMORY flag)
    bool memoryDB = (filename == ":memory:") || (tempDB && ctx.TempInMemory()) || (vfsFlags & VSystem.OPEN.MEMORY) != 0;

    Debug.Assert(ctx != null);
    Debug.Assert(vfs != null);
    Debug.Assert(MutexEx.Held(ctx.Mutex));
    Debug.Assert(((uint)flags & 0xff) == (uint)flags); // flags fit in 8 bits
    // Only a BTREE_SINGLE database can be BTREE_UNORDERED
    Debug.Assert((flags & OPEN.UNORDERED) == 0 || (flags & OPEN.SINGLE) != 0);
    // A BTREE_SINGLE database is always a temporary and/or ephemeral
    Debug.Assert((flags & OPEN.SINGLE) == 0 || tempDB);

    if (memoryDB)
        flags |= OPEN.MEMORY;
    if ((vfsFlags & VSystem.OPEN.MAIN_DB) != 0 && (memoryDB || tempDB))
        vfsFlags = (vfsFlags & ~VSystem.OPEN.MAIN_DB) | VSystem.OPEN.TEMP_DB;

    var p = new Btree(); // Handle to return
    if (p == null)
        return RC.NOMEM;
    p.InTrans = TRANS.NONE;
    p.Ctx = ctx;
#if !OMIT_SHARED_CACHE
    p.Lock.Btree = p;
    p.Lock.Table = 1;
#endif

    RC rc = RC.OK;          // Result code from this function
    BtShared bt = null;     // Shared part of btree structure
    MutexEx mutexOpen = null;

#if !OMIT_SHARED_CACHE && !OMIT_DISKIO
    // If this Btree is a candidate for shared cache, try to find an existing BtShared object that we can share with.
    if (!tempDB && (!memoryDB || (vfsFlags & VSystem.OPEN.URI) != 0))
    {
        if ((vfsFlags & VSystem.OPEN.SHAREDCACHE) != 0)
        {
            string fullPathname;
            p.Sharable_ = true;
            if (memoryDB)
                fullPathname = filename;
            else
                vfs.FullPathname(filename, out fullPathname);
            MutexEx mutexShared;
#if THREADSAFE
            mutexOpen = MutexEx.Alloc(MutexEx.MUTEX.STATIC_OPEN); // Prevents a race condition. Ticket #3537
            MutexEx.Enter(mutexOpen);
            mutexShared = MutexEx.Alloc(MutexEx.MUTEX.STATIC_MASTER);
            MutexEx.Enter(mutexShared);
#endif
            for (bt = _sharedCacheList; bt != null; bt = bt.Next)
            {
                Debug.Assert(bt.Refs > 0);
                if (fullPathname == bt.Pager.get_Filename(false) && bt.Pager.get_Vfs() == vfs)
                {
                    // The same connection may not open the same database twice in shared-cache mode.
                    for (var i = ctx.DBs.length - 1; i >= 0; i--)
                    {
                        var existing = ctx.DBs[i].Bt;
                        if (existing != null && existing.Bt == bt)
                        {
                            MutexEx.Leave(mutexShared);
                            MutexEx.Leave(mutexOpen);
                            fullPathname = null;
                            p = null;
                            return RC.CONSTRAINT;
                        }
                    }
                    p.Bt = bt;
                    bt.Refs++;
                    break;
                }
            }
            MutexEx.Leave(mutexShared);
            fullPathname = null;
        }
#if DEBUG
        else
        {
            // In debug mode, mark all persistent databases as sharable even when they
            // are not. This exercises the locking code and gives more opportunity for
            // assert(sqlite3_mutex_held()) statements to find locking problems.
            p.Sharable_ = true;
        }
#endif
    }
#endif

    byte reserves;                  // Byte of unused space on each page
    var dbHeader = new byte[100];   // Database header content
    if (bt == null)
    {
        // The following asserts guard against primitive-size changes when compiling
        // on a different architecture.
        Debug.Assert(sizeof(long) == 8 || sizeof(long) == 4);
        Debug.Assert(sizeof(ulong) == 8 || sizeof(ulong) == 4);
        Debug.Assert(sizeof(uint) == 4);
        Debug.Assert(sizeof(ushort) == 2);
        Debug.Assert(sizeof(Pid) == 4);

        bt = new BtShared();
        if (bt == null)
        {
            rc = RC.NOMEM;
            goto btree_open_out;
        }
        rc = Pager.Open(vfs, out bt.Pager, filename, EXTRA_SIZE, (IPager.PAGEROPEN)flags, vfsFlags, pageReinit, null);
        if (rc == RC.OK)
            rc = bt.Pager.ReadFileHeader(dbHeader.Length, dbHeader);
        if (rc != RC.OK)
            goto btree_open_out;
        bt.OpenFlags = flags;
        bt.Ctx = ctx;
        bt.Pager.SetBusyHandler(btreeInvokeBusyHandler, bt);
        p.Bt = bt;
        bt.Cursor = null;
        bt.Page1 = null;
        if (bt.Pager.get_Readonly())
            bt.BtsFlags |= BTS.READ_ONLY;
#if SECURE_DELETE
        bt.BtsFlags |= BTS.SECURE_DELETE;
#endif
        // Page size is stored big-endian at header offset 16; the shifted encoding
        // allows 65536 to be represented as 0x00 0x01.
        bt.PageSize = (Pid)((dbHeader[16] << 8) | (dbHeader[17] << 16));
        if (bt.PageSize < 512 || bt.PageSize > Pager.MAX_PAGE_SIZE || ((bt.PageSize - 1) & bt.PageSize) != 0)
        {
            bt.PageSize = 0;
#if !OMIT_AUTOVACUUM
            // If the magic name ":memory:" will create an in-memory database, then leave the autoVacuum mode at 0 (do not auto-vacuum), even if
            // SQLITE_DEFAULT_AUTOVACUUM is true. On the other hand, if SQLITE_OMIT_MEMORYDB has been defined, then ":memory:" is just a
            // regular file-name. In this case the auto-vacuum applies as per normal.
            if (filename != null && !memoryDB)
            {
                bt.AutoVacuum = (DEFAULT_AUTOVACUUM != 0);
                bt.IncrVacuum = (DEFAULT_AUTOVACUUM == AUTOVACUUM.INCR);
            }
#endif
            reserves = 0;
        }
        else
        {
            reserves = dbHeader[20];
            bt.BtsFlags |= BTS.PAGESIZE_FIXED;
#if !OMIT_AUTOVACUUM
            bt.AutoVacuum = (ConvertEx.Get4(dbHeader, 36 + 4 * 4) != 0);
            bt.IncrVacuum = (ConvertEx.Get4(dbHeader, 36 + 7 * 4) != 0);
#endif
        }
        rc = bt.Pager.SetPageSize(ref bt.PageSize, reserves);
        if (rc != RC.OK)
            goto btree_open_out;
        bt.UsableSize = (ushort)(bt.PageSize - reserves);
        Debug.Assert((bt.PageSize & 7) == 0); // 8-byte alignment of pageSize

#if !OMIT_SHARED_CACHE && !OMIT_DISKIO // FIX: was "!SHARED_CACHE", inconsistent with the rest of the file
        // Add the new BtShared object to the linked list of sharable BtShareds.
        if (p.Sharable_)
        {
            bt.Refs = 1;
            MutexEx mutexShared;
#if THREADSAFE
            mutexShared = MutexEx.Alloc(MutexEx.MUTEX.STATIC_MASTER);
            bt.Mutex = MutexEx.Alloc(MutexEx.MUTEX.FAST);
#endif
            MutexEx.Enter(mutexShared);
            bt.Next = _sharedCacheList;
            _sharedCacheList = bt;
            MutexEx.Leave(mutexShared);
        }
#endif
    }

#if !OMIT_SHARED_CACHE && !OMIT_DISKIO
    // If the new Btree uses a sharable BtShared, link it into the list of all
    // sharable Btrees for the same connection, kept in ascending order by
    // BtShared.AutoID.
    if (p.Sharable_)
    {
        Btree sib;
        for (var i = 0; i < ctx.DBs.length; i++)
        {
            if ((sib = ctx.DBs[i].Bt) != null && sib.Sharable_)
            {
                while (sib.Prev != null) { sib = sib.Prev; }
                if (p.Bt.AutoID < sib.Bt.AutoID)
                {
                    p.Next = sib;
                    p.Prev = null;
                    sib.Prev = p;
                }
                else
                {
                    while (sib.Next != null && sib.Next.Bt.AutoID < p.Bt.AutoID)
                        sib = sib.Next;
                    p.Next = sib.Next;
                    p.Prev = sib;
                    if (p.Next != null)
                        p.Next.Prev = p;
                    sib.Next = p;
                }
                break;
            }
        }
    }
#endif
    btree = p;

btree_open_out:
    if (rc != RC.OK)
    {
        if (bt != null && bt.Pager != null)
            bt.Pager.Close();
        bt = null;
        p = null;
        btree = null;
    }
    else
    {
        // If the B-Tree was successfully opened, set the pager-cache size to the
        // default value. Except, when opening on an existing shared pager-cache,
        // do not change the pager-cache size.
        if (p.Schema(0, null) == null)
            p.Bt.Pager.SetCacheSize(DEFAULT_CACHE_SIZE);
    }
#if THREADSAFE
    // NOTE(review): mutexOpen is only allocated on the shared-cache path; this
    // relies on Held/Leave tolerating a null mutex — confirm MutexEx semantics.
    Debug.Assert(MutexEx.Held(mutexOpen));
    MutexEx.Leave(mutexOpen);
#endif
    return rc;
}
// Create a new cursor on the b-tree whose root page is tableID, filling in the
// caller-allocated BtCursor and linking it at the head of the shared BtShared
// cursor list. keyInfo is non-null for index btrees; wrFlag requests a write
// cursor. Returns RC.OK, or RC.READONLY for a write cursor on a read-only tree.
static RC btreeCursor(Btree p, Pid tableID, bool wrFlag, KeyInfo keyInfo, BtCursor cur)
{
    var shared = p.Bt; // Shared b-tree handle
    Debug.Assert(p.HoldsMutex());

    // Verify that, for a sharable b-tree, this connection holds the required table
    // lock and no other connection has a conflicting open cursor.
    Debug.Assert(hasSharedCacheTableLock(p, (uint)tableID, keyInfo != null, (LOCK)(wrFlag ? 1 : 0) + 1));
    Debug.Assert(!wrFlag || !hasReadConflicts(p, (uint)tableID));
    // The caller must have opened the required transaction.
    Debug.Assert(p.InTrans > TRANS.NONE);
    Debug.Assert(!wrFlag || p.InTrans == TRANS.WRITE);
    Debug.Assert(shared.Page1 != null && shared.Page1.Data != null);

    if (C._NEVER(wrFlag && (shared.BtsFlags & BTS.READ_ONLY) != 0))
        return RC.READONLY;

    // An empty database has no page 1; redirect a read cursor on table 1 to root 0.
    if (tableID == 1 && btreePagecount(shared) == 0)
    {
        Debug.Assert(!wrFlag);
        tableID = 0;
    }

    // No further errors are possible: fill in the cursor fields and push the
    // cursor onto the front of the BtShared cursor list.
    cur.RootID = tableID;
    cur.ID = -1;
    cur.KeyInfo = keyInfo;
    cur.Btree = p;
    cur.Bt = shared;
    cur.WrFlag = wrFlag;
    cur.Next = shared.Cursor;
    if (cur.Next != null)
        cur.Next.Prev = cur;
    shared.Cursor = cur;
    cur.State = CURSOR.INVALID;
    cur.CachedRowID = 0;
    return RC.OK;
}
// Release all shared-cache table locks held by Btree p at the conclusion of its
// transaction, and clear the writer / BTS_PENDING state on the shared BtShared
// when appropriate.
static void clearAllSharedCacheTableLocks(Btree p) {
    var bt = p.Bt;
    var iter = bt.Lock;
    Debug.Assert(p.HoldsMutex());
    Debug.Assert(p.Sharable_ || iter == null);
    Debug.Assert((int)p.InTrans > 0);
    while (iter != null) {
        BtLock lock_ = iter;
        Debug.Assert((bt.BtsFlags & BTS.EXCLUSIVE) == 0 || bt.Writer == lock_.Btree);
        Debug.Assert((int)lock_.Btree.InTrans >= (int)lock_.Lock);
        if (lock_.Btree == p) {
            iter = lock_.Next;
            // p.Lock is the statically allocated lock and always covers table 1.
            Debug.Assert(lock_.Table != 1 || lock_ == p.Lock);
            // NOTE(review): in the C original, locks other than the static table-1
            // lock are unlinked from bt.Lock and freed at this point. Here only the
            // local reference is nulled, so the BtLock object appears to remain on
            // the shared list — confirm against how bt.Lock is maintained elsewhere
            // in this port.
            if (lock_.Table != 1) lock_ = null;
        } else
            iter = lock_.Next;
    }
    Debug.Assert((bt.BtsFlags & BTS.PENDING) == 0 || bt.Writer != null);
    if (bt.Writer == p) {
        // p was the writer: clear the writer slot and both exclusivity flags.
        bt.Writer = null;
        bt.BtsFlags &= ~(BTS.EXCLUSIVE | BTS.PENDING);
    } else if (bt.Transactions == 2) {
        // This function is called when Btree p is concluding its transaction. If there currently exists a writer, and p is not
        // that writer, then the number of locks held by connections other than the writer must be about to drop to zero. In this case
        // set the BTS_PENDING flag to 0.
        //
        // If there is not currently a writer, then BTS_PENDING must be zero already. So this next line is harmless in that case.
        bt.BtsFlags &= ~BTS.PENDING;
    }
}
// If Btree p is the current writer on its shared BtShared, relinquish the
// writer role and downgrade every lock on the shared lock list to a read lock.
// No-op when p is not the writer.
static void downgradeAllSharedCacheTableLocks(Btree p)
{
    var shared = p.Bt;
    if (shared.Writer != p)
        return;

    shared.Writer = null;
    shared.BtsFlags &= ~(BTS.EXCLUSIVE | BTS.PENDING);
    for (var tableLock = shared.Lock; tableLock != null; tableLock = tableLock.Next)
    {
        // Only the writer may hold anything stronger than a read lock.
        Debug.Assert(tableLock.Lock == LOCK.READ || tableLock.Btree == p);
        tableLock.Lock = LOCK.READ;
    }
}
// Copy the content of one source database page into the destination database.
// When source and destination page sizes differ, the source page spans several
// destination pages and the loop below writes each of them.
//
//   p       - backup operation state (source/destination btrees, pagers)
//   srcPg   - source page number being copied
//   srcData - raw bytes of the source page
//   update  - passed by callers; not consulted in this body
//
// Returns RC.OK on success; RC.READONLY when the copy is impossible (page-size
// mismatch into an in-memory destination, or — with HAS_CODEC — a codec or
// reserve-space mismatch that cannot be reconciled); otherwise a pager error.
static RC BackupOnePage(Backup p, Pid srcPg, byte[] srcData, bool update) {
    Pager destPager = p.Dest.get_Pager();
    int srcPgsz = p.Src.GetPageSize();
    int destPgsz = p.Dest.GetPageSize();
    int copy = Math.Min(srcPgsz, destPgsz);           // Bytes copied per destination page
    long end = (long)srcPg * (long)srcPgsz;           // Byte offset just past the source page
    RC rc = RC.OK;
    Debug.Assert(p.Src.GetReserveNoMutex() >= 0);
    Debug.Assert(p.DestLocked);
    Debug.Assert(!IsFatalError(p.RC_));
    Debug.Assert(srcPg != Btree.PENDING_BYTE_PAGE(p.Src.Bt));
    Debug.Assert(srcData != null);
    // Catch the case where the destination is an in-memory database and the page sizes of the source and destination differ.
    if (srcPgsz != destPgsz && destPager.get_MemoryDB)
        rc = RC.READONLY;
#if HAS_CODEC
    int srcReserve = p.Src.GetReserveNoMutex();
    int destReserve = p.Dest.GetReserve();
    // Backup is not possible if the page size of the destination is changing and a codec is in use.
    if (srcPgsz != destPgsz && Pager.GetCodec(destPager) != null)
        rc = RC.READONLY;
    // Backup is not possible if the number of bytes of reserve space differ between source and destination. If there is a difference, try to
    // fix the destination to agree with the source. If that is not possible, then the backup cannot proceed.
    if (srcReserve != destReserve) {
        uint newPgsz = (uint)srcPgsz;
        rc = destPager.SetPageSize(ref newPgsz, srcReserve);
        if (rc == RC.OK && newPgsz != srcPgsz)
            rc = RC.READONLY;
    }
#endif
    // This loop runs once for each destination page spanned by the source page. For each iteration, variable iOff is set to the byte offset
    // of the destination page.
    for (long off = end - (long)srcPgsz; rc == RC.OK && off < end; off += destPgsz) {
        IPage destPg = null;
        uint dest = (uint)(off / destPgsz) + 1;
        // Never write the destination's pending-byte page.
        if (dest == Btree.PENDING_BYTE_PAGE(p.Dest.Bt))
            continue;
        if ((rc = destPager.Acquire(dest, ref destPg, false)) == RC.OK && (rc = Pager.Write(destPg)) == RC.OK) {
            byte[] destData = Pager.GetData(destPg);
            // Copy the data from the source page into the destination page. Then clear the Btree layer MemPage.isInit flag. Both this module
            // and the pager code use this trick (clearing the first byte of the page 'extra' space to invalidate the Btree layers
            // cached parse of the page). MemPage.isInit is marked "MUST BE FIRST" for this purpose.
            Buffer.BlockCopy(srcData, (int)(off % srcPgsz), destData, (int)(off % destPgsz), copy);
            Pager.GetExtra(destPg).IsInit = false;
        }
        Pager.Unref(destPg);
    }
    return rc;
}
// Invalidate every open incremental-blob cursor on this b-tree that refers to
// rowid (or every incrblob cursor when isClearTable is true, i.e. the whole
// table is being cleared), forcing those handles to error on their next use.
static void invalidateIncrblobCursors(Btree btree, long rowid, bool isClearTable)
{
    var shared = btree.Bt;
    Debug.Assert(btree.HoldsMutex());
    for (var cursor = shared.Cursor; cursor != null; cursor = cursor.Next)
    {
        if (!cursor.IsIncrblobHandle)
            continue;
        if (isClearTable || cursor.Info.Key == rowid)
            cursor.State = CURSOR.INVALID;
    }
}
// Shared-cache support omitted: no b-tree is ever sharable.
static bool Sharable(Btree b) => false;
// Shared-cache support omitted: table locks are never contended, so the
// query always succeeds.
static RC querySharedCacheTableLock(Btree p, Pid table, LOCK lock_) => RC.OK;
// Shared-cache support omitted: there is no per-BtShared mutex, so the
// "holds mutex" check is vacuously true.
static bool sqlite3BtreeHoldsMutex(Btree X) => true;
// Shared-cache support omitted: there are no table locks to downgrade.
static void downgradeAllSharedCacheTableLocks(Btree a)
{
    // no-op
}
// Shared-cache support omitted: there is no BtShared mutex to release.
static void sqlite3BtreeLeave(Btree X)
{
    // no-op
}
// Shared-cache support omitted: no other connection can hold a conflicting
// read cursor.
static bool hasReadConflicts(Btree a, Pid b) => false;
// C API declarations retained from the original source for reference:
//int sqlite3BtreeHoldsAllMutexes(sqlite3);
//int sqlite3SchemaMutexHeld(sqlite3*,int,Schema);
//#endif
// Shared-cache support omitted: no b-tree is ever sharable.
static bool sqlite3BtreeSharable(Btree X) => false;
// Erase the b-tree rooted at page tableID and reclaim its root page. In
// auto-vacuum mode the page with the largest root-page number is relocated
// into the freed slot, and its old page number is reported through movedID
// (0 when no page moved). The caller must hold a write transaction.
static RC btreeDropTable(Btree p, Pid tableID, ref int movedID) {
    BtShared bt = p.Bt;
    Debug.Assert(p.HoldsMutex());
    Debug.Assert(p.InTrans == TRANS.WRITE);
    // It is illegal to drop a table if any cursors are open on the database. This is because in auto-vacuum mode the backend may
    // need to move another root-page to fill a gap left by the deleted root page. If an open cursor was using this page a problem would occur.
    //
    // This error is caught long before control reaches this point.
    if (C._NEVER(bt.Cursor != null)) {
        BContext.ConnectionBlocked(p.Ctx, bt.Cursor.Btree.Ctx);
        return RC.LOCKED_SHAREDCACHE;
    }
    MemPage page = null;
    RC rc = btreeGetPage(bt, (Pid)tableID, ref page, false);
    if (rc != RC.OK) return rc;
    // First delete all of the table's content.
    int dummy0 = 0;
    rc = p.ClearTable((int)tableID, ref dummy0);
    if (rc != RC.OK) {
        releasePage(page);
        return rc;
    }
    movedID = 0;
    if (tableID > 1) {
#if OMIT_AUTOVACUUM
        freePage(page, ref rc);
        releasePage(page);
#else
        if (bt.AutoVacuum) {
            Pid maxRootID = 0;
            p.GetMeta(META.LARGEST_ROOT_PAGE, ref maxRootID);
            if (tableID == maxRootID) {
                // If the table being dropped is the table with the largest root-page number in the database, put the root page on the free list.
                freePage(page, ref rc);
                releasePage(page);
                if (rc != RC.OK) return rc;
            } else {
                // The table being dropped does not have the largest root-page number in the database. So move the page that does into the
                // gap left by the deleted root-page.
                releasePage(page);
                MemPage move = new MemPage();
                rc = btreeGetPage(bt, maxRootID, ref move, false);
                if (rc != RC.OK) return rc;
                rc = relocatePage(bt, move, PTRMAP.ROOTPAGE, 0, tableID, false);
                releasePage(move);
                if (rc != RC.OK) return rc;
                // Re-fetch the relocated page's old slot and put it on the free list.
                move = null;
                rc = btreeGetPage(bt, maxRootID, ref move, false);
                freePage(move, ref rc);
                releasePage(move);
                if (rc != RC.OK) return rc;
                movedID = (int)maxRootID;
            }
            // Set the new 'max-root-page' value in the database header. This is the old value less one, less one more if that happens to
            // be a root-page number, less one again if that is the PENDING_BYTE_PAGE.
            maxRootID--;
            while (maxRootID == PENDING_BYTE_PAGE(bt) || PTRMAP_ISPAGE(bt, maxRootID))
                maxRootID--;
            Debug.Assert(maxRootID != PENDING_BYTE_PAGE(bt));
            rc = p.UpdateMeta(META.LARGEST_ROOT_PAGE, maxRootID);
        } else {
            freePage(page, ref rc);
            releasePage(page);
        }
#endif
    } else {
        // If sqlite3BtreeDropTable was called on page 1. This really never should happen except in a corrupt database.
        zeroPage(page, PTF_INTKEY | PTF_LEAF);
        releasePage(page);
    }
    return rc;
}
// Return true if another connection holds an open cursor on the table rooted
// at "root" and that connection has not enabled read-uncommitted mode — i.e.
// writing this table would conflict with that reader.
static bool hasReadConflicts(Btree btree, Pid root)
{
    for (var cursor = btree.Bt.Cursor; cursor != null; cursor = cursor.Next)
    {
        if (cursor.RootID != root)
            continue;
        if (cursor.Btree == btree)
            continue; // our own cursors never conflict
        if ((cursor.Btree.Ctx.Flags & BContext.FLAG.ReadUncommitted) != 0)
            continue; // dirty readers tolerate concurrent writes
        return true;
    }
    return false;
}
// Compile the UTF-8 SQL text in "sql" into a prepared statement (Vdbe),
// returned through stmtOut; tailOut receives any unparsed trailing SQL.
//
//   ctx         - database connection (its mutex must be held)
//   sql         - SQL text to compile
//   bytes       - length limit for sql, or negative for "whole string"
//   isPrepareV2 - true for the sqlite3_prepare_v2-style interface
//   reprepare   - prior Vdbe being re-prepared, or null
//
// Returns RC.OK on success or an error code; on error stmtOut is left null and
// the connection error state is set via sqlite3Error().
public static RC Prepare_(Context ctx, string sql, int bytes, bool isPrepareV2, Vdbe reprepare, ref Vdbe stmtOut, ref string tailOut) {
    stmtOut = null;
    tailOut = null;
    string errMsg = null; // Error message
    RC rc = RC.OK;
    int i;
    // Allocate the parsing context
    Parse parse = new Parse(); // Parsing context
    if (parse == null) {
        rc = RC.NOMEM;
        goto end_prepare;
    }
    parse.Reprepare = reprepare;
    parse.LastToken.data = null; //: C#?
    Debug.Assert(tailOut == null);
    Debug.Assert(!ctx.MallocFailed);
    Debug.Assert(MutexEx.Held(ctx.Mutex));
    // Check to verify that it is possible to get a read lock on all database schemas. The inability to get a read lock indicates that
    // some other database connection is holding a write-lock, which in turn means that the other connection has made uncommitted changes
    // to the schema.
    //
    // Were we to proceed and prepare the statement against the uncommitted schema changes and if those schema changes are subsequently rolled
    // back and different changes are made in their place, then when this prepared statement goes to run the schema cookie would fail to detect
    // the schema change. Disaster would follow.
    //
    // This thread is currently holding mutexes on all Btrees (because of the sqlite3BtreeEnterAll() in sqlite3LockAndPrepare()) so it
    // is not possible for another thread to start a new schema change while this routine is running. Hence, we do not need to hold
    // locks on the schema, we just need to make sure nobody else is holding them.
    //
    // Note that setting READ_UNCOMMITTED overrides most lock detection, but it does *not* override schema lock detection, so this all still
    // works even if READ_UNCOMMITTED is set.
    for (i = 0; i < ctx.DBs.length; i++) {
        Btree bt = ctx.DBs[i].Bt;
        if (bt != null) {
            Debug.Assert(bt.HoldsMutex());
            rc = bt.SchemaLocked();
            if (rc != 0) {
                string dbName = ctx.DBs[i].Name;
                sqlite3Error(ctx, rc, "database schema is locked: %s", dbName);
                C.ASSERTCOVERAGE((ctx.Flags & Context.FLAG.ReadUncommitted) != 0);
                goto end_prepare;
            }
        }
    }
    // Release any deferred virtual-table disconnects now that all schemas are readable.
    VTable.UnlockList(ctx);
    parse.Ctx = ctx;
    parse.QueryLoops = (double)1;
    if (bytes >= 0 && (bytes == 0 || sql[bytes - 1] != 0)) {
        // A byte limit was supplied: enforce the SQL length limit, then parse a copy
        // truncated to "bytes" characters.
        int maxLen = ctx.aLimit[SQLITE_LIMIT_SQL_LENGTH];
        C.ASSERTCOVERAGE(bytes == maxLen);
        C.ASSERTCOVERAGE(bytes == maxLen + 1);
        if (bytes > maxLen) {
            sqlite3Error(ctx, RC.TOOBIG, "statement too long");
            rc = SysEx.ApiExit(ctx, RC.TOOBIG);
            goto end_prepare;
        }
        string sqlCopy = sql.Substring(0, bytes);
        if (sqlCopy != null) {
            parse.RunParser(sqlCopy, ref errMsg);
            C._tagfree(ctx, ref sqlCopy);
            parse.Tail = null; //: &sql[parse->Tail - sqlCopy];
        } else {
            parse.Tail = null; //: &sql[bytes];
        }
    } else {
        parse.RunParser(sql, ref errMsg);
    }
    Debug.Assert((int)parse.QueryLoops == 1);
    if (ctx.MallocFailed) {
        parse.RC = RC.NOMEM;
    }
    if (parse.RC == RC.DONE) {
        parse.RC = RC.OK;
    }
    if (parse.CheckSchema != 0) {
        SchemaIsValid(parse);
    }
    if (ctx.MallocFailed) {
        parse.RC = RC.NOMEM;
    }
    tailOut = (parse.Tail == null ? null : parse.Tail.ToString());
    rc = parse.RC;
    Vdbe v = parse.V;
#if !OMIT_EXPLAIN
    // For EXPLAIN / EXPLAIN QUERY PLAN statements, set up the output column names.
    if (rc == RC.OK && parse.V != null && parse.Explain != 0) {
        int first, max;
        if (parse.Explain == 2) {
            v.SetNumCols(4);
            first = 8;
            max = 12;
        } else {
            v.SetNumCols(8);
            first = 0;
            max = 8;
        }
        for (i = first; i < max; i++) {
            v.SetColName(i - first, COLNAME_NAME, _colName[i], C.DESTRUCTOR_STATIC);
        }
    }
#endif
    Debug.Assert(!ctx.Init.Busy || !isPrepareV2);
    if (!ctx.Init.Busy) {
        // Record the statement's SQL text (up to the unparsed tail) on the Vdbe.
        Vdbe.SetSql(v, sql, (int)(sql.Length - (parse.Tail == null ? 0 : parse.Tail.Length)), isPrepareV2);
    }
    if (v != null && (rc != RC.OK || ctx.MallocFailed)) {
        // Compilation failed after a Vdbe was built: discard it.
        v.Finalize();
        Debug.Assert(stmtOut == null);
    } else {
        stmtOut = v;
    }
    if (errMsg != null) {
        sqlite3Error(ctx, rc, "%s", errMsg);
        C._tagfree(ctx, ref errMsg);
    } else {
        sqlite3Error(ctx, rc, null);
    }
    // Delete any TriggerPrg structures allocated while parsing this statement.
    while (parse.TriggerPrg != null) {
        TriggerPrg t = parse.TriggerPrg;
        parse.TriggerPrg = t.Next;
        C._tagfree(ctx, ref t);
    }
end_prepare:
    //sqlite3StackFree( db, pParse );
    rc = SysEx.ApiExit(ctx, rc);
    Debug.Assert((RC)((int)rc & ctx.ErrMask) == rc);
    return(rc);
}
// Copy "pages" pages from the source database to the destination (all remaining
// pages when "pages" is negative). Returns RC.OK when more pages remain,
// RC.DONE when the backup completed (and the destination was committed), or an
// error code. The result is also latched into RC_ so later calls fail fast.
//
// FIXES vs. the original port:
//   * BackupOnePage(p, ...) / AttachBackupObject(p) referenced the undefined
//     identifier "p" (the C function's parameter); this instance method must
//     pass "this".
//   * "if (srcPage == null)" on a Pid (uint) is always false, so the NewDb()
//     path for an empty source database could never run; changed to "== 0".
//   * loop condition "rc == 0" changed to "rc == RC.OK" for consistency.
//   * "IPage pgAsObj;" was passed by ref while unassigned; now initialized.
//   * the "#if !DEBUG || COVERAGE_TEST" guard around the checked commit was
//     inverted — the rc2/assert form belongs in DEBUG/coverage builds.
public RC Step(int pages)
{
    MutexEx.Enter(SrcCtx.Mutex);
    Src.Enter();
    if (DestCtx != null)
        MutexEx.Enter(DestCtx.Mutex);

    RC rc = RC_;
    if (!IsFatalError(rc))
    {
        Pager srcPager = Src.get_Pager();   // Source pager
        Pager destPager = Dest.get_Pager(); // Dest pager
        Pid srcPage = 0;                    // Size of source db in pages
        bool closeTrans = false;            // True if src db requires unlocking

        // If the source pager is currently in a write-transaction, return SQLITE_BUSY immediately.
        rc = (DestCtx != null && Src.Bt.InTransaction == TRANS.WRITE ? RC.BUSY : RC.OK);

        // Lock the destination database, if it is not locked already.
        if (rc == RC.OK && !DestLocked && (rc = Dest.BeginTrans(2)) == RC.OK)
        {
            DestLocked = true;
            Dest.GetMeta(Btree.META.SCHEMA_VERSION, ref DestSchema);
        }

        // If there is no open read-transaction on the source database, open one now. If a transaction is opened here, then it will be closed
        // before this function exits.
        if (rc == RC.OK && !Src.IsInReadTrans())
        {
            rc = Src.BeginTrans(0);
            closeTrans = true;
        }

        // Do not allow backup if the destination database is in WAL mode and the page sizes are different between source and destination.
        int pgszSrc = Src.GetPageSize();    // Source page size
        int pgszDest = Dest.GetPageSize();  // Destination page size
        IPager.JOURNALMODE destMode = Dest.get_Pager().GetJournalMode(); // Destination journal mode
        if (rc == RC.OK && destMode == IPager.JOURNALMODE.WAL && pgszSrc != pgszDest)
            rc = RC.READONLY;

        // Now that there is a read-lock on the source database, query the source pager for the number of pages in the database.
        srcPage = Src.LastPage();
        Debug.Assert(srcPage >= 0);

        // Copy the next batch of pages.
        for (int ii = 0; (pages < 0 || ii < pages) && NextId <= (Pid)srcPage && rc == RC.OK; ii++)
        {
            Pid srcPg = NextId; // Source page number
            if (srcPg != Btree.PENDING_BYTE_PAGE(Src.Bt))
            {
                IPage srcPgAsObj = null; // Source page object
                rc = srcPager.Acquire(srcPg, ref srcPgAsObj, false);
                if (rc == RC.OK)
                {
                    rc = BackupOnePage(this, srcPg, Pager.GetData(srcPgAsObj), false);
                    Pager.Unref(srcPgAsObj);
                }
            }
            NextId++;
        }
        if (rc == RC.OK)
        {
            Pagecount = srcPage;
            Remaining = (srcPage + 1 - NextId);
            if (NextId > srcPage)
                rc = RC.DONE;
            else if (!IsAttached)
                AttachBackupObject(this);
        }

        // Update the schema version field in the destination database. This is to make sure that the schema-version really does change in
        // the case where the source and destination databases have the same schema version.
        if (rc == RC.DONE)
        {
            if (srcPage == 0)
            {
                // Empty source database: initialize the destination before stamping metadata.
                rc = Dest.NewDb();
                srcPage = 1;
            }
            if (rc == RC.OK || rc == RC.DONE)
                rc = Dest.UpdateMeta(Btree.META.SCHEMA_VERSION, DestSchema + 1);
            if (rc == RC.OK)
            {
                if (DestCtx != null)
                    Main.ResetAllSchemasOfConnection(DestCtx);
                if (destMode == IPager.JOURNALMODE.WAL)
                    rc = Dest.SetVersion(2);
            }
            if (rc == RC.OK)
            {
                // Set destTruncate to the final number of pages in the destination database. The complication here is that the destination page
                // size may be different to the source page size.
                //
                // If the source page size is smaller than the destination page size, round up. In this case the call to sqlite3OsTruncate() below will
                // fix the size of the file. However it is important to call sqlite3PagerTruncateImage() here so that any pages in the
                // destination file that lie beyond the nDestTruncate page mark are journalled by PagerCommitPhaseOne() before they are destroyed
                // by the file truncation.
                Debug.Assert(pgszSrc == Src.GetPageSize());
                Debug.Assert(pgszDest == Dest.GetPageSize());
                Pid destTruncate;
                if (pgszSrc < pgszDest)
                {
                    int ratio = pgszDest / pgszSrc;
                    destTruncate = (Pid)((srcPage + ratio - 1) / ratio);
                    if (destTruncate == Btree.PENDING_BYTE_PAGE(Dest.Bt))
                        destTruncate--;
                }
                else
                    destTruncate = (Pid)(srcPage * (pgszSrc / pgszDest));
                Debug.Assert(destTruncate > 0);

                if (pgszSrc < pgszDest)
                {
                    // If the source page-size is smaller than the destination page-size, two extra things may need to happen:
                    //
                    //   * The destination may need to be truncated, and
                    //
                    //   * Data stored on the pages immediately following the pending-byte page in the source database may need to be
                    //     copied into the destination database.
                    int size = (int)(pgszSrc * srcPage);
                    VFile file = destPager.get_File();
                    Debug.Assert(file != null);
                    Debug.Assert((long)destTruncate * (long)pgszDest >= size || (destTruncate == (int)(Btree.PENDING_BYTE_PAGE(Dest.Bt) - 1) && size >= VFile.PENDING_BYTE && size <= VFile.PENDING_BYTE + pgszDest));

                    // This block ensures that all data required to recreate the original database has been stored in the journal for pDestPager and the
                    // journal synced to disk. So at this point we may safely modify the database file in any way, knowing that if a power failure
                    // occurs, the original database will be reconstructed from the journal file.
                    uint dstPage;
                    destPager.Pages(out dstPage);
                    for (Pid pg = destTruncate; rc == RC.OK && pg <= (Pid)dstPage; pg++)
                    {
                        if (pg != Btree.PENDING_BYTE_PAGE(Dest.Bt))
                        {
                            IPage pgAsObj = null;
                            rc = destPager.Acquire(pg, ref pgAsObj, false);
                            if (rc == RC.OK)
                            {
                                rc = Pager.Write(pgAsObj);
                                Pager.Unref(pgAsObj);
                            }
                        }
                    }
                    if (rc == RC.OK)
                        rc = destPager.CommitPhaseOne(null, true);

                    // Write the extra pages and truncate the database file as required.
                    long end = Math.Min(VFile.PENDING_BYTE + pgszDest, size);
                    for (long off = VFile.PENDING_BYTE + pgszSrc; rc == RC.OK && off < end; off += pgszSrc)
                    {
                        Pid srcPg = (Pid)((off / pgszSrc) + 1);
                        PgHdr srcPgAsObj = null;
                        rc = srcPager.Acquire(srcPg, ref srcPgAsObj, false);
                        if (rc == RC.OK)
                        {
                            byte[] data = Pager.GetData(srcPgAsObj);
                            rc = file.Write(data, pgszSrc, off);
                        }
                        Pager.Unref(srcPgAsObj);
                    }
                    if (rc == RC.OK)
                        rc = BackupTruncateFile(file, (int)size);

                    // Sync the database file to disk.
                    if (rc == RC.OK)
                        rc = destPager.Sync();
                }
                else
                {
                    destPager.TruncateImage(destTruncate);
                    rc = destPager.CommitPhaseOne(null, false);
                }

                // Finish committing the transaction to the destination database.
                if (rc == RC.OK && (rc = Dest.CommitPhaseTwo(false)) == RC.OK)
                    rc = RC.DONE;
            }
        }

        // If closeTrans is true, then this function opened a read transaction on the source database. Close the read transaction here. There is
        // no need to check the return values of the btree methods here, as "committing" a read-only transaction cannot fail.
        if (closeTrans)
        {
#if DEBUG || COVERAGE_TEST
            RC rc2 = Src.CommitPhaseOne(null);
            rc2 |= Src.CommitPhaseTwo(false);
            Debug.Assert(rc2 == RC.OK);
#else
            Src.CommitPhaseOne(null);
            Src.CommitPhaseTwo(false);
#endif
        }

        if (rc == RC.IOERR_NOMEM)
            rc = RC.NOMEM;
        RC_ = rc;
    }

    if (DestCtx != null)
        MutexEx.Leave(DestCtx.Mutex);
    Src.Leave();
    MutexEx.Leave(SrcCtx.Mutex);
    return rc;
}
public bool IsPCacheInit; // True after the page-cache (pcache) subsystem is initialized (original comment said "malloc", a copy-paste slip — the field and parameter are both named for pcache)

// Construct the global configuration state.
//   useCis         - use covering indices for full scans
//   page           - page-cache memory block (see Btree.MemPage)
//   pageSize       - size of each page in the page slot
//   pages          - number of pages in the page slot
//   maxParserStack - maximum depth of the parser stack
//   isPCacheInit   - initial value of the IsPCacheInit flag
public GlobalStatics(
    bool useCis,
    //sqlite3_pcache_methods pcache,
    Btree.MemPage page,
    int pageSize,
    int pages,
    int maxParserStack,
    bool isPCacheInit)
{
    UseCis = useCis;
    //pcache = pcache;
    Page = page;
    PageSize = pageSize;
    Pages = pages;
    MaxParserStack = maxParserStack;
    IsPCacheInit = isPCacheInit;
}