/// <summary>
/// Instantiate a new SecondaryQueueDatabaseConfig object
/// </summary>
public SecondaryQueueDatabaseConfig(
    Database PrimaryDB, SecondaryKeyGenDelegate KeyGenFunc)
    : base(PrimaryDB, KeyGenFunc) {
    lengthIsSet = false;
    padIsSet = false;
    extentIsSet = false;
    DbType = DatabaseType.QUEUE;
}
public static void AddOneByCursor(Database db, Cursor cursor) {
    DatabaseEntry key, data;
    KeyValuePair<DatabaseEntry, DatabaseEntry> pair;

    // Add a record to db via cursor.
    key = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key"));
    data = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data"));
    pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(key, data);
    cursor.Add(pair);

    // Confirm that the record has been put into the database.
    Assert.IsTrue(db.Exists(key));
}
public static void Confirm(XmlElement xmlElement,
    Database db, bool compulsory) {
    uint defaultBytes;
    defaultBytes = getDefaultCacheSizeBytes();

    Configuration.ConfirmBool(xmlElement, "AutoCommit",
        db.AutoCommit, compulsory);
    Configuration.ConfirmCacheSize(xmlElement, "CacheSize",
        db.CacheSize, defaultBytes, compulsory);
    Configuration.ConfirmCreatePolicy(xmlElement, "Creation",
        db.Creation, compulsory);
    Configuration.ConfirmString(xmlElement, "DatabaseName",
        db.DatabaseName, compulsory);
    Configuration.ConfirmBool(xmlElement, "DoChecksum",
        db.DoChecksum, compulsory);
    // Encrypted and EncryptWithAES?
    Configuration.ConfirmByteOrder(xmlElement, "ByteOrder",
        db.Endianness, compulsory);
    Configuration.ConfirmString(xmlElement, "ErrorPrefix",
        db.ErrorPrefix, compulsory);
    // The file name is confirmed in the calling function, not here.
    Configuration.ConfirmBool(xmlElement, "FreeThreaded",
        db.FreeThreaded, compulsory);
    Configuration.ConfirmBool(xmlElement, "HasMultiple",
        db.HasMultiple, compulsory);
    if (db.Endianness == getMachineByteOrder())
        Assert.IsTrue(db.InHostOrder);
    else
        Assert.IsFalse(db.InHostOrder);
    Configuration.ConfirmBool(xmlElement, "NoMMap",
        db.NoMMap, compulsory);
    Configuration.ConfirmBool(xmlElement, "NonDurableTxns",
        db.NonDurableTxns, compulsory);
    Configuration.ConfirmUint(xmlElement, "PageSize",
        db.Pagesize, compulsory);
    Configuration.ConfirmCachePriority(xmlElement, "Priority",
        db.Priority, compulsory);
    Configuration.ConfirmBool(xmlElement, "ReadOnly",
        db.ReadOnly, compulsory);
    Configuration.ConfirmBool(xmlElement, "ReadUncommitted",
        db.ReadUncommitted, compulsory);
    /*
     * Database.Truncated is the value set in
     * DatabaseConfig.Truncate.
     */
    Configuration.ConfirmBool(xmlElement, "Truncate",
        db.Truncated, compulsory);
    Configuration.ConfirmBool(xmlElement, "UseMVCC",
        db.UseMVCC, compulsory);
}
/// <summary>
/// Verify the integrity of the database specified by
/// <paramref name="file"/> and <paramref name="database"/>.
/// </summary>
/// <remarks>
/// <para>
/// Berkeley DB normally verifies that btree keys and duplicate items
/// are correctly sorted, and hash keys are correctly hashed. If the
/// file being verified contains multiple databases using differing
/// sorting or hashing algorithms, some of them must necessarily fail
/// database verification because only one sort order or hash function
/// can be specified in <paramref name="cfg"/>. To verify files with
/// multiple databases having differing sorting orders or hashing
/// functions, first perform verification of the file as a whole by
/// using <see cref="VerifyOperation.NO_ORDER_CHECK"/>, and then
/// individually verify the sort order and hashing function for each
/// database in the file using
/// <see cref="VerifyOperation.ORDER_CHECK_ONLY"/>.
/// </para>
/// </remarks>
/// <param name="file">
/// The physical file in which the databases to be verified are found.
/// </param>
/// <param name="database">
/// The database in <paramref name="file"/> on which the database checks
/// for btree and duplicate sort order and for hashing are to be
/// performed. A non-null value for database is only allowed with
/// <see cref="VerifyOperation.ORDER_CHECK_ONLY"/>.
/// </param>
/// <param name="cfg">
/// Configuration parameters for the databases to be verified.
/// </param>
/// <param name="op">The extent of verification.</param>
public static void Verify(string file,
    string database, DatabaseConfig cfg, VerifyOperation op) {
    using (Database db = new Database(cfg.Env, 0)) {
        db.Config(cfg);

        uint flags;
        switch (op) {
            case VerifyOperation.NO_ORDER_CHECK:
                flags = DbConstants.DB_NOORDERCHK;
                break;
            case VerifyOperation.ORDER_CHECK_ONLY:
                flags = DbConstants.DB_ORDERCHKONLY;
                break;
            case VerifyOperation.DEFAULT:
            default:
                flags = 0;
                break;
        }
        db.db.verify(file, database, null, null, flags);
    }
}
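/*
 * A minimal usage sketch of the two-pass verification described in the
 * remarks above. The file name "stores.db" and the database names
 * "inventory" and "vendors" are hypothetical; the sketch assumes the
 * file holds multiple databases with differing sort orders, so the file
 * is first checked as a whole without order checks, then each
 * database's ordering is checked individually.
 */
private static void VerifyStoreFile() {
    DatabaseConfig cfg = new DatabaseConfig();
    // Pass 1: verify the physical file, skipping sort/hash checks.
    Database.Verify("stores.db", null, cfg,
        VerifyOperation.NO_ORDER_CHECK);
    // Pass 2: verify ordering for each database individually.
    Database.Verify("stores.db", "inventory", cfg,
        VerifyOperation.ORDER_CHECK_ONLY);
    Database.Verify("stores.db", "vendors", cfg,
        VerifyOperation.ORDER_CHECK_ONLY);
}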
/// <summary>
/// Upgrade all of the databases included in the file
/// <paramref name="file"/>, if necessary. If no upgrade is necessary,
/// Upgrade always returns successfully.
/// </summary>
/// <overloads>
/// Database upgrades are done in place and are destructive. For
/// example, if pages need to be allocated and no disk space is
/// available, the database may be left corrupted. Backups should be
/// made before databases are upgraded. See Upgrading databases in the
/// Programmer's Reference Guide for more information.
/// </overloads>
/// <remarks>
/// <para>
/// As part of the upgrade from the Berkeley DB 3.0 release to the 3.1
/// release, the on-disk format of duplicate data items changed. To
/// correctly upgrade the format requires applications to specify
/// whether duplicate data items in the database are sorted or not.
/// Specifying <paramref name="dupSortUpgraded"/> informs Upgrade that
/// the duplicates are sorted; otherwise they are assumed to be
/// unsorted. Incorrectly specifying the value of this flag may lead to
/// database corruption.
/// </para>
/// <para>
/// Further, because this method upgrades a physical file (including all
/// the databases it contains), it is not possible to use Upgrade to
/// upgrade files in which some of the databases it includes have sorted
/// duplicate data items, and some of the databases it includes have
/// unsorted duplicate data items. If the file does not have more than a
/// single database, if the databases do not support duplicate data
/// items, or if all of the databases that support duplicate data items
/// support the same style of duplicates (either sorted or unsorted),
/// Upgrade works correctly as long as
/// <paramref name="dupSortUpgraded"/> is correctly specified.
/// Otherwise, the file cannot be upgraded using Upgrade; it must be
/// upgraded manually by dumping and reloading the databases.
/// </para>
/// </remarks>
/// <param name="file">
/// The physical file containing the databases to be upgraded.
/// </param>
/// <param name="cfg">
/// Configuration parameters for the databases to be upgraded.
/// </param>
/// <param name="dupSortUpgraded">
/// If true, the duplicates in the upgraded database are sorted;
/// otherwise they are assumed to be unsorted. This setting is only
/// meaningful when upgrading databases from releases before the
/// Berkeley DB 3.1 release.
/// </param>
public static void Upgrade(
    string file, DatabaseConfig cfg, bool dupSortUpgraded) {
    using (Database db = new Database(cfg.Env, 0)) {
        db.Config(cfg);
        db.db.upgrade(file,
            dupSortUpgraded ? DbConstants.DB_DUPSORT : 0);
    }
}
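/*
 * A hedged usage sketch for Upgrade. The file name "mydb.db" is
 * hypothetical; the sketch assumes its databases predate the 3.1
 * release and store sorted duplicates, so dupSortUpgraded is true.
 * Because upgrades are done in place and are destructive, the file
 * should be backed up before this runs.
 */
private static void UpgradeFile() {
    DatabaseConfig cfg = new DatabaseConfig();
    // Upgrade in place; duplicates in the file are sorted.
    Database.Upgrade("mydb.db", cfg, true);
}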
/// <summary>
/// Write the key/data pairs from all databases in the file to
/// <paramref name="OutputStream"/>. Key values are written for Btree,
/// Hash and Queue databases, but not for Recno databases.
/// </summary>
/// <param name="file">
/// The physical file in which the databases to be salvaged are found.
/// </param>
/// <param name="cfg">
/// Configuration parameters for the databases to be salvaged.
/// </param>
/// <param name="Printable">
/// If true and characters in either the key or data items are printing
/// characters (as defined by isprint(3)), use printing characters to
/// represent them. This setting permits users to use standard text
/// editors and tools to modify the contents of databases or selectively
/// remove data from salvager output.
/// </param>
/// <param name="Aggressive">
/// If true, output all the key/data pairs found in the file.
/// Corruption of these data pairs is assumed, and corrupted or deleted
/// data pairs may appear in the output (even if the salvaged file is in
/// no way corrupt). This output almost certainly requires editing before
/// being loaded into a database.
/// </param>
/// <param name="OutputStream">
/// The TextWriter to which the databases' key/data pairs are written.
/// If null, <see cref="Console.Out"/> is used.
/// </param>
public static void Salvage(string file, DatabaseConfig cfg,
    bool Printable, bool Aggressive, TextWriter OutputStream) {
    using (Database db = new Database(cfg.Env, 0)) {
        db.Config(cfg);

        if (OutputStream == null)
            OutputStream = Console.Out;

        uint flags = DbConstants.DB_SALVAGE;
        flags |= Aggressive ? DbConstants.DB_AGGRESSIVE : 0;
        flags |= Printable ? DbConstants.DB_PRINTABLE : 0;

        writeToFileRef = new BDB_FileWriteDelegate(writeToFile);
        db.db.verify(file, null, OutputStream, writeToFileRef, flags);
    }
}
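/*
 * A minimal sketch of salvaging a possibly corrupted file. The names
 * "broken.db" and "salvage.txt" are hypothetical. With Printable set,
 * the output uses printing characters where possible so it can be
 * edited by hand before being reloaded into a database.
 */
private static void SalvageFile() {
    DatabaseConfig cfg = new DatabaseConfig();
    using (TextWriter output = new StreamWriter("salvage.txt"))
        // Printable = true, Aggressive = false: dump only pairs
        // believed intact.
        Database.Salvage("broken.db", cfg, true, false, output);
}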
private void Open() {
    Console.WriteLine("Opening environment and database");

    // Set up the environment.
    DatabaseEnvironmentConfig envCfg = new DatabaseEnvironmentConfig();
    envCfg.Create = true;
    envCfg.UseMPool = true;
    envCfg.UseLocking = true;
    envCfg.UseLogging = true;
    envCfg.UseTxns = true;

    // Allow multiple threads to access the environment handle.
    envCfg.FreeThreaded = true;

    if (inMem)
        envCfg.Private = true;
    else
        envCfg.RunRecovery = true;

    /*
     * Indicate that we want db to perform deadlock detection
     * internally, aborting the transaction that has performed the
     * least amount of write activity in the event of a deadlock.
     */
    envCfg.LockSystemCfg = new LockingConfig();
    envCfg.LockSystemCfg.DeadlockResolution = DeadlockPolicy.MIN_WRITE;

    if (inMem) {
        // Specify in-memory logging.
        envCfg.LogSystemCfg = new LogConfig();
        envCfg.LogSystemCfg.InMemory = true;
        /*
         * Specify the size of the in-memory log buffer. It must be
         * large enough to handle the log data created by the
         * largest transaction.
         */
        envCfg.LogSystemCfg.BufferSize = 10 * 1024 * 1024;
        /*
         * Specify the size of the in-memory cache, large enough
         * to avoid paging to disk.
         */
        envCfg.MPoolSystemCfg = new MPoolConfig();
        envCfg.MPoolSystemCfg.CacheSize =
            new CacheInfo(0, 10 * 1024 * 1024, 1);
    }

    // Set up the database.
    BTreeDatabaseConfig dbCfg = new BTreeDatabaseConfig();
    dbCfg.AutoCommit = true;
    dbCfg.Creation = CreatePolicy.IF_NEEDED;
    dbCfg.Duplicates = DuplicatesPolicy.SORTED;
    dbCfg.FreeThreaded = true;
    dbCfg.ReadUncommitted = true;

    /*
     * Open the environment. Any errors are caught by the caller.
     */
    env = DatabaseEnvironment.Open(home, envCfg);

    /*
     * Open the database. Do not provide a txn handle. This Open is
     * autocommitted because BTreeDatabaseConfig.AutoCommit is true.
     */
    dbCfg.Env = env;
    db = BTreeDatabase.Open(dbName, dbCfg);
}
/// <summary>
/// Instantiate a new SecondaryRecnoDatabaseConfig object
/// </summary>
public SecondaryRecnoDatabaseConfig(
    Database PrimaryDB, SecondaryKeyGenDelegate KeyGenFunc)
    : base(PrimaryDB, KeyGenFunc) {
    Renumber = false;
    Snapshot = false;
    delimiterIsSet = false;
    lengthIsSet = false;
    padIsSet = false;
    BackingFile = null;
    DbType = DatabaseType.RECNO;
}
/*
 * void return type since error conditions are propagated
 * via exceptions.
 */
private void printStocks(Database db) {
    Cursor dbc = db.Cursor();

    Console.WriteLine("\tSymbol\t\tPrice");
    Console.WriteLine("\t======\t\t=====");

    // Iterate over every key/data pair in the database.
    foreach (KeyValuePair<DatabaseEntry, DatabaseEntry> pair in dbc) {
        string keyStr = ASCIIEncoding.ASCII.GetString(pair.Key.Data);
        string dataStr = ASCIIEncoding.ASCII.GetString(pair.Value.Data);
        Console.WriteLine("\t" + keyStr + "\t\t" + dataStr);
    }
    dbc.Close();
}
/// <summary>
/// Instantiate a new SecondaryDatabaseConfig object, with the default
/// configuration settings.
/// </summary>
public SecondaryDatabaseConfig(
    Database PrimaryDB, SecondaryKeyGenDelegate KeyGenFunc) {
    Primary = PrimaryDB;
    KeyGen = KeyGenFunc;
    DbType = DatabaseType.UNKNOWN;
}
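/*
 * A hedged sketch of a key generator delegate as it might be passed to
 * this constructor. The layout assumed here (the secondary key being
 * the first four bytes of the primary data item) is hypothetical, as is
 * the "primaryDb" handle in the helper below.
 */
private static DatabaseEntry GenSecondaryKey(
    DatabaseEntry key, DatabaseEntry data) {
    // Use the first four bytes of the primary data item as the
    // secondary key.
    byte[] skey = new byte[4];
    Array.Copy(data.Data, skey, 4);
    return new DatabaseEntry(skey);
}

private static SecondaryDatabaseConfig ConfigureSecondary(
    Database primaryDb) {
    return new SecondaryDatabaseConfig(primaryDb, GenSecondaryKey);
}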
/// <summary>
/// Specify the action taken when a referenced record in the foreign key
/// database is deleted.
/// </summary>
/// <param name="ForeignDB">The foreign key database.</param>
/// <param name="OnDelete">
/// The action taken when a referenced record is deleted.
/// </param>
/// <param name="NullifyFunc">
/// When <paramref name="OnDelete"/> is
/// <see cref="ForeignKeyDeleteAction.NULLIFY"/>, NullifyFunc is used to
/// set the foreign key to null.
/// </param>
public void SetForeignKeyConstraint(Database ForeignDB,
    ForeignKeyDeleteAction OnDelete,
    ForeignKeyNullifyDelegate NullifyFunc) {
    if (OnDelete == ForeignKeyDeleteAction.NULLIFY &&
        NullifyFunc == null)
        throw new ArgumentException("A nullifying function must " +
            "be provided when ForeignKeyDeleteAction.NULLIFY is set.");
    fdbp = ForeignDB;
    fkaction = OnDelete;
    nullifier = NullifyFunc;
}
/// <summary>
/// Specify the action taken when a referenced record in the foreign key
/// database is deleted.
/// </summary>
/// <param name="ForeignDB">The foreign key database.</param>
/// <param name="OnDelete">
/// The action taken when a referenced record is deleted.
/// </param>
public void SetForeignKeyConstraint(
    Database ForeignDB, ForeignKeyDeleteAction OnDelete) {
    SetForeignKeyConstraint(ForeignDB, OnDelete, null);
}
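/*
 * A hedged usage sketch for the constraint methods above. The "secCfg"
 * and "colorDb" handles are hypothetical. ABORT makes a delete from the
 * foreign key database fail while records in the secondary still
 * reference it; NULLIFY would additionally require a
 * ForeignKeyNullifyDelegate, as the overload above enforces.
 */
private static void AddColorConstraint(
    SecondaryDatabaseConfig secCfg, Database colorDb) {
    // Reject deletes of referenced records in colorDb.
    secCfg.SetForeignKeyConstraint(colorDb, ForeignKeyDeleteAction.ABORT);
}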
/// <summary>
/// Instantiate a new SecondaryHashDatabaseConfig object
/// </summary>
public SecondaryHashDatabaseConfig(
    Database PrimaryDB, SecondaryKeyGenDelegate KeyGenFunc)
    : base(PrimaryDB, KeyGenFunc) {
    Duplicates = DuplicatesPolicy.NONE;
    Compare = null;
    fillFactorIsSet = false;
    nelemIsSet = false;
    DbType = DatabaseType.HASH;
}
/// <summary>
/// Create a new SecondaryBTreeDatabaseConfig object
/// </summary>
public SecondaryBTreeDatabaseConfig(
    Database PrimaryDB, SecondaryKeyGenDelegate KeyGenFunc)
    : base(PrimaryDB, KeyGenFunc) {
    Duplicates = DuplicatesPolicy.NONE;
    NoReverseSplitting = false;
    UseRecordNumbers = false;
    Compare = null;
    PrefixCompare = null;
    minkeysIsSet = false;
    DbType = DatabaseType.BTREE;
}