private void Config(HashDatabaseConfig cfg) { base.Config(cfg); /* * Database.Config calls set_flags, but that doesn't get the Hash * specific flags. No harm in calling it again. */ db.set_flags(cfg.flags); if (cfg.HashFunction != null) { HashFunction = cfg.HashFunction; } // The duplicate comparison function cannot change. if (cfg.DuplicateCompare != null) { DupCompare = cfg.DuplicateCompare; } if (cfg.fillFactorIsSet) { db.set_h_ffactor(cfg.FillFactor); } if (cfg.nelemIsSet) { db.set_h_nelem(cfg.TableSize); } if (cfg.HashComparison != null) { Compare = cfg.HashComparison; } }
public void ConfigCase1(HashDatabaseConfig dbConfig) { dbConfig.Creation = CreatePolicy.IF_NEEDED; dbConfig.Duplicates = DuplicatesPolicy.UNSORTED; dbConfig.FillFactor = 10; dbConfig.TableSize = 20; dbConfig.PageSize = 4096; }
public void GetHashDBAndCursor(string home, string name, out HashDatabase db, out HashCursor cursor) { string dbFileName = home + "/" + name + ".db"; HashDatabaseConfig dbConfig = new HashDatabaseConfig(); dbConfig.Creation = CreatePolicy.IF_NEEDED; db = HashDatabase.Open(dbFileName, dbConfig); cursor = db.Cursor(); }
public override void TestConfigWithoutEnv() { testName = "TestConfigWithoutEnv"; SetUpTest(false); XmlElement xmlElem = Configuration.TestSetUp( testFixtureName, testName); HashDatabaseConfig hashConfig = new HashDatabaseConfig(); Config(xmlElem, ref hashConfig, true); Confirm(xmlElem, hashConfig, true); }
/// <summary> /// Instantiate a new HashDatabase object and open the database /// represented by <paramref name="Filename"/> and /// <paramref name="DatabaseName"/>. /// </summary> /// <remarks> /// <para> /// If both <paramref name="Filename"/> and /// <paramref name="DatabaseName"/> are null, the database is strictly /// temporary and cannot be opened by any other thread of control, thus /// the database can only be accessed by sharing the single database /// object that created it, in circumstances where doing so is safe. If /// <paramref name="Filename"/> is null and /// <paramref name="DatabaseName"/> is non-null, the database can be /// opened by other threads of control and will be replicated to client /// sites in any replication group. /// </para> /// <para> /// If <paramref name="txn"/> is null, but /// <see cref="DatabaseConfig.AutoCommit"/> is set, the operation will /// be implicitly transaction protected. Note that transactionally /// protected operations on a database object require the object itself /// to be transactionally protected during its open. Also note that the /// transaction must be committed before the object is closed. /// </para> /// </remarks> /// <param name="Filename"> /// The name of an underlying file that will be used to back the /// database. In-memory databases never intended to be preserved on disk /// may be created by setting this parameter to null. /// </param> /// <param name="DatabaseName"> /// This parameter allows applications to have multiple databases in a /// single file. Although no DatabaseName needs to be specified, it is /// an error to attempt to open a second database in a file that was not /// initially created using a database name. /// </param> /// <param name="cfg">The database's configuration</param> /// <param name="txn"> /// If the operation is part of an application-specified transaction, /// <paramref name="txn"/> is a Transaction object returned from /// <see cref="DatabaseEnvironment.BeginTransaction"/>; if /// the operation is part of a Berkeley DB Concurrent Data Store group, /// <paramref name="txn"/> is a handle returned from /// <see cref="DatabaseEnvironment.BeginCDSGroup"/>; otherwise null. /// </param> /// <returns>A new, open database object</returns> public static HashDatabase Open(string Filename, string DatabaseName, HashDatabaseConfig cfg, Transaction txn) { HashDatabase ret = new HashDatabase(cfg.Env, 0); ret.Config(cfg); ret.db.open(Transaction.getDB_TXN(txn), Filename, DatabaseName, DBTYPE.DB_HASH, cfg.openFlags, 0); ret.isOpen = true; return(ret); }
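/*
 * Illustrative sketch (not part of the library source): opening a named
 * database inside a transactional environment with the four-argument
 * Open() above. The environment home, file name, and database name are
 * assumptions made for this example; the home directory must already exist.
 */
using BerkeleyDB;

class TxnOpenExample {
    static void Main() {
        DatabaseEnvironmentConfig envCfg = new DatabaseEnvironmentConfig();
        envCfg.Create = true;
        envCfg.UseMPool = true;
        envCfg.UseLogging = true;
        envCfg.UseTxns = true;
        DatabaseEnvironment env = DatabaseEnvironment.Open("envHome", envCfg);

        // Open (or create) the database "classes" inside the file mydata.db,
        // protected by an explicit transaction.
        Transaction openTxn = env.BeginTransaction();
        HashDatabaseConfig cfg = new HashDatabaseConfig();
        cfg.Creation = CreatePolicy.IF_NEEDED;
        cfg.Env = env;
        HashDatabase db = HashDatabase.Open("mydata.db", "classes", cfg, openTxn);

        // The open transaction must be committed before the database is closed.
        openTxn.Commit();

        db.Close();
        env.Close();
    }
}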
public static void Confirm(XmlElement xmlElement, HashDatabaseConfig hashDBConfig, bool compulsory) { DatabaseConfig dbConfig = hashDBConfig; Confirm(xmlElement, dbConfig, compulsory); // Confirm Hash database specific configuration. Configuration.ConfirmCreatePolicy(xmlElement, "Creation", hashDBConfig.Creation, compulsory); Configuration.ConfirmDuplicatesPolicy(xmlElement, "Duplicates", hashDBConfig.Duplicates, compulsory); Configuration.ConfirmUint(xmlElement, "FillFactor", hashDBConfig.FillFactor, compulsory); Configuration.ConfirmUint(xmlElement, "NumElements", hashDBConfig.TableSize, compulsory); }
static void Main( string[] args ) { try { var cfg = new HashDatabaseConfig(); cfg.Duplicates = DuplicatesPolicy.UNSORTED; cfg.Creation = CreatePolicy.IF_NEEDED; cfg.CacheSize = new CacheInfo( 0, 64 * 1024, 1 ); cfg.PageSize = 8 * 1024; Database db = HashDatabase.Open( "d:\\test.db", "hat_db", cfg ); Console.WriteLine("db opened"); var key = new DatabaseEntry(); var data = new DatabaseEntry(); key.Data = System.Text.Encoding.ASCII.GetBytes("key1"); data.Data = System.Text.Encoding.ASCII.GetBytes("val1"); try { db.Put( key, data ); db.Put( key, data ); } catch ( Exception ex ) { Console.WriteLine( ex.Message ); } using ( var dbc = db.Cursor() ) { System.Text.ASCIIEncoding decode = new ASCIIEncoding(); /* Walk through the database and print out key/data pairs. */ Console.WriteLine( "All key : data pairs:" ); foreach ( KeyValuePair<DatabaseEntry, DatabaseEntry> p in dbc ) Console.WriteLine( "{0}::{1}", decode.GetString( p.Key.Data ), decode.GetString( p.Value.Data ) ); } db.Close(); Console.WriteLine( "db closed" ); } catch ( Exception ex ) { Console.WriteLine( ex.Message ); } Console.ReadLine(); }
private void Config(HashDatabaseConfig cfg) { base.Config(cfg); /* * Database.Config calls set_flags, but that does not get the Hash * specific flags. No harm in calling it again. */ db.set_flags(cfg.flags); if (cfg.BlobDir != null && cfg.Env == null) db.set_blob_dir(cfg.BlobDir); if (cfg.blobThresholdIsSet) db.set_blob_threshold(cfg.BlobThreshold, 0); if (cfg.HashFunction != null) HashFunction = cfg.HashFunction; // The duplicate comparison function cannot change. if (cfg.DuplicateCompare != null) DupCompare = cfg.DuplicateCompare; if (cfg.fillFactorIsSet) db.set_h_ffactor(cfg.FillFactor); if (cfg.nelemIsSet) db.set_h_nelem(cfg.TableSize); if (cfg.HashComparison != null) Compare = cfg.HashComparison; if (cfg.partitionIsSet) { nparts = cfg.NParts; Partition = cfg.Partition; if (Partition == null) doPartitionRef = null; else doPartitionRef = new BDB_PartitionDelegate(doPartition); partitionKeys = cfg.PartitionKeys; IntPtr[] ptrs = null; if (partitionKeys != null) { int size = (int)nparts - 1; ptrs = new IntPtr[size]; for (int i = 0; i < size; i++) { ptrs[i] = DBT.getCPtr( DatabaseEntry.getDBT(partitionKeys[i])).Handle; } } db.set_partition(nparts, ptrs, doPartitionRef); } }
public static void Config(XmlElement xmlElement, ref HashDatabaseConfig hashDBConfig, bool compulsory) { uint fillFactor = 0; uint numElements = 0; DatabaseConfig dbConfig = hashDBConfig; Config(xmlElement, ref dbConfig, compulsory); // Configure specific fields/properties of Hash db Configuration.ConfigCreatePolicy(xmlElement, "Creation", ref hashDBConfig.Creation, compulsory); Configuration.ConfigDuplicatesPolicy(xmlElement, "Duplicates", ref hashDBConfig.Duplicates, compulsory); if (Configuration.ConfigUint(xmlElement, "FillFactor", ref fillFactor, compulsory)) hashDBConfig.FillFactor = fillFactor; if (Configuration.ConfigUint(xmlElement, "NumElements", ref numElements, compulsory)) hashDBConfig.TableSize = numElements; }
public void OpenSecHashDB(string className, string funName, string dbFileName, string dbSecFileName, bool ifDBName) { XmlElement xmlElem = Configuration.TestSetUp( className, funName); // Open a primary hash database. HashDatabaseConfig primaryDBConfig = new HashDatabaseConfig(); primaryDBConfig.Creation = CreatePolicy.IF_NEEDED; HashDatabase primaryDB; /* * If a secondary database name is given, the primary * database is also opened with a database name. */ if (ifDBName == false) primaryDB = HashDatabase.Open(dbFileName, primaryDBConfig); else primaryDB = HashDatabase.Open(dbFileName, "primary", primaryDBConfig); try { // Open a new secondary database. SecondaryHashDatabaseConfig secHashDBConfig = new SecondaryHashDatabaseConfig( primaryDB, null); SecondaryHashDatabaseConfigTest.Config( xmlElem, ref secHashDBConfig, false); secHashDBConfig.Creation = CreatePolicy.IF_NEEDED; SecondaryHashDatabase secHashDB; if (ifDBName == false) secHashDB = SecondaryHashDatabase.Open( dbSecFileName, secHashDBConfig); else secHashDB = SecondaryHashDatabase.Open( dbSecFileName, "secondary", secHashDBConfig); // Close the secondary database. secHashDB.Close(); // Open the existing secondary database. SecondaryDatabaseConfig secDBConfig = new SecondaryDatabaseConfig( primaryDB, null); SecondaryDatabase secDB; if (ifDBName == false) secDB = SecondaryHashDatabase.Open( dbSecFileName, secDBConfig); else secDB = SecondaryHashDatabase.Open( dbSecFileName, "secondary", secDBConfig); // Close secondary database. secDB.Close(); } catch (DatabaseException) { throw new TestException(); } finally { // Close primary database. primaryDB.Close(); } }
public void TestStats() { testName = "TestStats"; testHome = testFixtureHome + "/" + testName; string dbFileName = testHome + "/" + testName + ".db"; Configuration.ClearDir(testHome); HashDatabaseConfig dbConfig = new HashDatabaseConfig(); ConfigCase1(dbConfig); HashDatabase db = HashDatabase.Open(dbFileName, dbConfig); HashStats stats = db.Stats(); HashStats fastStats = db.FastStats(); ConfirmStatsPart1Case1(stats); ConfirmStatsPart1Case1(fastStats); // Put 100 records into the database. PutRecordCase1(db, null); stats = db.Stats(); ConfirmStatsPart2Case1(stats); // Delete some data to get some free pages. byte[] bigArray = new byte[262144]; db.Delete(new DatabaseEntry(bigArray)); stats = db.Stats(); ConfirmStatsPart3Case1(stats); db.Close(); }
/// <summary> /// Instantiate a new HashDatabase object and open the database /// represented by <paramref name="Filename"/>. /// </summary> /// <remarks> /// <para> /// If <paramref name="Filename"/> is null, the database is strictly /// temporary and cannot be opened by any other thread of control, thus /// the database can only be accessed by sharing the single database /// object that created it, in circumstances where doing so is safe. /// </para> /// <para> /// If <see cref="DatabaseConfig.AutoCommit"/> is set, the operation /// will be implicitly transaction protected. Note that transactionally /// protected operations on a database object require the object itself /// to be transactionally protected during its open. /// </para> /// </remarks> /// <param name="Filename"> /// The name of an underlying file that will be used to back the /// database. In-memory databases never intended to be preserved on disk /// may be created by setting this parameter to null. /// </param> /// <param name="cfg">The database's configuration</param> /// <returns>A new, open database object</returns> public static HashDatabase Open( string Filename, HashDatabaseConfig cfg) { return(Open(Filename, null, cfg, null)); }
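/*
 * Illustrative sketch (not part of the library source): the simplest use of
 * the Open(Filename, cfg) overload above -- create the file if needed,
 * store one record, and close. The file name "example.db" and the key/data
 * strings are assumptions made for this example.
 */
using System;
using System.Text;
using BerkeleyDB;

class SimpleOpenExample {
    static void Main() {
        HashDatabaseConfig cfg = new HashDatabaseConfig();
        cfg.Creation = CreatePolicy.IF_NEEDED;
        HashDatabase db = HashDatabase.Open("example.db", cfg);

        DatabaseEntry key = new DatabaseEntry(Encoding.ASCII.GetBytes("fruit"));
        DatabaseEntry data = new DatabaseEntry(Encoding.ASCII.GetBytes("apple"));
        db.Put(key, data);

        // The record should now be present.
        Console.WriteLine(db.Exists(key));

        db.Close();
    }
}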
public void TestInsertToLoc() { HashDatabase db; HashDatabaseConfig dbConfig; HashCursor cursor; DatabaseEntry data; KeyValuePair<DatabaseEntry, DatabaseEntry> pair; string dbFileName; testName = "TestInsertToLoc"; SetUpTest(true); dbFileName = testHome + "/" + testName + ".db"; // Open database and cursor. dbConfig = new HashDatabaseConfig(); dbConfig.Creation = CreatePolicy.IF_NEEDED; /* * The database should be set to be unsorted to * insert before/after a certain record. */ dbConfig.Duplicates = DuplicatesPolicy.UNSORTED; db = HashDatabase.Open(dbFileName, dbConfig); cursor = db.Cursor(); // Add record("key", "data") into database. AddOneByCursor(cursor); /* * Insert the new record("key","data1") after the * record("key", "data"). */ data = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data1")); cursor.Insert(data, Cursor.InsertLocation.AFTER); /* * Move the cursor to the record("key", "data") and * confirm that the next record is the one just inserted. */ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>( new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")), new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data"))); Assert.IsTrue(cursor.Move(pair, true)); Assert.IsTrue(cursor.MoveNext()); Assert.AreEqual(ASCIIEncoding.ASCII.GetBytes("key"), cursor.Current.Key.Data); Assert.AreEqual(ASCIIEncoding.ASCII.GetBytes("data1"), cursor.Current.Value.Data); try { try { cursor.Insert(data, Cursor.InsertLocation.FIRST); throw new TestException(); } catch (ArgumentException) { } try { cursor.Insert(data, Cursor.InsertLocation.LAST); throw new TestException(); } catch (ArgumentException) { } } finally { cursor.Close(); db.Close(); } }
/// <summary> /// Instantiate a new HashDatabase object and open the database /// represented by <paramref name="Filename"/> and /// <paramref name="DatabaseName"/>. /// </summary> /// <remarks> /// <para> /// If both <paramref name="Filename"/> and /// <paramref name="DatabaseName"/> are null, the database is strictly /// temporary and cannot be opened by any other thread of control, thus /// the database can only be accessed by sharing the single database /// object that created it, in circumstances where doing so is safe. If /// <paramref name="Filename"/> is null and /// <paramref name="DatabaseName"/> is non-null, the database can be /// opened by other threads of control and will be replicated to client /// sites in any replication group. /// </para> /// <para> /// If <see cref="DatabaseConfig.AutoCommit"/> is set, the operation /// will be implicitly transaction protected. Note that transactionally /// protected operations on a database object require the object itself /// to be transactionally protected during its open. /// </para> /// </remarks> /// <param name="Filename"> /// The name of an underlying file that will be used to back the /// database. In-memory databases never intended to be preserved on disk /// may be created by setting this parameter to null. /// </param> /// <param name="DatabaseName"> /// This parameter allows applications to have multiple databases in a /// single file. Although no DatabaseName needs to be specified, it is /// an error to attempt to open a second database in a file that was not /// initially created using a database name. /// </param> /// <param name="cfg">The database's configuration</param> /// <returns>A new, open database object</returns> public static HashDatabase Open( string Filename, string DatabaseName, HashDatabaseConfig cfg) { return Open(Filename, DatabaseName, cfg, null); }
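/*
 * Illustrative sketch (not part of the library source): the Open(Filename,
 * DatabaseName, cfg) overload above lets several databases share one
 * physical file. The file name "catalog.db" and the database names
 * "authors" and "titles" are assumptions made for this example.
 */
using BerkeleyDB;

class MultiDbFileExample {
    static void Main() {
        HashDatabaseConfig cfg = new HashDatabaseConfig();
        cfg.Creation = CreatePolicy.IF_NEEDED;

        // Both databases live in the single file catalog.db.
        HashDatabase authors = HashDatabase.Open("catalog.db", "authors", cfg);
        HashDatabase titles = HashDatabase.Open("catalog.db", "titles", cfg);

        titles.Close();
        authors.Close();
    }
}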
public void TestAddUnique() { HashDatabase db; HashCursor cursor; KeyValuePair<DatabaseEntry, DatabaseEntry> pair; testName = "TestAddUnique"; SetUpTest(true); // Open a database and cursor. HashDatabaseConfig dbConfig = new HashDatabaseConfig(); dbConfig.Creation = CreatePolicy.IF_NEEDED; /* * AddUnique requires that the database be configured * with sorted duplicates. */ dbConfig.Duplicates = DuplicatesPolicy.SORTED; db = HashDatabase.Open( testHome + "/" + testName + ".db", dbConfig); cursor = db.Cursor(); // Add record("key", "data") into database. AddOneByCursor(cursor); /* * Adding the duplicate record ("key", "data") should fail. */ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>( new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")), new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data"))); try { cursor.AddUnique(pair); } catch (KeyExistException) { } finally { cursor.Close(); db.Close(); } }
public void TestKeyExistException() { testName = "TestKeyExistException"; testHome = testFixtureHome + "/" + testName; string dbFileName = testHome + "/" + testName + ".db"; Configuration.ClearDir(testHome); HashDatabaseConfig hashConfig = new HashDatabaseConfig(); hashConfig.Creation = CreatePolicy.ALWAYS; hashConfig.Duplicates = DuplicatesPolicy.SORTED; HashDatabase hashDB = HashDatabase.Open(dbFileName, hashConfig); // Put the same record into db twice. DatabaseEntry key, data; key = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("1")); data = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("1")); try { hashDB.PutNoDuplicate(key, data); hashDB.PutNoDuplicate(key, data); } catch (KeyExistException) { throw new ExpectedTestException(); } finally { hashDB.Close(); } }
public void TestHashFunction() { testName = "TestHashFunction"; testHome = testFixtureHome + "/" + testName; string dbFileName = testHome + "/" + testName + ".db"; Configuration.ClearDir(testHome); HashDatabaseConfig dbConfig = new HashDatabaseConfig(); dbConfig.Creation = CreatePolicy.IF_NEEDED; dbConfig.HashFunction = new HashFunctionDelegate(HashFunction); HashDatabase db = HashDatabase.Open(dbFileName, dbConfig); // The hash function sets the lowest byte of the key to 0. uint data = db.HashFunction(BitConverter.GetBytes(1)); Assert.AreEqual(0, data); db.Close(); }
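/*
 * The HashFunction helper referenced above is not shown in this excerpt.
 * A minimal sketch consistent with the comment and the assertion (hashing
 * the 4-byte key 1 yields 0) could look like this; the actual test helper
 * may differ.
 */
private uint HashFunction(byte[] data)
{
    // Assumes at least 4 bytes, as used by the test's BitConverter keys.
    byte[] tmp = (byte[])data.Clone();
    tmp[0] = 0;                       // zero the lowest byte
    return BitConverter.ToUInt32(tmp, 0);
}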
public void TestHashComparison() { testName = "TestHashComparison"; testHome = testFixtureHome + "/" + testName; string dbFileName = testHome + "/" + testName + ".db"; Configuration.ClearDir(testHome); HashDatabaseConfig dbConfig = new HashDatabaseConfig(); dbConfig.Creation = CreatePolicy.IF_NEEDED; dbConfig.HashComparison = new EntryComparisonDelegate(EntryComparison); HashDatabase db = HashDatabase.Open(dbFileName, dbConfig); int ret; /* * The comparison returns the lowest byte of the first * DBT minus the lowest byte of the second. */ ret = db.Compare(new DatabaseEntry(BitConverter.GetBytes(2)), new DatabaseEntry(BitConverter.GetBytes(2))); Assert.AreEqual(0, ret); ret = db.Compare(new DatabaseEntry(BitConverter.GetBytes(256)), new DatabaseEntry(BitConverter.GetBytes(1))); Assert.Greater(0, ret); db.Close(); }
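/*
 * The EntryComparison helper referenced above is not shown in this excerpt.
 * A minimal sketch matching the comment (lowest byte of the first entry
 * minus that of the second) could be the following; the actual test helper
 * may differ.
 */
private int EntryComparison(DatabaseEntry dbt1, DatabaseEntry dbt2)
{
    // Compare only the least significant byte of each entry.
    return dbt1.Data[0] - dbt2.Data[0];
}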
public void TestCompactWithoutTxn() { int i, nRecs; nRecs = 10000; testName = "TestCompactWithoutTxn"; testHome = testFixtureHome + "/" + testName; string hashDBFileName = testHome + "/" + testName + ".db"; Configuration.ClearDir(testHome); HashDatabaseConfig hashDBConfig = new HashDatabaseConfig(); hashDBConfig.Creation = CreatePolicy.ALWAYS; // The minimum page size hashDBConfig.PageSize = 512; hashDBConfig.HashComparison = new EntryComparisonDelegate(dbIntCompare); using (HashDatabase hashDB = HashDatabase.Open( hashDBFileName, hashDBConfig)) { DatabaseEntry key; DatabaseEntry data; // Fill the database with entries from 0 to 9999 for (i = 0; i < nRecs; i++) { key = new DatabaseEntry(BitConverter.GetBytes(i)); data = new DatabaseEntry(BitConverter.GetBytes(i)); hashDB.Put(key, data); } /* * Delete entries below 500, between 3000 and * 5000 and above 7000 */ for (i = 0; i < nRecs; i++) if (i < 500 || i > 7000 || (i < 5000 && i > 3000)) { key = new DatabaseEntry(BitConverter.GetBytes(i)); hashDB.Delete(key); } hashDB.Sync(); long fileSize = new FileInfo(hashDBFileName).Length; // Compact database CompactConfig cCfg = new CompactConfig(); cCfg.FillPercentage = 30; cCfg.Pages = 10; cCfg.Timeout = 1000; cCfg.TruncatePages = true; cCfg.start = new DatabaseEntry(BitConverter.GetBytes(1)); cCfg.stop = new DatabaseEntry(BitConverter.GetBytes(7000)); CompactData compactData = hashDB.Compact(cCfg); Assert.IsFalse((compactData.Deadlocks == 0) && (compactData.Levels == 0) && (compactData.PagesExamined == 0) && (compactData.PagesFreed == 0) && (compactData.PagesTruncated == 0)); hashDB.Sync(); long compactedFileSize = new FileInfo(hashDBFileName).Length; Assert.Less(compactedFileSize, fileSize); } }
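/*
 * The dbIntCompare delegate used as the HashComparison above is not shown
 * in this excerpt. Since the test stores little-endian integer keys built
 * with BitConverter, a plausible sketch is a straight integer comparison;
 * the actual test helper may differ.
 */
private int dbIntCompare(DatabaseEntry dbt1, DatabaseEntry dbt2)
{
    int a = BitConverter.ToInt32(dbt1.Data, 0);
    int b = BitConverter.ToInt32(dbt2.Data, 0);
    return a.CompareTo(b);
}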
public void StatsInTxn(string home, string name, bool ifIsolation) { DatabaseEnvironmentConfig envConfig = new DatabaseEnvironmentConfig(); EnvConfigCase1(envConfig); DatabaseEnvironment env = DatabaseEnvironment.Open(home, envConfig); Transaction openTxn = env.BeginTransaction(); HashDatabaseConfig dbConfig = new HashDatabaseConfig(); ConfigCase1(dbConfig); dbConfig.Env = env; HashDatabase db = HashDatabase.Open(name + ".db", dbConfig, openTxn); openTxn.Commit(); Transaction statsTxn = env.BeginTransaction(); HashStats stats; HashStats fastStats; if (ifIsolation == false) { stats = db.Stats(statsTxn); fastStats = db.FastStats(statsTxn); } else { stats = db.Stats(statsTxn, Isolation.DEGREE_ONE); fastStats = db.FastStats(statsTxn, Isolation.DEGREE_ONE); } ConfirmStatsPart1Case1(stats); // Put 100 records into the database. PutRecordCase1(db, statsTxn); if (ifIsolation == false) stats = db.Stats(statsTxn); else stats = db.Stats(statsTxn, Isolation.DEGREE_TWO); ConfirmStatsPart2Case1(stats); // Delete some data to get some free pages. byte[] bigArray = new byte[262144]; db.Delete(new DatabaseEntry(bigArray), statsTxn); if (ifIsolation == false) stats = db.Stats(statsTxn); else stats = db.Stats(statsTxn, Isolation.DEGREE_THREE); ConfirmStatsPart3Case1(stats); statsTxn.Commit(); db.Close(); env.Close(); }
public void TestHashFunction() { testName = "TestHashFunction"; SetUpTest(true); string dbFileName = testHome + "/" + testName + ".db"; string dbSecFileName = testHome + "/" + testName + "_sec.db"; // Open a primary hash database. HashDatabaseConfig dbConfig = new HashDatabaseConfig(); dbConfig.Creation = CreatePolicy.IF_NEEDED; HashDatabase hashDB = HashDatabase.Open( dbFileName, dbConfig); /* * Define hash function and open a secondary * hash database. */ SecondaryHashDatabaseConfig secDBConfig = new SecondaryHashDatabaseConfig(hashDB, null); secDBConfig.HashFunction = new HashFunctionDelegate(HashFunction); secDBConfig.Creation = CreatePolicy.IF_NEEDED; SecondaryHashDatabase secDB = SecondaryHashDatabase.Open(dbSecFileName, secDBConfig); /* * Confirm the hash function defined in the configuration * by calling it through the secondary database: it should * return 0 for the key 1. */ uint data = secDB.HashFunction(BitConverter.GetBytes(1)); Assert.AreEqual(0, data); // Close all. secDB.Close(); hashDB.Close(); }
public void TestConfig() { testName = "TestConfig"; testHome = testFixtureHome + "/" + testName; string dbFileName = testHome + "/" + testName + ".db"; Configuration.ClearDir(testHome); XmlElement xmlElem = Configuration.TestSetUp( testFixtureName, testName); // Open a primary hash database. HashDatabaseConfig hashDBConfig = new HashDatabaseConfig(); hashDBConfig.Creation = CreatePolicy.IF_NEEDED; HashDatabase hashDB = HashDatabase.Open( dbFileName, hashDBConfig); SecondaryHashDatabaseConfig secDBConfig = new SecondaryHashDatabaseConfig(hashDB, null); Config(xmlElem, ref secDBConfig, true); Confirm(xmlElem, secDBConfig, true); // Close the primary hash database. hashDB.Close(); }
private void Config(HashDatabaseConfig cfg) { base.Config(cfg); /* * Database.Config calls set_flags, but that does not get the Hash * specific flags. No harm in calling it again. */ db.set_flags(cfg.flags); if (cfg.BlobDir != null && cfg.Env == null) { db.set_ext_file_dir(cfg.BlobDir); } if (cfg.ExternalFileDir != null && cfg.Env == null) { db.set_ext_file_dir(cfg.ExternalFileDir); } if (cfg.blobThresholdIsSet) { db.set_ext_file_threshold(cfg.BlobThreshold, 0); } if (cfg.HashFunction != null) { HashFunction = cfg.HashFunction; } // The duplicate comparison function cannot change. if (cfg.DuplicateCompare != null) { DupCompare = cfg.DuplicateCompare; } if (cfg.fillFactorIsSet) { db.set_h_ffactor(cfg.FillFactor); } if (cfg.nelemIsSet) { db.set_h_nelem(cfg.TableSize); } if (cfg.HashComparison != null) { Compare = cfg.HashComparison; } if (cfg.partitionIsSet) { nparts = cfg.NParts; Partition = cfg.Partition; if (Partition == null) { doPartitionRef = null; } else { doPartitionRef = new BDB_PartitionDelegate(doPartition); } partitionKeys = cfg.PartitionKeys; IntPtr[] ptrs = null; if (partitionKeys != null) { int size = (int)nparts - 1; ptrs = new IntPtr[size]; for (int i = 0; i < size; i++) { ptrs[i] = DBT.getCPtr( DatabaseEntry.getDBT(partitionKeys[i])).Handle; } } db.set_partition(nparts, ptrs, doPartitionRef); } }
/// <summary> /// Instantiate a new HashDatabase object and open the database /// represented by <paramref name="Filename"/>. /// </summary> /// <remarks> /// <para> /// If <paramref name="Filename"/> is null, the database is strictly /// temporary and cannot be opened by any other thread of control, thus /// the database can only be accessed by sharing the single database /// object that created it, in circumstances where doing so is safe. /// </para> /// <para> /// If <paramref name="txn"/> is null, but /// <see cref="DatabaseConfig.AutoCommit"/> is set, the operation will /// be implicitly transaction protected. Note that transactionally /// protected operations on a database object require the object itself /// to be transactionally protected during its open. Also note that the /// transaction must be committed before the object is closed. /// </para> /// </remarks> /// <param name="Filename"> /// The name of an underlying file that will be used to back the /// database. In-memory databases never intended to be preserved on disk /// may be created by setting this parameter to null. /// </param> /// <param name="cfg">The database's configuration</param> /// <param name="txn"> /// If the operation is part of an application-specified transaction, /// <paramref name="txn"/> is a Transaction object returned from /// <see cref="DatabaseEnvironment.BeginTransaction"/>; if /// the operation is part of a Berkeley DB Concurrent Data Store group, /// <paramref name="txn"/> is a handle returned from /// <see cref="DatabaseEnvironment.BeginCDSGroup"/>; otherwise null. /// </param> /// <returns>A new, open database object</returns> public static HashDatabase Open( string Filename, HashDatabaseConfig cfg, Transaction txn) { return Open(Filename, null, cfg, txn); }
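/*
 * Illustrative sketch (not part of the library source): the remarks above
 * note that a null txn combined with DatabaseConfig.AutoCommit still gives
 * a transaction-protected open. The environment home "envHome" and file
 * name "auto.db" are assumptions made for this example.
 */
using BerkeleyDB;

class AutoCommitOpenExample {
    static void Main() {
        DatabaseEnvironmentConfig envCfg = new DatabaseEnvironmentConfig();
        envCfg.Create = true;
        envCfg.UseMPool = true;
        envCfg.UseLogging = true;
        envCfg.UseTxns = true;
        DatabaseEnvironment env = DatabaseEnvironment.Open("envHome", envCfg);

        HashDatabaseConfig cfg = new HashDatabaseConfig();
        cfg.Creation = CreatePolicy.IF_NEEDED;
        cfg.Env = env;
        cfg.AutoCommit = true;   // the open is implicitly wrapped in a transaction
        HashDatabase db = HashDatabase.Open("auto.db", cfg, null);

        db.Close();
        env.Close();
    }
}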
public void TestPutNoDuplicateWithTxn() { testName = "TestPutNoDuplicateWithTxn"; testHome = testFixtureHome + "/" + testName; Configuration.ClearDir(testHome); // Open an environment. DatabaseEnvironmentConfig envConfig = new DatabaseEnvironmentConfig(); envConfig.Create = true; envConfig.UseLogging = true; envConfig.UseMPool = true; envConfig.UseTxns = true; DatabaseEnvironment env = DatabaseEnvironment.Open( testHome, envConfig); // Open a hash database within a transaction. Transaction txn = env.BeginTransaction(); HashDatabaseConfig dbConfig = new HashDatabaseConfig(); dbConfig.Creation = CreatePolicy.IF_NEEDED; dbConfig.Duplicates = DuplicatesPolicy.SORTED; dbConfig.Env = env; HashDatabase db = HashDatabase.Open(testName + ".db", dbConfig, txn); DatabaseEntry dbt = new DatabaseEntry(BitConverter.GetBytes((int)100)); db.PutNoDuplicate(dbt, dbt, txn); try { db.PutNoDuplicate(dbt, dbt, txn); } catch (KeyExistException) { throw new ExpectedTestException(); } finally { // Close all. db.Close(); txn.Commit(); env.Close(); } }
private void GetCursorWithConfig(string dbFile, string dbName, CursorConfig cfg, DatabaseType type) { Database db; Cursor cursor; Configuration.ClearDir(testHome); if (type == DatabaseType.BTREE) { BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig(); dbConfig.Creation = CreatePolicy.IF_NEEDED; db = BTreeDatabase.Open(dbFile, dbName, dbConfig); cursor = ((BTreeDatabase)db).Cursor(cfg); } else if (type == DatabaseType.HASH) { HashDatabaseConfig dbConfig = new HashDatabaseConfig(); dbConfig.Creation = CreatePolicy.IF_NEEDED; db = (HashDatabase)HashDatabase.Open(dbFile, dbName, dbConfig); cursor = ((HashDatabase)db).Cursor(cfg); } else if (type == DatabaseType.QUEUE) { QueueDatabaseConfig dbConfig = new QueueDatabaseConfig(); dbConfig.Creation = CreatePolicy.IF_NEEDED; dbConfig.Length = 100; db = QueueDatabase.Open(dbFile, dbConfig); cursor = ((QueueDatabase)db).Cursor(cfg); } else if (type == DatabaseType.RECNO) { RecnoDatabaseConfig dbConfig = new RecnoDatabaseConfig(); dbConfig.Creation = CreatePolicy.IF_NEEDED; db = RecnoDatabase.Open(dbFile, dbName, dbConfig); cursor = ((RecnoDatabase)db).Cursor(cfg); } else throw new TestException(); if (cfg.Priority != null) Assert.AreEqual(cursor.Priority, cfg.Priority); else Assert.AreEqual(CachePriority.DEFAULT, cursor.Priority); Cursor dupCursor = cursor.Duplicate(false); Assert.AreEqual(cursor.Priority, dupCursor.Priority); cursor.Close(); db.Close(); }
public void TestPutNoDuplicateWithUnsortedDuplicate() { testName = "TestPutNoDuplicateWithUnsortedDuplicate"; testHome = testFixtureHome + "/" + testName; string dbFileName = testHome + "/" + testName + ".db"; Configuration.ClearDir(testHome); HashDatabaseConfig hashConfig = new HashDatabaseConfig(); hashConfig.Creation = CreatePolicy.ALWAYS; hashConfig.Duplicates = DuplicatesPolicy.UNSORTED; hashConfig.ErrorPrefix = testName; HashDatabase hashDB = HashDatabase.Open(dbFileName, hashConfig); DatabaseEntry key, data; key = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("1")); data = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("1")); try { hashDB.PutNoDuplicate(key, data); } catch (DatabaseException) { throw new ExpectedTestException(); } finally { hashDB.Close(); } }
public void TestOpenExistingHashDB() { testName = "TestOpenExistingHashDB"; testHome = testFixtureHome + "/" + testName; string dbFileName = testHome + "/" + testName + ".db"; Configuration.ClearDir(testHome); HashDatabaseConfig hashConfig = new HashDatabaseConfig(); hashConfig.Creation = CreatePolicy.ALWAYS; HashDatabase hashDB = HashDatabase.Open(dbFileName, hashConfig); hashDB.Close(); DatabaseConfig dbConfig = new DatabaseConfig(); Database db = Database.Open(dbFileName, dbConfig); Assert.AreEqual(db.Type, DatabaseType.HASH); db.Close(); }
public void TestForeignKeyDelete(DatabaseType dbtype, ForeignKeyDeleteAction action) { SetUpTest(true); string dbFileName = testHome + "/" + testName + ".db"; string fdbFileName = testHome + "/" + testName + "foreign.db"; string sdbFileName = testHome + "/" + testName + "sec.db"; Database primaryDB, fdb; SecondaryDatabase secDB; // Open primary database. if (dbtype == DatabaseType.BTREE) { BTreeDatabaseConfig btConfig = new BTreeDatabaseConfig(); btConfig.Creation = CreatePolicy.ALWAYS; primaryDB = BTreeDatabase.Open(dbFileName, btConfig); fdb = BTreeDatabase.Open(fdbFileName, btConfig); } else if (dbtype == DatabaseType.HASH) { HashDatabaseConfig hConfig = new HashDatabaseConfig(); hConfig.Creation = CreatePolicy.ALWAYS; primaryDB = HashDatabase.Open(dbFileName, hConfig); fdb = HashDatabase.Open(fdbFileName, hConfig); } else if (dbtype == DatabaseType.QUEUE) { QueueDatabaseConfig qConfig = new QueueDatabaseConfig(); qConfig.Creation = CreatePolicy.ALWAYS; qConfig.Length = 4; primaryDB = QueueDatabase.Open(dbFileName, qConfig); fdb = QueueDatabase.Open(fdbFileName, qConfig); } else if (dbtype == DatabaseType.RECNO) { RecnoDatabaseConfig rConfig = new RecnoDatabaseConfig(); rConfig.Creation = CreatePolicy.ALWAYS; primaryDB = RecnoDatabase.Open(dbFileName, rConfig); fdb = RecnoDatabase.Open(fdbFileName, rConfig); } else { throw new ArgumentException("Invalid DatabaseType"); } // Open secondary database. if (dbtype == DatabaseType.BTREE) { SecondaryBTreeDatabaseConfig secbtConfig = new SecondaryBTreeDatabaseConfig(primaryDB, new SecondaryKeyGenDelegate(SecondaryKeyGen)); secbtConfig.Creation = CreatePolicy.ALWAYS; secbtConfig.Duplicates = DuplicatesPolicy.SORTED; if (action == ForeignKeyDeleteAction.NULLIFY) secbtConfig.SetForeignKeyConstraint(fdb, action, new ForeignKeyNullifyDelegate(Nullify)); else secbtConfig.SetForeignKeyConstraint(fdb, action); secDB = SecondaryBTreeDatabase.Open(sdbFileName, secbtConfig); } else if (dbtype == DatabaseType.HASH) { SecondaryHashDatabaseConfig sechConfig = new SecondaryHashDatabaseConfig(primaryDB, new SecondaryKeyGenDelegate(SecondaryKeyGen)); sechConfig.Creation = CreatePolicy.ALWAYS; sechConfig.Duplicates = DuplicatesPolicy.SORTED; if (action == ForeignKeyDeleteAction.NULLIFY) sechConfig.SetForeignKeyConstraint(fdb, action, new ForeignKeyNullifyDelegate(Nullify)); else sechConfig.SetForeignKeyConstraint(fdb, action); secDB = SecondaryHashDatabase.Open(sdbFileName, sechConfig); } else if (dbtype == DatabaseType.QUEUE) { SecondaryQueueDatabaseConfig secqConfig = new SecondaryQueueDatabaseConfig(primaryDB, new SecondaryKeyGenDelegate(SecondaryKeyGen)); secqConfig.Creation = CreatePolicy.ALWAYS; secqConfig.Length = 4; if (action == ForeignKeyDeleteAction.NULLIFY) secqConfig.SetForeignKeyConstraint(fdb, action, new ForeignKeyNullifyDelegate(Nullify)); else secqConfig.SetForeignKeyConstraint(fdb, action); secDB = SecondaryQueueDatabase.Open(sdbFileName, secqConfig); } else if (dbtype == DatabaseType.RECNO) { SecondaryRecnoDatabaseConfig secrConfig = new SecondaryRecnoDatabaseConfig(primaryDB, new SecondaryKeyGenDelegate(SecondaryKeyGen)); secrConfig.Creation = CreatePolicy.ALWAYS; if (action == ForeignKeyDeleteAction.NULLIFY) secrConfig.SetForeignKeyConstraint(fdb, action, new ForeignKeyNullifyDelegate(Nullify)); else secrConfig.SetForeignKeyConstraint(fdb, action); secDB = SecondaryRecnoDatabase.Open(sdbFileName, secrConfig); } else { throw new ArgumentException("Invalid DatabaseType"); } /* Use integer keys for Queue/Recno support. 
*/ fdb.Put(new DatabaseEntry(BitConverter.GetBytes(100)), new DatabaseEntry(BitConverter.GetBytes(1001))); fdb.Put(new DatabaseEntry(BitConverter.GetBytes(200)), new DatabaseEntry(BitConverter.GetBytes(2002))); fdb.Put(new DatabaseEntry(BitConverter.GetBytes(300)), new DatabaseEntry(BitConverter.GetBytes(3003))); primaryDB.Put(new DatabaseEntry(BitConverter.GetBytes(1)), new DatabaseEntry(BitConverter.GetBytes(100))); primaryDB.Put(new DatabaseEntry(BitConverter.GetBytes(2)), new DatabaseEntry(BitConverter.GetBytes(200))); if (dbtype == DatabaseType.BTREE || dbtype == DatabaseType.HASH) primaryDB.Put(new DatabaseEntry(BitConverter.GetBytes(3)), new DatabaseEntry(BitConverter.GetBytes(100))); try { fdb.Delete(new DatabaseEntry(BitConverter.GetBytes(100))); } catch (ForeignConflictException) { Assert.AreEqual(action, ForeignKeyDeleteAction.ABORT); } if (action == ForeignKeyDeleteAction.ABORT) { Assert.IsTrue(secDB.Exists(new DatabaseEntry(BitConverter.GetBytes(100)))); Assert.IsTrue(primaryDB.Exists(new DatabaseEntry(BitConverter.GetBytes(1)))); Assert.IsTrue(fdb.Exists(new DatabaseEntry(BitConverter.GetBytes(100)))); } else if (action == ForeignKeyDeleteAction.CASCADE) { try { Assert.IsFalse(secDB.Exists(new DatabaseEntry(BitConverter.GetBytes(100)))); } catch (KeyEmptyException) { Assert.IsTrue(dbtype == DatabaseType.QUEUE || dbtype == DatabaseType.RECNO); } try { Assert.IsFalse(primaryDB.Exists(new DatabaseEntry(BitConverter.GetBytes(1)))); } catch (KeyEmptyException) { Assert.IsTrue(dbtype == DatabaseType.QUEUE || dbtype == DatabaseType.RECNO); } try { Assert.IsFalse(fdb.Exists(new DatabaseEntry(BitConverter.GetBytes(100)))); } catch (KeyEmptyException) { Assert.IsTrue(dbtype == DatabaseType.QUEUE || dbtype == DatabaseType.RECNO); } } else if (action == ForeignKeyDeleteAction.NULLIFY) { try { Assert.IsFalse(secDB.Exists(new DatabaseEntry(BitConverter.GetBytes(100)))); } catch (KeyEmptyException) { Assert.IsTrue(dbtype == DatabaseType.QUEUE || dbtype == DatabaseType.RECNO); } Assert.IsTrue(primaryDB.Exists(new DatabaseEntry(BitConverter.GetBytes(1)))); try { Assert.IsFalse(fdb.Exists(new DatabaseEntry(BitConverter.GetBytes(100)))); } catch (KeyEmptyException) { Assert.IsTrue(dbtype == DatabaseType.QUEUE || dbtype == DatabaseType.RECNO); } } // Close secondary database. secDB.Close(); // Close primary database. primaryDB.Close(); // Close foreign database fdb.Close(); }
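/*
 * The SecondaryKeyGen helper used by the foreign-key test above is not
 * shown in this excerpt. Given that the test stores the foreign-database
 * key as the primary record's data, a plausible sketch simply promotes the
 * data to the secondary key; the actual test helper (and its Nullify
 * counterpart) may differ.
 */
private DatabaseEntry SecondaryKeyGen(DatabaseEntry key, DatabaseEntry data)
{
    // The secondary key is the foreign-database key stored as primary data.
    return new DatabaseEntry(data.Data);
}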
public void TestOpenNewHashDB() { testName = "TestOpenNewHashDB"; testHome = testFixtureHome + "/" + testName; string dbFileName = testHome + "/" + testName + ".db"; Configuration.ClearDir(testHome); XmlElement xmlElem = Configuration.TestSetUp(testFixtureName, testName); HashDatabaseConfig hashConfig = new HashDatabaseConfig(); HashDatabaseConfigTest.Config(xmlElem, ref hashConfig, true); HashDatabase hashDB = HashDatabase.Open(dbFileName, hashConfig); Confirm(xmlElem, hashDB, true); hashDB.Close(); }
private void DeleteMultipleAndMultipleKey(string dbFileName, string dbName, DatabaseType type, bool mulKey) { List<DatabaseEntry> kList = new List<DatabaseEntry>(); List<uint> rList = new List<uint>(); List<KeyValuePair<DatabaseEntry, DatabaseEntry>> pList = new List<KeyValuePair<DatabaseEntry, DatabaseEntry>>(); DatabaseEntry key; Database db; SecondaryDatabase secDb; Configuration.ClearDir(testHome); if (type == DatabaseType.BTREE) { BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig(); dbConfig.Creation = CreatePolicy.IF_NEEDED; db = BTreeDatabase.Open( dbFileName, dbName, dbConfig); SecondaryBTreeDatabaseConfig secDbConfig = new SecondaryBTreeDatabaseConfig(db, null); secDbConfig.Creation = CreatePolicy.IF_NEEDED; secDbConfig.Duplicates = DuplicatesPolicy.SORTED; secDbConfig.KeyGen = new SecondaryKeyGenDelegate(SecondaryKeyGen); secDb = SecondaryBTreeDatabase.Open( dbFileName, dbName + "_sec", secDbConfig); } else if (type == DatabaseType.HASH) { HashDatabaseConfig dbConfig = new HashDatabaseConfig(); dbConfig.Creation = CreatePolicy.IF_NEEDED; db = HashDatabase.Open( dbFileName, dbName, dbConfig); SecondaryHashDatabaseConfig secDbConfig = new SecondaryHashDatabaseConfig(db, null); secDbConfig.Creation = CreatePolicy.IF_NEEDED; secDbConfig.Duplicates = DuplicatesPolicy.SORTED; secDbConfig.KeyGen = new SecondaryKeyGenDelegate(SecondaryKeyGen); secDb = SecondaryHashDatabase.Open( dbFileName, dbName + "_sec", secDbConfig); } else if (type == DatabaseType.QUEUE) { QueueDatabaseConfig dbConfig = new QueueDatabaseConfig(); dbConfig.Creation = CreatePolicy.IF_NEEDED; dbConfig.Length = 4; db = QueueDatabase.Open(dbFileName, dbConfig); SecondaryQueueDatabaseConfig secDbConfig = new SecondaryQueueDatabaseConfig(db, null); secDbConfig.Creation = CreatePolicy.IF_NEEDED; secDbConfig.Length = 4; secDbConfig.KeyGen = new SecondaryKeyGenDelegate(SecondaryKeyGen); secDb = SecondaryQueueDatabase.Open( dbFileName + "_sec", secDbConfig); } else if (type == DatabaseType.RECNO) { RecnoDatabaseConfig dbConfig = new RecnoDatabaseConfig(); dbConfig.Creation = CreatePolicy.IF_NEEDED; db = RecnoDatabase.Open( dbFileName, dbName, dbConfig); SecondaryRecnoDatabaseConfig secDbConfig = new SecondaryRecnoDatabaseConfig(db, null); secDbConfig.Creation = CreatePolicy.IF_NEEDED; secDbConfig.KeyGen = new SecondaryKeyGenDelegate(SecondaryKeyGen); secDb = SecondaryRecnoDatabase.Open( dbFileName, dbName + "_sec", secDbConfig); } else throw new TestException(); for (uint i = 1; i <= 100; i++) { key = new DatabaseEntry( BitConverter.GetBytes(i)); if (i >= 50 && i < 60) kList.Add(key); else if (i > 80) pList.Add(new KeyValuePair< DatabaseEntry, DatabaseEntry>( key, key)); else if (type == DatabaseType.QUEUE || type == DatabaseType.RECNO) rList.Add(i); db.Put(key, key); } if (mulKey) { // Create bulk buffer for key/value pairs. MultipleKeyDatabaseEntry pBuff; if (type == DatabaseType.BTREE) pBuff = new MultipleKeyDatabaseEntry( pList, false); else if (type == DatabaseType.HASH) pBuff = new MultipleKeyDatabaseEntry( pList, false); else if (type == DatabaseType.QUEUE) pBuff = new MultipleKeyDatabaseEntry( pList, true); else pBuff = new MultipleKeyDatabaseEntry( pList, true); // Bulk delete with the key/value pair bulk buffer. 
secDb.Delete(pBuff); foreach (KeyValuePair<DatabaseEntry, DatabaseEntry> pair in pList) { try { db.GetBoth(pair.Key, pair.Value); throw new TestException(); } catch (NotFoundException e1) { if (type == DatabaseType.QUEUE) throw e1; } catch (KeyEmptyException e2) { if (type == DatabaseType.BTREE || type == DatabaseType.HASH || type == DatabaseType.RECNO) throw e2; } } /* * Truncate the database to verify that 80 records * remained after the bulk delete. */ Assert.AreEqual(80, db.Truncate()); } else { // Create a bulk buffer for keys. MultipleDatabaseEntry kBuff; if (type == DatabaseType.BTREE) kBuff = new MultipleDatabaseEntry( kList, false); else if (type == DatabaseType.HASH) kBuff = new MultipleDatabaseEntry( kList, false); else if (type == DatabaseType.QUEUE) kBuff = new MultipleDatabaseEntry( kList, true); else kBuff = new MultipleDatabaseEntry( kList, true); /* * Bulk delete in the secondary database with the key * buffer. The primary records corresponding to the * deleted secondary records should be deleted as well. */ secDb.Delete(kBuff); foreach (DatabaseEntry dbt in kList) { try { db.Get(dbt); throw new TestException(); } catch (NotFoundException e1) { if (type == DatabaseType.QUEUE || type == DatabaseType.RECNO) throw e1; } catch (KeyEmptyException e2) { if (type == DatabaseType.BTREE || type == DatabaseType.HASH) throw e2; } } /* * Bulk delete in the secondary database with a * recno-based key buffer. */ if (type == DatabaseType.QUEUE || type == DatabaseType.RECNO) { MultipleDatabaseEntry rBuff = new MultipleDatabaseEntry(rList); secDb.Delete(rBuff); Assert.AreEqual(20, db.Truncate()); } } secDb.Close(); db.Close(); }
public void TestPutNoDuplicate() { testName = "TestPutNoDuplicate"; testHome = testFixtureHome + "/" + testName; string dbFileName = testHome + "/" + testName + ".db"; Configuration.ClearDir(testHome); HashDatabaseConfig hashConfig = new HashDatabaseConfig(); hashConfig.Creation = CreatePolicy.ALWAYS; hashConfig.Duplicates = DuplicatesPolicy.SORTED; hashConfig.TableSize = 20; HashDatabase hashDB = HashDatabase.Open(dbFileName, hashConfig); DatabaseEntry key, data; for (int i = 1; i <= 10; i++) { key = new DatabaseEntry(BitConverter.GetBytes(i)); data = new DatabaseEntry(BitConverter.GetBytes(i)); hashDB.PutNoDuplicate(key, data); } Assert.IsTrue(hashDB.Exists( new DatabaseEntry(BitConverter.GetBytes((int)5)))); hashDB.Close(); }