/// <summary>
/// Return the native DB_COMPACT structure held by
/// <paramref name="compactData"/>, building it on demand.
/// </summary>
/// <param name="compactData">
/// The managed configuration object, or null.
/// </param>
/// <returns>
/// The underlying DB_COMPACT, or null when
/// <paramref name="compactData"/> is null.
/// </returns>
internal static DB_COMPACT getDB_COMPACT(CompactConfig compactData) {
    if (compactData == null)
        return null;

    // Lazily materialize the native structure the first time it is asked for.
    if (compactData.cdata == null)
        compactData.doCompaction();

    return compactData.cdata;
}
/// <summary>
/// Compact the database, and optionally return unused database pages to
/// the underlying filesystem.
/// </summary>
/// <remarks>
/// <para>
/// If <paramref name="txn"/> is non-null, then the operation is
/// performed using that transaction. In this event, large sections of
/// the tree may be locked during the course of the transaction.
/// </para>
/// <para>
/// If <paramref name="txn"/> is null, but the operation occurs in a
/// transactional database, the operation is implicitly transaction
/// protected using multiple transactions. These transactions are
/// periodically committed to avoid locking large sections of the tree.
/// Any deadlocks encountered cause the compaction operation to be
/// retried from the point of the last transaction commit.
/// </para>
/// </remarks>
/// <param name="cdata">Compact configuration parameters</param>
/// <param name="txn">
/// If the operation is part of an application-specified transaction,
/// <paramref name="txn"/> is a Transaction object returned from
/// <see cref="DatabaseEnvironment.BeginTransaction"/>; if
/// the operation is part of a Berkeley DB Concurrent Data Store group,
/// <paramref name="txn"/> is a handle returned from
/// <see cref="DatabaseEnvironment.BeginCDSGroup"/>; otherwise null.
/// </param>
/// <exception cref="System.ArgumentNullException">
/// Thrown if <paramref name="cdata"/> is null.
/// </exception>
/// <returns>Compact operation statistics</returns>
public CompactData Compact(CompactConfig cdata, Transaction txn) {
    /*
     * Fail fast with a descriptive exception rather than an opaque
     * NullReferenceException on the cdata.returnEnd access below.
     * (CompactConfig.getDB_COMPACT already tolerates null, but this
     * method cannot proceed without a configuration.)
     */
    if (cdata == null)
        throw new System.ArgumentNullException("cdata");

    // Only allocate an end-key holder when the caller asked for it.
    DatabaseEntry end = null;
    if (cdata.returnEnd)
        end = new DatabaseEntry();

    db.compact(Transaction.getDB_TXN(txn), cdata.start, cdata.stop,
        CompactConfig.getDB_COMPACT(cdata), cdata.flags, end);

    return new CompactData(CompactConfig.getDB_COMPACT(cdata), end);
}
/// <summary>
/// Compact the database, and optionally return unused database pages to
/// the underlying filesystem.
/// </summary>
/// <remarks>
/// If the operation occurs in a transactional database, the operation
/// is implicitly transaction protected using multiple
/// transactions. These transactions are periodically committed to
/// avoid locking large sections of the tree. Any deadlocks encountered
/// cause the compaction operation to be retried from the point of the
/// last transaction commit.
/// </remarks>
/// <param name="cdata">Compact configuration parameters</param>
/// <returns>Compact operation statistics</returns>
public CompactData Compact(CompactConfig cdata) {
    // Delegate to the transactional overload with no explicit transaction.
    return Compact(cdata, null);
}
// Compact a record-number database and verify that statistics and the
// returned end key are populated.
public void TestCompact() {
    testName = "TestCompact";
    testHome = testFixtureHome + "/" + testName;
    string recnoDBFileName = testHome + "/" + testName + ".db";
    Configuration.ClearDir(testHome);

    RecnoDatabaseConfig recnoConfig = new RecnoDatabaseConfig();
    recnoConfig.Creation = CreatePolicy.ALWAYS;
    recnoConfig.Length = 512;

    using (RecnoDatabase recnoDB = RecnoDatabase.Open(
        recnoDBFileName, recnoConfig)) {
        // Populate records 1 through 5000.
        for (int recNo = 1; recNo <= 5000; recNo++)
            recnoDB.Append(new DatabaseEntry(
                BitConverter.GetBytes(recNo)));

        /*
         * Punch holes in the tree: above record 500, remove every
         * record whose number is not a multiple of 5.
         */
        for (int recNo = 501; recNo <= 5000; recNo++) {
            if (recNo % 5 != 0)
                recnoDB.Delete(new DatabaseEntry(
                    BitConverter.GetBytes(recNo)));
        }

        DatabaseEntry start = new DatabaseEntry(
            BitConverter.GetBytes(1));
        DatabaseEntry stop = new DatabaseEntry(
            BitConverter.GetBytes(2500));
        Assert.IsTrue(recnoDB.Exists(start));
        Assert.IsTrue(recnoDB.Exists(stop));

        // Compact only the [start, stop] range and ask for the end key.
        CompactConfig cCfg = new CompactConfig();
        cCfg.start = start;
        cCfg.stop = stop;
        cCfg.FillPercentage = 30;
        cCfg.Pages = 1;
        cCfg.returnEnd = true;
        cCfg.Timeout = 5000;
        cCfg.TruncatePages = true;

        CompactData compactData = recnoDB.Compact(cCfg);
        Assert.IsNotNull(compactData.End);
        Assert.AreNotEqual(0, compactData.PagesExamined);
    }
}
// Compact a non-transactional btree database and verify that the file
// shrinks and that at least one statistic is reported.
public void TestCompactWithoutTxn() {
    const int nRecs = 10000;
    testName = "TestCompactWithoutTxn";
    testHome = testFixtureHome + "/" + testName;
    string btreeDBFileName = testHome + "/" + testName + ".db";
    Configuration.ClearDir(testHome);

    BTreeDatabaseConfig btreeDBConfig = new BTreeDatabaseConfig();
    btreeDBConfig.Creation = CreatePolicy.ALWAYS;
    // The minimum page size, so compaction has many pages to work with.
    btreeDBConfig.PageSize = 512;
    btreeDBConfig.BTreeCompare =
        new EntryComparisonDelegate(dbIntCompare);

    using (BTreeDatabase btreeDB = BTreeDatabase.Open(
        btreeDBFileName, btreeDBConfig)) {
        // Load records 0 .. 9999 with key == data.
        for (int recNo = 0; recNo < nRecs; recNo++) {
            byte[] bytes = BitConverter.GetBytes(recNo);
            btreeDB.Put(new DatabaseEntry(bytes),
                new DatabaseEntry(bytes));
        }

        /*
         * Sparsen the tree: drop keys below 500, above 7000, and
         * those strictly between 3000 and 5000.
         */
        for (int recNo = 0; recNo < nRecs; recNo++) {
            bool doomed = recNo < 500 || recNo > 7000 ||
                (recNo < 5000 && recNo > 3000);
            if (doomed)
                btreeDB.Delete(new DatabaseEntry(
                    BitConverter.GetBytes(recNo)));
        }

        // Flush so the on-disk size reflects the deletions.
        btreeDB.Sync();
        long fileSize = new FileInfo(btreeDBFileName).Length;

        // Compact database
        CompactConfig cCfg = new CompactConfig();
        cCfg.FillPercentage = 30;
        cCfg.Pages = 10;
        cCfg.Timeout = 1000;
        cCfg.TruncatePages = true;
        cCfg.start = new DatabaseEntry(BitConverter.GetBytes(1));
        cCfg.stop = new DatabaseEntry(BitConverter.GetBytes(7000));
        CompactData compactData = btreeDB.Compact(cCfg);

        // At least one statistics field must have been populated.
        Assert.IsFalse((compactData.Deadlocks == 0) &&
            (compactData.Levels == 0) &&
            (compactData.PagesExamined == 0) &&
            (compactData.PagesFreed == 0) &&
            (compactData.PagesTruncated == 0));

        btreeDB.Sync();
        long compactedFileSize =
            new FileInfo(btreeDBFileName).Length;
        Assert.Less(compactedFileSize, fileSize);
    }
}
// Sorted alpha by method name
/// <summary>
/// Compact the database, and optionally return unused database pages to
/// the underlying filesystem.
/// </summary>
/// <remarks>
/// If the operation occurs in a transactional database, the operation
/// will be implicitly transaction protected using multiple
/// transactions. These transactions will be periodically committed to
/// avoid locking large sections of the tree. Any deadlocks encountered
/// cause the compaction operation to be retried from the point of the
/// last transaction commit.
/// </remarks>
/// <param name="cdata">Compact configuration parameters</param>
/// <returns>Compact operation statistics</returns>
public CompactData Compact(CompactConfig cdata) {
    // Forward to the two-argument overload without a transaction.
    return Compact(cdata, null);
}
// Compact a non-transactional btree database of random-string records
// and verify the statistics fields and the file-size reduction.
public void TestCompactWithoutTxn() {
    const int nRecs = 1000;
    testName = "TestCompactWithoutTxn";
    SetUpTest(true);
    string btreeDBFileName = testHome + "/" + testName + ".db";

    BTreeDatabaseConfig btreeDBConfig = new BTreeDatabaseConfig();
    btreeDBConfig.Creation = CreatePolicy.ALWAYS;
    // The minimum page size, so compaction has many pages to work with.
    btreeDBConfig.PageSize = 512;
    btreeDBConfig.BTreeCompare =
        new EntryComparisonDelegate(dbIntCompare);

    using (BTreeDatabase btreeDB = BTreeDatabase.Open(
        btreeDBFileName, btreeDBConfig)) {
        // Load keys 0 .. 999, each carrying a 100-byte random string.
        for (int recNo = 0; recNo < nRecs; recNo++) {
            DatabaseEntry key = new DatabaseEntry(
                BitConverter.GetBytes(recNo));
            DatabaseEntry data = new DatabaseEntry(
                ASCIIEncoding.ASCII.GetBytes(
                Configuration.RandomString(100)));
            btreeDB.Put(key, data);
        }

        /*
         * Sparsen the tree: drop the bottom 5%, the top 30%, and
         * the band between 30% and 50% of the key range.
         */
        int lowCut = (int)(nRecs * 0.05);
        int highCut = (int)(nRecs * 0.7);
        int bandTop = (int)(nRecs * 0.5);
        int bandBottom = (int)(nRecs * 0.3);
        for (int recNo = 0; recNo < nRecs; recNo++) {
            bool doomed = recNo < lowCut || recNo > highCut ||
                (recNo < bandTop && recNo > bandBottom);
            if (doomed)
                btreeDB.Delete(new DatabaseEntry(
                    BitConverter.GetBytes(recNo)));
        }

        // Flush so the on-disk size reflects the deletions.
        btreeDB.Sync();
        long fileSize = new FileInfo(btreeDBFileName).Length;

        // Compact database
        CompactConfig cCfg = new CompactConfig();
        cCfg.FillPercentage = 80;
        cCfg.Pages = 4;
        cCfg.Timeout = 1000;
        cCfg.TruncatePages = true;
        cCfg.start = new DatabaseEntry(BitConverter.GetBytes(1));
        cCfg.stop = new DatabaseEntry(BitConverter.GetBytes(7000));
        CompactData compactData = btreeDB.Compact(cCfg);

        // Verify output statistics fields.
        Assert.AreEqual(0, compactData.Deadlocks);
        Assert.LessOrEqual(0, compactData.EmptyBuckets);
        Assert.LessOrEqual(0, compactData.Levels);
        Assert.Less(0, compactData.PagesExamined);
        Assert.Less(0, compactData.PagesFreed);
        Assert.Less(compactData.PagesFreed,
            compactData.PagesTruncated);

        btreeDB.Sync();
        long compactedFileSize =
            new FileInfo(btreeDBFileName).Length;
        Assert.Less(compactedFileSize, fileSize);
    }
}