public Store(string path) { var families = new ColumnFamilies(); db = RocksDb.OpenReadOnly(Options.Default, Path.GetFullPath(path), families, false); defaultFamily = db.GetDefaultColumnFamily(); }
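A minimal sketch of how the read-only handle opened above might be consumed; the TryGet helper is hypothetical, but it only uses the db and defaultFamily fields initialized in the constructor.

// Hypothetical helper on the same Store class: point lookup against the
// default column family of the read-only handle. Returns null when the key
// is not present.
public byte[] TryGet(byte[] key)
{
    return db.Get(key, cf: defaultFamily);
}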
static void Main(string[] args) { string temp = Path.GetTempPath(); string path = Environment.ExpandEnvironmentVariables(Path.Combine(temp, "rocksdb_cf_example")); var options = new DbOptions() .SetCreateIfMissing(true) .SetCreateMissingColumnFamilies(true); var columnFamilies = new ColumnFamilies { { "reverse", new ColumnFamilyOptions() }, }; using (var db = RocksDb.Open(options, path, columnFamilies)) { var reverse = db.GetColumnFamily("reverse"); db.Put("one", "uno"); db.Put("two", "dos"); db.Put("three", "tres"); db.Put("uno", "one", cf: reverse); db.Put("dos", "two", cf: reverse); db.Put("tres", "three", cf: reverse); } using (var db = RocksDb.Open(options, path, columnFamilies)) { var reverse = db.GetColumnFamily("reverse"); string uno = db.Get("one"); string one = db.Get("uno", cf: reverse); string nada; nada = db.Get("uno"); nada = db.Get("one", cf: reverse); } using (var db = RocksDb.Open(options, path, columnFamilies)) { db.DropColumnFamily("reverse"); var reverse = db.CreateColumnFamily(new ColumnFamilyOptions(), "reverse"); var nada = db.Get("uno", cf: reverse); db.Put("red", "rouge", cf: reverse); } using (var db = RocksDb.Open(options, path, columnFamilies)) { var reverse = db.GetColumnFamily("reverse"); var nada = db.Get("uno", cf: reverse); var rouge = db.Get("red", cf: reverse); } using (var db = RocksDb.OpenReadOnly(options, path, columnFamilies, false)) { string uno = db.Get("one"); } }
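The read-only open at the end of Main only touches the default column family. As a follow-on sketch (reusing the options, path and columnFamilies locals from Main, and assuming the column-family overload of NewIterator), the "reverse" family can be read and scanned through the same kind of read-only handle:

using (var db = RocksDb.OpenReadOnly(options, path, columnFamilies, false))
{
    var reverse = db.GetColumnFamily("reverse");
    Console.WriteLine(db.Get("uno", cf: reverse)); // prints "one"

    // Column-family iterators work on read-only handles as well.
    using (var iterator = db.NewIterator(reverse))
    {
        for (iterator.SeekToFirst(); iterator.Valid(); iterator.Next())
        {
            Console.WriteLine($"{iterator.StringKey()} => {iterator.StringValue()}");
        }
    }
}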
public CheckpointStore(string path) { db = RocksDb.OpenReadOnly(new DbOptions(), path, RocksDbStore.ColumnFamilies, false); var metadataColumnHandle = db.GetColumnFamily(RocksDbStore.METADATA_FAMILY); blocks = new DataTracker<UInt256, BlockState>(db, RocksDbStore.BLOCK_FAMILY); transactions = new DataTracker<UInt256, TransactionState>(db, RocksDbStore.TX_FAMILY); accounts = new DataTracker<UInt160, AccountState>(db, RocksDbStore.ACCOUNT_FAMILY); _unspentCoins = new DataTracker<UInt256, UnspentCoinState>(db, RocksDbStore.UNSPENT_COIN_FAMILY); spentCoins = new DataTracker<UInt256, SpentCoinState>(db, RocksDbStore.SPENT_COIN_FAMILY); validators = new DataTracker<ECPoint, ValidatorState>(db, RocksDbStore.VALIDATOR_FAMILY); assets = new DataTracker<UInt256, AssetState>(db, RocksDbStore.ASSET_FAMILY); contracts = new DataTracker<UInt160, ContractState>(db, RocksDbStore.CONTRACT_FAMILY); storages = new DataTracker<StorageKey, StorageItem>(db, RocksDbStore.STORAGE_FAMILY); headerHashList = new DataTracker<UInt32Wrapper, HeaderHashList>(db, RocksDbStore.HEADER_HASH_LIST_FAMILY); validatorsCount = new MetadataTracker<ValidatorsCountState>(db, RocksDbStore.VALIDATORS_COUNT_KEY, metadataColumnHandle); blockHashIndex = new MetadataTracker<HashIndexState>(db, RocksDbStore.CURRENT_BLOCK_KEY, metadataColumnHandle); headerHashIndex = new MetadataTracker<HashIndexState>(db, RocksDbStore.CURRENT_HEADER_KEY, metadataColumnHandle); }
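DataTracker and MetadataTracker are project-specific types that are not shown here. As a rough, hypothetical sketch of the idea (a column family handle pinned per tracker plus raw byte lookups; the real types also handle serialization and change tracking):

// Illustrative only; not the real DataTracker implementation.
class ColumnFamilyReader
{
    private readonly RocksDb db;
    private readonly ColumnFamilyHandle family;

    public ColumnFamilyReader(RocksDb db, string familyName)
    {
        this.db = db;
        this.family = db.GetColumnFamily(familyName);
    }

    // Returns null when the key does not exist in this column family.
    public byte[] TryGet(byte[] key) => db.Get(key, cf: family);
}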
public override void Execute() { var popts = new PlaneDBOptions().DisableJournal(); if (!string.IsNullOrEmpty(Owner.Passphrase)) { popts = popts.EnableEncryption(Owner.Passphrase); } else if (Owner.Compressed) { popts = popts.EnableCompression(); } if (!string.IsNullOrEmpty(Owner.Tablespace)) { popts = popts.UsingTableSpace(Owner.Tablespace); } if (From == null) { throw new GetOptException("No from"); } if (To == null) { throw new GetOptException("No to"); } using var rocks = RocksDb.OpenReadOnly(new DbOptions(), From.FullName, false); using var plane = new PlaneDB(To, FileMode.OpenOrCreate, popts); plane.OnFlushMemoryTable += (_, __) => Console.WriteLine("Flushed memory table"); plane.OnMergedTables += (_, __) => Console.WriteLine("Merged tables"); plane.Clear(); var iter = rocks.NewIterator(); plane.MassInsert(() => CopyFromRocks(plane, iter)); Console.WriteLine($"{copyCount:N0} entries copied in total"); }
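CopyFromRocks is not shown above. A sketch of what its RocksDB side might look like follows; the plane.SetValue call and the copyCount increment are assumptions about the surrounding class, while the iterator usage (SeekToFirst/Valid/Next and Key/Value) is standard RocksDbSharp:

// Hypothetical body for CopyFromRocks: walk every entry of the read-only
// RocksDB and hand it to the PlaneDB instance.
private void CopyFromRocks(PlaneDB plane, Iterator iter)
{
    for (iter.SeekToFirst(); iter.Valid(); iter.Next())
    {
        // Key() and Value() return byte[] copies of the current entry.
        plane.SetValue(iter.Key(), iter.Value());
        copyCount++;
    }
}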
public void FunctionalTest() { string temp = Path.GetTempPath(); var testdb = Path.Combine(temp, "functional_test"); string path = Environment.ExpandEnvironmentVariables(testdb); if (Directory.Exists(testdb)) { Directory.Delete(testdb, true); } var options = new DbOptions() .SetCreateIfMissing(true) .EnableStatistics(); // Using standard open using (var db = RocksDb.Open(options, path)) { // With strings string value = db.Get("key"); db.Put("key", "value"); Assert.Equal("value", db.Get("key")); Assert.Null(db.Get("non-existent-key")); db.Remove("key"); Assert.Null(db.Get("value")); // With bytes db.Put(Encoding.UTF8.GetBytes("key"), Encoding.UTF8.GetBytes("value")); Assert.True(BinaryComparer.Default.Equals(Encoding.UTF8.GetBytes("value"), db.Get(Encoding.UTF8.GetBytes("key")))); // non-existent kiey Assert.Null(db.Get(new byte[] { 0, 1, 2 })); db.Remove(Encoding.UTF8.GetBytes("key")); Assert.Null(db.Get(Encoding.UTF8.GetBytes("key"))); db.Put(Encoding.UTF8.GetBytes("key"), new byte[] { 0, 1, 2, 3, 4, 5, 6, 7 }); // With buffers var buffer = new byte[100]; long length = db.Get(Encoding.UTF8.GetBytes("key"), buffer, 0, buffer.Length); Assert.Equal(8, length); Assert.Equal(new byte[] { 0, 1, 2, 3, 4, 5, 6, 7 }, buffer.Take((int)length).ToList()); // Write batches // With strings using (WriteBatch batch = new WriteBatch() .Put("one", "uno") .Put("two", "deuce") .Put("two", "dos") .Put("three", "tres")) { db.Write(batch); } Assert.Equal("uno", db.Get("one")); // With save point using (WriteBatch batch = new WriteBatch()) { batch .Put("hearts", "red") .Put("diamonds", "red"); batch.SetSavePoint(); batch .Put("clubs", "black"); batch.SetSavePoint(); batch .Put("spades", "black"); batch.RollbackToSavePoint(); db.Write(batch); } Assert.Equal("red", db.Get("diamonds")); Assert.Equal("black", db.Get("clubs")); Assert.Null(db.Get("spades")); // With bytes var utf8 = Encoding.UTF8; using (WriteBatch batch = new WriteBatch() .Put(utf8.GetBytes("four"), new byte[] { 4, 4, 4 }) .Put(utf8.GetBytes("five"), new byte[] { 5, 5, 5 })) { db.Write(batch); } Assert.True(BinaryComparer.Default.Equals(new byte[] { 4, 4, 4 }, db.Get(utf8.GetBytes("four")))); // Snapshots using (var snapshot = db.CreateSnapshot()) { var before = db.Get("one"); db.Put("one", "1"); var useSnapshot = new ReadOptions() .SetSnapshot(snapshot); // the database value was written Assert.Equal("1", db.Get("one")); // but the snapshot still sees the old version var after = db.Get("one", readOptions: useSnapshot); Assert.Equal(before, after); } var two = db.Get("two"); Assert.Equal("dos", two); // Iterators using (var iterator = db.NewIterator( readOptions: new ReadOptions() .SetIterateUpperBound("t") )) { iterator.Seek("k"); Assert.True(iterator.Valid()); Assert.Equal("key", iterator.StringKey()); iterator.Next(); Assert.True(iterator.Valid()); Assert.Equal("one", iterator.StringKey()); Assert.Equal("1", iterator.StringValue()); iterator.Next(); Assert.False(iterator.Valid()); } // MultiGet var multiGetResult = db.MultiGet(new[] { "two", "three", "nine" }); Assert.Equal( expected: new[] { new KeyValuePair <string, string>("two", "dos"), new KeyValuePair <string, string>("three", "tres"), new KeyValuePair <string, string>("nine", null) }, actual: multiGetResult ); } // Test with column families var optionsCf = new DbOptions() .SetCreateIfMissing(true) .SetCreateMissingColumnFamilies(true); var columnFamilies = new ColumnFamilies { { "reverse", new ColumnFamilyOptions() }, }; using (var db = RocksDb.Open(optionsCf, path, columnFamilies)) { var 
reverse = db.GetColumnFamily("reverse"); db.Put("one", "uno"); db.Put("two", "dos"); db.Put("three", "tres"); db.Put("uno", "one", cf: reverse); db.Put("dos", "two", cf: reverse); db.Put("tres", "three", cf: reverse); } // Test Cf Delete using (var db = RocksDb.Open(optionsCf, path, columnFamilies)) { var reverse = db.GetColumnFamily("reverse"); db.Put("cuatro", "four", cf: reverse); db.Put("cinco", "five", cf: reverse); Assert.Equal("four", db.Get("cuatro", cf: reverse)); Assert.Equal("five", db.Get("cinco", cf: reverse)); byte[] keyBytes = Encoding.UTF8.GetBytes("cuatro"); db.Remove(keyBytes, reverse); db.Remove("cinco", reverse); Assert.Null(db.Get("cuatro", cf: reverse)); Assert.Null(db.Get("cinco", cf: reverse)); } // Test list { var list = RocksDb.ListColumnFamilies(optionsCf, path); Assert.Equal(new[] { "default", "reverse" }, list.ToArray()); } // Test reopen with column families using (var db = RocksDb.Open(optionsCf, path, columnFamilies)) { var reverse = db.GetColumnFamily("reverse"); Assert.Equal("uno", db.Get("one")); Assert.Equal("one", db.Get("uno", cf: reverse)); Assert.Null(db.Get("uno")); Assert.Null(db.Get("one", cf: reverse)); } // Test dropping and creating column family using (var db = RocksDb.Open(options, path, columnFamilies)) { db.DropColumnFamily("reverse"); var reverse = db.CreateColumnFamily(new ColumnFamilyOptions(), "reverse"); Assert.Null(db.Get("uno", cf: reverse)); db.Put("red", "rouge", cf: reverse); Assert.Equal("rouge", db.Get("red", cf: reverse)); } // Test reopen after drop and create using (var db = RocksDb.Open(options, path, columnFamilies)) { var reverse = db.GetColumnFamily("reverse"); Assert.Null(db.Get("uno", cf: reverse)); Assert.Equal("rouge", db.Get("red", cf: reverse)); } // Test read only using (var db = RocksDb.OpenReadOnly(options, path, columnFamilies, false)) { Assert.Equal("uno", db.Get("one")); } // Test SstFileWriter { var envOpts = new EnvOptions(); var ioOpts = new ColumnFamilyOptions(); var sst = new SstFileWriter(envOpts, ioOpts); var filename = Path.Combine(temp, "test.sst"); if (File.Exists(filename)) { File.Delete(filename); } sst.Open(filename); sst.Add("four", "quatro"); sst.Add("one", "uno"); sst.Add("two", "dos"); sst.Finish(); using (var db = RocksDb.Open(options, path, columnFamilies)) { Assert.NotEqual("four", db.Get("four")); var ingestOptions = new IngestExternalFileOptions() .SetMoveFiles(true); db.IngestExternalFiles(new string[] { filename }, ingestOptions); Assert.Equal("quatro", db.Get("four")); } } // test comparator unsafe { var comparator = new IntegerStringComparator(); var opts = new ColumnFamilyOptions() .SetComparator(comparator); var filename = Path.Combine(temp, "test.sst"); if (File.Exists(filename)) { File.Delete(filename); } var sst = new SstFileWriter(ioOptions: opts); sst.Open(filename); sst.Add("111", "111"); sst.Add("1001", "1001"); // this order is only allowed using an integer comparator sst.Finish(); } // test write batch with index { var wbwi = new WriteBatchWithIndex(reservedBytes: 1024); wbwi.Put("one", "un"); wbwi.Put("two", "deux"); var oneValueIn = Encoding.UTF8.GetBytes("one"); var oneValueOut = wbwi.Get("one"); Assert.Equal("un", oneValueOut); using (var db = RocksDb.Open(options, path, columnFamilies)) { var oneCombinedOut = wbwi.Get(db, "one"); var threeCombinedOut = wbwi.Get(db, "three"); Assert.Equal("un", oneCombinedOut); Assert.Equal("tres", threeCombinedOut); using (var wbIterator = wbwi.NewIterator(db.NewIterator())) { wbIterator.Seek("o"); Assert.True(wbIterator.Valid()); var 
itkey = wbIterator.StringKey(); Assert.Equal("one", itkey); var itval = wbIterator.StringValue(); Assert.Equal("un", itval); wbIterator.Next(); Assert.True(wbIterator.Valid()); itkey = wbIterator.StringKey(); Assert.Equal("three", itkey); itval = wbIterator.StringValue(); Assert.Equal("tres", itval); wbIterator.Next(); Assert.True(wbIterator.Valid()); itkey = wbIterator.StringKey(); Assert.Equal("two", itkey); itval = wbIterator.StringValue(); Assert.Equal("deux", itval); wbIterator.Next(); Assert.False(wbIterator.Valid()); } db.Write(wbwi); var oneDbOut = wbwi.Get("one"); Assert.Equal("un", oneDbOut); } } // compact range { using (var db = RocksDb.Open(options, path, columnFamilies)) { db.CompactRange("o", "tw"); } } }
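The IntegerStringComparator used in the comparator test above is not shown. One plausible way to build it, assuming RocksDbSharp.StringComparator accepts any IComparer<string> (as the StringComparer-based snippet later in this section suggests), is to wrap a numeric comparison:

// Hypothetical construction of an integer-ordering key comparator; the real
// test type may be implemented differently.
var integerComparator = new RocksDbSharp.StringComparator(
    Comparer<string>.Create((a, b) => long.Parse(a).CompareTo(long.Parse(b))));

var integerOrderedOptions = new ColumnFamilyOptions()
    .SetComparator(integerComparator);

Under this ordering "111" sorts before "1001", which is why the SST writer in the test may add the keys in that order.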
/// <summary> /// Provides access to and/or creates a RocksDb persistent key-value store. /// </summary> /// <param name="storeDirectory"> /// The directory containing the key-value store. /// </param> /// <param name="defaultColumnKeyTracked"> /// Whether the default column should be key-tracked. /// This will create two columns for the same data, /// one with just keys and the other with key and value. /// </param> /// <param name="additionalColumns"> /// The names of any additional column families in the key-value store. /// If no additional column families are provided, all entries will be stored /// in the default column. /// Column families are analogous to tables in relational databases. /// </param> /// <param name="additionalKeyTrackedColumns"> /// The names of any additional column families in the key-value store that /// should also be key-tracked. This will create two columns for the same data, /// one with just keys and the other with key and value. /// Column families are analogous to tables in relational databases. /// </param> /// <param name="readOnly"> /// Whether the database should be opened read-only. This prevents modifications and /// creating unnecessary metadata files related to write sessions. /// </param> /// <param name="dropMismatchingColumns"> /// If a store already exists at the given directory, whether any columns that mismatch the columns that were passed into the constructor /// should be dropped. This will cause data loss and can only be applied in read-write mode. /// </param> /// <param name="rotateLogs"> /// Have RocksDb rotate logs, useful for debugging performance issues. It will rotate logs every 12 hours, /// up to a maximum of 60 logs (i.e. 30 days). When the maximum number of logs is reached, the oldest logs /// are overwritten in a circular fashion. /// /// Every time the RocksDb instance is opened, the current log file is truncated, which means that if you /// open the DB more than once in a 12 hour period, you will only have partial information. /// </param> public RocksDbStore( string storeDirectory, bool defaultColumnKeyTracked = false, IEnumerable<string> additionalColumns = null, IEnumerable<string> additionalKeyTrackedColumns = null, bool readOnly = false, bool dropMismatchingColumns = false, bool rotateLogs = false) { m_storeDirectory = storeDirectory; m_defaults.DbOptions = new DbOptions() .SetCreateIfMissing(true) .SetCreateMissingColumnFamilies(true) // The background compaction threads run in low priority, so they should not hamper the rest of // the system. The number of cores in the system is what we want here according to official docs, // and we are setting this to the number of logical processors, which may be higher. .SetMaxBackgroundCompactions(Environment.ProcessorCount) .SetMaxBackgroundFlushes(1) .IncreaseParallelism(Environment.ProcessorCount / 2) // Ensure we have performance statistics for profiling .EnableStatistics(); // A small comment on things tested that did not work: // * SetAllowMmapReads(true) and SetAllowMmapWrites(true) produce a dramatic performance drop // * SetUseDirectReads(true) disables the OS cache, and although that's good for random point lookups, // it produces a dramatic performance drop otherwise. m_defaults.WriteOptions = new WriteOptions() // Disable the write ahead log to reduce disk IO. The write ahead log // is used to recover the store on crashes, so a crash will lose some writes. 
// Writes will be made in-memory only until the write buffer size // is reached and then they will be flushed to storage files. .DisableWal(1) // This option is off by default, but just making sure that the C# wrapper // doesn't change anything. The idea is that the DB won't wait for fsync to // return before acknowledging the write as successful. This affects // correctness, because a write may be ACKd before it is actually on disk, // but it is much faster. .SetSync(false); var blockBasedTableOptions = new BlockBasedTableOptions() // Use a bloom filter to help reduce read amplification on point lookups. 10 bits per key yields a // ~1% false positive rate as per the RocksDB documentation. This builds one filter per SST, which // means it is optimized for the case where a key is not present. .SetFilterPolicy(BloomFilterPolicy.Create(10, false)) // Use a hash index in SST files to speed up point lookup. .SetIndexType(BlockBasedTableIndexType.HashSearch) // Whether to use the whole key or a prefix of it (obtained through the prefix extractor below). // Since the prefix extractor is a no-op, better performance is achieved by filtering on the whole key // (i.e. setting this to true). .SetWholeKeyFiltering(true); m_defaults.ColumnFamilyOptions = new ColumnFamilyOptions() .SetBlockBasedTableFactory(blockBasedTableOptions) .SetPrefixExtractor(SliceTransform.CreateNoOp()); if (rotateLogs) { // Maximum number of information log files m_defaults.DbOptions.SetKeepLogFileNum(60); // Do not rotate information logs based on file size m_defaults.DbOptions.SetMaxLogFileSize(0); // How long before we rotate the current information log file (12 hours, expressed in seconds) m_defaults.DbOptions.SetLogFileTimeToRoll((ulong)TimeSpan.FromHours(12).TotalSeconds); } m_columns = new Dictionary<string, ColumnFamilyInfo>(); additionalColumns = additionalColumns ?? CollectionUtilities.EmptyArray<string>(); additionalKeyTrackedColumns = additionalKeyTrackedColumns ?? 
CollectionUtilities.EmptyArray<string>(); // The columns that exist in the store on disk may not be in sync with the columns being passed into the constructor HashSet<string> existingColumns; try { existingColumns = new HashSet<string>(RocksDb.ListColumnFamilies(m_defaults.DbOptions, m_storeDirectory)); } catch (RocksDbException) { // If there is no existing store, an exception will be thrown, ignore it existingColumns = new HashSet<string>(); } // In read-only mode, open all existing columns in the store without attempting to validate them against the expected column families if (readOnly) { var columnFamilies = new ColumnFamilies(); foreach (var name in existingColumns) { columnFamilies.Add(name, m_defaults.ColumnFamilyOptions); } m_store = RocksDb.OpenReadOnly(m_defaults.DbOptions, m_storeDirectory, columnFamilies, errIfLogFileExists: false); } else { // For read-write mode, column families may be added, so set up column families schema var columnsSchema = new HashSet<string>(additionalColumns); // Default column columnsSchema.Add(ColumnFamilies.DefaultName); // For key-tracked column families, create two columns: // 1: Normal column of { key : value } // 2: Key-tracking column of { key : empty-value } if (defaultColumnKeyTracked) { // To be robust to the RocksDB-selected default column name changing, // just name the default column's key-tracking column KeyColumnSuffix columnsSchema.Add(KeyColumnSuffix); } foreach (var name in additionalKeyTrackedColumns) { columnsSchema.Add(name); columnsSchema.Add(name + KeyColumnSuffix); } // Figure out which columns are not part of the schema var outsideSchemaColumns = new List<string>(existingColumns.Except(columnsSchema)); // RocksDB requires all columns in the store to be opened in read-write mode, so merge existing columns // with the columns schema that was passed into the constructor existingColumns.UnionWith(columnsSchema); var columnFamilies = new ColumnFamilies(); foreach (var name in existingColumns) { columnFamilies.Add(name, m_defaults.ColumnFamilyOptions); } m_store = RocksDb.Open(m_defaults.DbOptions, m_storeDirectory, columnFamilies); // Provide an opportunity to update the store to the new column family schema if (dropMismatchingColumns) { foreach (var name in outsideSchemaColumns) { m_store.DropColumnFamily(name); existingColumns.Remove(name); } } } var userFacingColumns = existingColumns.Where(name => !name.EndsWith(KeyColumnSuffix)); foreach (var name in userFacingColumns) { var isKeyTracked = existingColumns.Contains(name + KeyColumnSuffix); m_columns.Add(name, new ColumnFamilyInfo() { Handle = m_store.GetColumnFamily(name), UseKeyTracking = isKeyTracked, KeyHandle = isKeyTracked ? m_store.GetColumnFamily(name + KeyColumnSuffix) : null, }); } m_columns.TryGetValue(ColumnFamilies.DefaultName, out m_defaultColumnFamilyInfo); }
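The constructor above only wires up the key-tracked columns; a hypothetical write path that keeps the shadow key column in sync (field and type names taken from the snippet, the helper itself is illustrative) might look like:

// Write the value to the normal column and, when key tracking is enabled,
// an empty value to the shadow key column so keys can later be enumerated
// without reading values.
private void PutInColumn(ColumnFamilyInfo column, byte[] key, byte[] value)
{
    m_store.Put(key, value, cf: column.Handle, writeOptions: m_defaults.WriteOptions);
    if (column.UseKeyTracking)
    {
        m_store.Put(key, Array.Empty<byte>(), cf: column.KeyHandle, writeOptions: m_defaults.WriteOptions);
    }
}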
/// <summary> /// Provides access to and/or creates a RocksDb persistent key-value store. /// </summary> public RocksDbStore(RocksDbStoreArguments arguments) { m_storeDirectory = arguments.StoreDirectory; m_openBulkLoad = arguments.OpenBulkLoad; m_defaults.DbOptions = new DbOptions() .SetCreateIfMissing(true) .SetCreateMissingColumnFamilies(true) // The background compaction threads run in low priority, so they should not hamper the rest of // the system. The number of cores in the system is what we want here according to official docs, // and we are setting this to the number of logical processors, which may be higher. // See: https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide#parallelism-options #if !PLATFORM_OSX .SetMaxBackgroundCompactions(Environment.ProcessorCount) .SetMaxBackgroundFlushes(1) #else // The memtable uses significant chunks of available system memory on macOS, so we increase the number // of background flushing threads (low priority) and set the DB write buffer size. This allows for // up to 128 MB in memtables across all column families before we flush to disk. .SetMaxBackgroundCompactions(Environment.ProcessorCount / 4) .SetMaxBackgroundFlushes(Environment.ProcessorCount / 4) .SetDbWriteBufferSize(128 << 20) #endif .IncreaseParallelism(Environment.ProcessorCount / 2); if (arguments.EnableStatistics) { m_defaults.DbOptions.EnableStatistics(); } if (arguments.OpenBulkLoad) { m_defaults.DbOptions.PrepareForBulkLoad(); } // Maximum number of information log files if (arguments.RotateLogsNumFiles != null) { m_defaults.DbOptions.SetKeepLogFileNum(arguments.RotateLogsNumFiles.Value); } // Do not rotate information logs based on file size if (arguments.RotateLogsMaxFileSizeBytes != null) { m_defaults.DbOptions.SetMaxLogFileSize(arguments.RotateLogsMaxFileSizeBytes.Value); } // How long before we rotate the current information log file if (arguments.RotateLogsMaxAge != null) { m_defaults.DbOptions.SetLogFileTimeToRoll((ulong)arguments.RotateLogsMaxAge.Value.TotalSeconds); } if (arguments.FastOpen) { // max_file_opening_threads is defaulted to 16, so no need to update here. RocksDbSharp.Native.Instance.rocksdb_options_set_skip_stats_update_on_db_open(m_defaults.DbOptions.Handle, true); } if (arguments.DisableAutomaticCompactions) { m_defaults.DbOptions.SetDisableAutoCompactions(1); } // A small comment on things tested that did not work: // * SetAllowMmapReads(true) and SetAllowMmapWrites(true) produce a dramatic performance drop // * SetUseDirectReads(true) disables the OS cache, and although that's good for random point lookups, // it produces a dramatic performance drop otherwise. m_defaults.WriteOptions = new WriteOptions() // Disable the write ahead log to reduce disk IO. The write ahead log // is used to recover the store on crashes, so a crash will lose some writes. // Writes will be made in-memory only until the write buffer size // is reached and then they will be flushed to storage files. .DisableWal(1) // This option is off by default, but just making sure that the C# wrapper // doesn't change anything. The idea is that the DB won't wait for fsync to // return before acknowledging the write as successful. This affects // correctness, because a write may be ACKd before it is actually on disk, // but it is much faster. .SetSync(false); var blockBasedTableOptions = new BlockBasedTableOptions() // Use a bloom filter to help reduce read amplification on point lookups. 10 bits per key yields a // ~1% false positive rate as per the RocksDB documentation. 
This builds one filter per SST, which // means it is optimized for the case where a key is not present. .SetFilterPolicy(BloomFilterPolicy.Create(10, false)) // Use a hash index in SST files to speed up point lookup. .SetIndexType(BlockBasedTableIndexType.HashSearch) // Whether to use the whole key or a prefix of it (obtained through the prefix extractor below). // Since the prefix extractor is a no-op, better performance is achieved by filtering on the whole key // (i.e. setting this to true). .SetWholeKeyFiltering(true); m_defaults.ColumnFamilyOptions = new ColumnFamilyOptions() #if PLATFORM_OSX // As advised by the official documentation, LZ4 is the preferred compression algorithm, and our RocksDB // dynamic library has been compiled to support it on macOS. Fall back to Snappy on other systems (the default). .SetCompression(CompressionTypeEnum.rocksdb_lz4_compression) #endif .SetBlockBasedTableFactory(blockBasedTableOptions) .SetPrefixExtractor(SliceTransform.CreateNoOp()); m_columns = new Dictionary<string, ColumnFamilyInfo>(); // The columns that exist in the store on disk may not be in sync with the columns being passed into the constructor HashSet<string> existingColumns; try { existingColumns = new HashSet<string>(RocksDb.ListColumnFamilies(m_defaults.DbOptions, m_storeDirectory)); } catch (RocksDbException) { // If there is no existing store, an exception will be thrown, ignore it existingColumns = new HashSet<string>(); } // In read-only mode, open all existing columns in the store without attempting to validate them against the expected column families if (arguments.ReadOnly) { var columnFamilies = new ColumnFamilies(); foreach (var name in existingColumns) { columnFamilies.Add(name, m_defaults.ColumnFamilyOptions); } m_store = RocksDb.OpenReadOnly(m_defaults.DbOptions, m_storeDirectory, columnFamilies, errIfLogFileExists: false); } else { // For read-write mode, column families may be added, so set up column families schema var additionalColumns = arguments.AdditionalColumns ?? CollectionUtilities.EmptyArray<string>(); var columnsSchema = new HashSet<string>(additionalColumns); // Default column columnsSchema.Add(ColumnFamilies.DefaultName); // For key-tracked column families, create two columns: // 1: Normal column of { key : value } // 2: Key-tracking column of { key : empty-value } if (arguments.DefaultColumnKeyTracked) { // To be robust to the RocksDB-selected default column name changing, // just name the default column's key-tracking column KeyColumnSuffix columnsSchema.Add(KeyColumnSuffix); } var additionalKeyTrackedColumns = arguments.AdditionalKeyTrackedColumns ?? 
CollectionUtilities.EmptyArray <string>(); foreach (var name in additionalKeyTrackedColumns) { columnsSchema.Add(name); columnsSchema.Add(name + KeyColumnSuffix); } // Figure out which columns are not part of the schema var outsideSchemaColumns = new List <string>(existingColumns.Except(columnsSchema)); // RocksDB requires all columns in the store to be opened in read-write mode, so merge existing columns // with the columns schema that was passed into the constructor existingColumns.UnionWith(columnsSchema); var columnFamilies = new ColumnFamilies(); foreach (var name in existingColumns) { columnFamilies.Add(name, m_defaults.ColumnFamilyOptions); } m_store = RocksDb.Open(m_defaults.DbOptions, m_storeDirectory, columnFamilies); // Provide an opportunity to update the store to the new column family schema if (arguments.DropMismatchingColumns) { foreach (var name in outsideSchemaColumns) { m_store.DropColumnFamily(name); existingColumns.Remove(name); } } } var userFacingColumns = existingColumns.Where(name => !name.EndsWith(KeyColumnSuffix)); foreach (var name in userFacingColumns) { var isKeyTracked = existingColumns.Contains(name + KeyColumnSuffix); m_columns.Add(name, new ColumnFamilyInfo() { Handle = m_store.GetColumnFamily(name), UseKeyTracking = isKeyTracked, KeyHandle = isKeyTracked ? m_store.GetColumnFamily(name + KeyColumnSuffix) : null, }); } m_columns.TryGetValue(ColumnFamilies.DefaultName, out m_defaultColumnFamilyInfo); }
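For reference, a hedged example of how this constructor might be called; the property names mirror how the constructor reads them, but the exact shape of RocksDbStoreArguments (and whether it supports object-initializer syntax) is an assumption:

// Illustrative invocation only.
var store = new RocksDbStore(new RocksDbStoreArguments
{
    StoreDirectory = @"D:\stores\cacheContent",
    ReadOnly = false,
    EnableStatistics = true,
    OpenBulkLoad = false,
    RotateLogsNumFiles = 60,
    RotateLogsMaxFileSizeBytes = 0,      // 0 = do not rotate by size
    RotateLogsMaxAge = TimeSpan.FromHours(12),
    AdditionalColumns = new[] { "metadata" },
    AdditionalKeyTrackedColumns = new[] { "content" },
    DropMismatchingColumns = false,
});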
public void FunctionalTest() { string temp = Path.GetTempPath(); var testdir = Path.Combine(temp, "functional_test"); var testdb = Path.Combine(testdir, "main"); var testcp = Path.Combine(testdir, "cp"); var path = Environment.ExpandEnvironmentVariables(testdb); var cppath = Environment.ExpandEnvironmentVariables(testcp); if (Directory.Exists(testdir)) { Directory.Delete(testdir, true); } Directory.CreateDirectory(testdir); var options = new DbOptions() .SetCreateIfMissing(true) .EnableStatistics(); // Using standard open using (var db = RocksDb.Open(options, path)) { // With strings string value = db.Get("key"); db.Put("key", "value"); Assert.Equal("value", db.Get("key")); Assert.Null(db.Get("non-existent-key")); db.Remove("key"); Assert.Null(db.Get("value")); // With bytes db.Put(Encoding.UTF8.GetBytes("key"), Encoding.UTF8.GetBytes("value")); Assert.True(BinaryComparer.Default.Equals(Encoding.UTF8.GetBytes("value"), db.Get(Encoding.UTF8.GetBytes("key")))); // non-existent kiey Assert.Null(db.Get(new byte[] { 0, 1, 2 })); db.Remove(Encoding.UTF8.GetBytes("key")); Assert.Null(db.Get(Encoding.UTF8.GetBytes("key"))); db.Put(Encoding.UTF8.GetBytes("key"), new byte[] { 0, 1, 2, 3, 4, 5, 6, 7 }); // With buffers var buffer = new byte[100]; long length = db.Get(Encoding.UTF8.GetBytes("key"), buffer, 0, buffer.Length); Assert.Equal(8, length); Assert.Equal(new byte[] { 0, 1, 2, 3, 4, 5, 6, 7 }, buffer.Take((int)length).ToList()); buffer = new byte[5]; length = db.Get(Encoding.UTF8.GetBytes("key"), buffer, 0, buffer.Length); Assert.Equal(8, length); Assert.Equal(new byte[] { 0, 1, 2, 3, 4 }, buffer.Take((int)Math.Min(buffer.Length, length))); length = db.Get(Encoding.UTF8.GetBytes("bogus"), buffer, 0, buffer.Length); Assert.Equal(-1, length); // Write batches // With strings using (WriteBatch batch = new WriteBatch() .Put("one", "uno") .Put("two", "deuce") .Put("two", "dos") .Put("three", "tres")) { db.Write(batch); } Assert.Equal("uno", db.Get("one")); // With save point using (WriteBatch batch = new WriteBatch()) { batch .Put("hearts", "red") .Put("diamonds", "red"); batch.SetSavePoint(); batch .Put("clubs", "black"); batch.SetSavePoint(); batch .Put("spades", "black"); batch.RollbackToSavePoint(); db.Write(batch); } Assert.Equal("red", db.Get("diamonds")); Assert.Equal("black", db.Get("clubs")); Assert.Null(db.Get("spades")); // Save a checkpoint using (var cp = db.Checkpoint()) { cp.Save(cppath); } // With bytes var utf8 = Encoding.UTF8; using (WriteBatch batch = new WriteBatch() .Put(utf8.GetBytes("four"), new byte[] { 4, 4, 4 }) .Put(utf8.GetBytes("five"), new byte[] { 5, 5, 5 })) { db.Write(batch); } Assert.True(BinaryComparer.Default.Equals(new byte[] { 4, 4, 4 }, db.Get(utf8.GetBytes("four")))); // Snapshots using (var snapshot = db.CreateSnapshot()) { var before = db.Get("one"); db.Put("one", "1"); var useSnapshot = new ReadOptions() .SetSnapshot(snapshot); // the database value was written Assert.Equal("1", db.Get("one")); // but the snapshot still sees the old version var after = db.Get("one", readOptions: useSnapshot); Assert.Equal(before, after); } var two = db.Get("two"); Assert.Equal("dos", two); // Iterators using (var iterator = db.NewIterator( readOptions: new ReadOptions() .SetIterateUpperBound("t") )) { iterator.Seek("k"); Assert.True(iterator.Valid()); Assert.Equal("key", iterator.StringKey()); iterator.Next(); Assert.True(iterator.Valid()); Assert.Equal("one", iterator.StringKey()); Assert.Equal("1", iterator.StringValue()); iterator.Next(); Assert.False(iterator.Valid()); 
} // MultiGet var multiGetResult = db.MultiGet(new[] { "two", "three", "nine" }); Assert.Equal( expected: new[] { new KeyValuePair <string, string>("two", "dos"), new KeyValuePair <string, string>("three", "tres"), new KeyValuePair <string, string>("nine", null) }, actual: multiGetResult ); } // Test reading checkpointed db using (var cpdb = RocksDb.Open(options, cppath)) { Assert.Equal("red", cpdb.Get("diamonds")); Assert.Equal("black", cpdb.Get("clubs")); Assert.Null(cpdb.Get("spades")); // Checkpoint occurred before these changes: Assert.Null(cpdb.Get("four")); } // Test with column families var optionsCf = new DbOptions() .SetCreateIfMissing(true) .SetCreateMissingColumnFamilies(true); var columnFamilies = new ColumnFamilies { { "reverse", new ColumnFamilyOptions() }, }; using (var db = RocksDb.Open(optionsCf, path, columnFamilies)) { var reverse = db.GetColumnFamily("reverse"); db.Put("one", "uno"); db.Put("two", "dos"); db.Put("three", "tres"); db.Put("uno", "one", cf: reverse); db.Put("dos", "two", cf: reverse); db.Put("tres", "three", cf: reverse); } // Test Cf Delete using (var db = RocksDb.Open(optionsCf, path, columnFamilies)) { var reverse = db.GetColumnFamily("reverse"); db.Put("cuatro", "four", cf: reverse); db.Put("cinco", "five", cf: reverse); Assert.Equal("four", db.Get("cuatro", cf: reverse)); Assert.Equal("five", db.Get("cinco", cf: reverse)); byte[] keyBytes = Encoding.UTF8.GetBytes("cuatro"); db.Remove(keyBytes, reverse); db.Remove("cinco", reverse); Assert.Null(db.Get("cuatro", cf: reverse)); Assert.Null(db.Get("cinco", cf: reverse)); } // Test list { var list = RocksDb.ListColumnFamilies(optionsCf, path); Assert.Equal(new[] { "default", "reverse" }, list.ToArray()); } // Test reopen with column families using (var db = RocksDb.Open(optionsCf, path, columnFamilies)) { var reverse = db.GetColumnFamily("reverse"); Assert.Equal("uno", db.Get("one")); Assert.Equal("one", db.Get("uno", cf: reverse)); Assert.Null(db.Get("uno")); Assert.Null(db.Get("one", cf: reverse)); } // Test dropping and creating column family using (var db = RocksDb.Open(options, path, columnFamilies)) { db.DropColumnFamily("reverse"); var reverse = db.CreateColumnFamily(new ColumnFamilyOptions(), "reverse"); Assert.Null(db.Get("uno", cf: reverse)); db.Put("red", "rouge", cf: reverse); Assert.Equal("rouge", db.Get("red", cf: reverse)); } // Test reopen after drop and create using (var db = RocksDb.Open(options, path, columnFamilies)) { var reverse = db.GetColumnFamily("reverse"); Assert.Null(db.Get("uno", cf: reverse)); Assert.Equal("rouge", db.Get("red", cf: reverse)); } // Test read only using (var db = RocksDb.OpenReadOnly(options, path, columnFamilies, false)) { Assert.Equal("uno", db.Get("one")); } // Test SstFileWriter { using (var writer = new SstFileWriter()) { } var envOpts = new EnvOptions(); var ioOpts = new ColumnFamilyOptions(); using (var sst = new SstFileWriter(envOpts, ioOpts)) { var filename = Path.Combine(temp, "test.sst"); if (File.Exists(filename)) { File.Delete(filename); } sst.Open(filename); sst.Add("four", "quatro"); sst.Add("one", "uno"); sst.Add("two", "dos"); sst.Finish(); using (var db = RocksDb.Open(options, path, columnFamilies)) { Assert.NotEqual("four", db.Get("four")); var ingestOptions = new IngestExternalFileOptions() .SetMoveFiles(true); db.IngestExternalFiles(new string[] { filename }, ingestOptions); Assert.Equal("quatro", db.Get("four")); } } } // test comparator unsafe { var opts = new ColumnFamilyOptions() .SetComparator(new IntegerStringComparator()); var 
filename = Path.Combine(temp, "test.sst"); if (File.Exists(filename)) { File.Delete(filename); } using (var sst = new SstFileWriter(ioOptions: opts)) { sst.Open(filename); sst.Add("111", "111"); sst.Add("1001", "1001"); // this order is only allowed using an integer comparator sst.Finish(); } } // test write batch with index { var wbwi = new WriteBatchWithIndex(reservedBytes: 1024); wbwi.Put("one", "un"); wbwi.Put("two", "deux"); var oneValueIn = Encoding.UTF8.GetBytes("one"); var oneValueOut = wbwi.Get("one"); Assert.Equal("un", oneValueOut); using (var db = RocksDb.Open(options, path, columnFamilies)) { var oneCombinedOut = wbwi.Get(db, "one"); var threeCombinedOut = wbwi.Get(db, "three"); Assert.Equal("un", oneCombinedOut); Assert.Equal("tres", threeCombinedOut); using (var wbIterator = wbwi.NewIterator(db.NewIterator())) { wbIterator.Seek("o"); Assert.True(wbIterator.Valid()); var itkey = wbIterator.StringKey(); Assert.Equal("one", itkey); var itval = wbIterator.StringValue(); Assert.Equal("un", itval); wbIterator.Next(); Assert.True(wbIterator.Valid()); itkey = wbIterator.StringKey(); Assert.Equal("three", itkey); itval = wbIterator.StringValue(); Assert.Equal("tres", itval); wbIterator.Next(); Assert.True(wbIterator.Valid()); itkey = wbIterator.StringKey(); Assert.Equal("two", itkey); itval = wbIterator.StringValue(); Assert.Equal("deux", itval); wbIterator.Next(); Assert.False(wbIterator.Valid()); } db.Write(wbwi); var oneDbOut = wbwi.Get("one"); Assert.Equal("un", oneDbOut); } } // compact range { using (var db = RocksDb.Open(options, path, columnFamilies)) { db.CompactRange("o", "tw"); } } // Smoke test various options { var dbname = "test-options"; if (Directory.Exists(dbname)) { Directory.Delete(dbname, true); } var optsTest = (DbOptions) new RocksDbSharp.DbOptions() .SetCreateIfMissing(true) .SetCreateMissingColumnFamilies(true) .SetBlockBasedTableFactory(new BlockBasedTableOptions().SetBlockCache(Cache.CreateLru(1024 * 1024))); GC.Collect(); using (var db = RocksDbSharp.RocksDb.Open(optsTest, dbname)) { } if (Directory.Exists(dbname)) { Directory.Delete(dbname, true); } } // Smoke test OpenWithTtl { var dbname = "test-with-ttl"; if (Directory.Exists(dbname)) { Directory.Delete(dbname, true); } var optsTest = (DbOptions) new RocksDbSharp.DbOptions() .SetCreateIfMissing(true) .SetCreateMissingColumnFamilies(true); using (var db = RocksDbSharp.RocksDb.OpenWithTtl(optsTest, dbname, 1)) { } if (Directory.Exists(dbname)) { Directory.Delete(dbname, true); } } // Smoke test MergeOperator { var dbname = "test-merge-operator"; if (Directory.Exists(dbname)) { Directory.Delete(dbname, true); } var optsTest = (DbOptions) new RocksDbSharp.DbOptions() .SetCreateIfMissing(true) .SetMergeOperator(MergeOperators.Create( name: "test-merge-operator", partialMerge: (key, keyLength, operandsList, operandsListLength, numOperands, success, newValueLength) => IntPtr.Zero, fullMerge: (key, keyLength, existingValue, existingValueLength, operandsList, operandsListLength, numOperands, success, newValueLength) => IntPtr.Zero, deleteValue: (value, valueLength) => { } )); GC.Collect(); using (var db = RocksDbSharp.RocksDb.Open(optsTest, dbname)) { } if (Directory.Exists(dbname)) { Directory.Delete(dbname, true); } } // Test that GC does not cause access violation on Comparers { var dbname = "test-av-error"; if (Directory.Exists(dbname)) { Directory.Delete(dbname, true); } options = new RocksDbSharp.DbOptions() .SetCreateIfMissing(true) .SetCreateMissingColumnFamilies(true); var sc = new 
RocksDbSharp.StringComparator(StringComparer.InvariantCultureIgnoreCase); columnFamilies = new RocksDbSharp.ColumnFamilies { { "cf1", new RocksDbSharp.ColumnFamilyOptions() .SetComparator(sc) }, }; GC.Collect(); using (var db = RocksDbSharp.RocksDb.Open(options, dbname, columnFamilies)) { } if (Directory.Exists(dbname)) { Directory.Delete(dbname, true); } } }
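One small follow-on to the checkpoint part of the test above: a checkpoint directory is a complete database in its own right, so it can also be opened read-only (cppath is the checkpoint path saved earlier in the test):

using (var cpdb = RocksDb.OpenReadOnly(new DbOptions(), cppath, false))
{
    Assert.Equal("red", cpdb.Get("diamonds"));
    Assert.Null(cpdb.Get("spades"));
}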
/// <summary> /// Provides access to and/or creates a RocksDb persistent key-value store. /// </summary> /// <param name="storeDirectory"> /// The directory containing the key-value store. /// </param> /// <param name="defaultColumnKeyTracked"> /// Whether the default column should be key-tracked. /// This will create two columns for the same data, /// one with just keys and the other with key and value. /// </param> /// <param name="additionalColumns"> /// The names of any additional column families in the key-value store. /// If no additional column families are provided, all entries will be stored /// in the default column. /// Column families are analogous to tables in relational databases. /// </param> /// <param name="additionalKeyTrackedColumns"> /// The names of any additional column families in the key-value store that /// should also be key-tracked. This will create two columns for the same data, /// one with just keys and the other with key and value. /// Column families are analogous to tables in relational databases. /// </param> /// <param name="readOnly"> /// Whether the database should be opened read-only. This prevents modifications and /// creating unnecessary metadata files related to write sessions. /// </param> /// <param name="dropMismatchingColumns"> /// If a store already exists at the given directory, whether any columns that mismatch the columns that were passed into the constructor /// should be dropped. This will cause data loss and can only be applied in read-write mode. /// </param> public RocksDbStore( string storeDirectory, bool defaultColumnKeyTracked = false, IEnumerable<string> additionalColumns = null, IEnumerable<string> additionalKeyTrackedColumns = null, bool readOnly = false, bool dropMismatchingColumns = false) { m_storeDirectory = storeDirectory; m_defaults.DbOptions = new DbOptions() .SetCreateIfMissing(true) .SetCreateMissingColumnFamilies(true); // Disable the write ahead log to reduce disk IO. The write ahead log // is used to recover the store on crashes, so a crash will lose some writes. // Writes will be made in-memory only until the write buffer size // is reached and then they will be flushed to storage files. m_defaults.WriteOptions = new WriteOptions().DisableWal(1); m_defaults.ColumnFamilyOptions = new ColumnFamilyOptions(); m_columns = new Dictionary<string, ColumnFamilyInfo>(); additionalColumns = additionalColumns ?? CollectionUtilities.EmptyArray<string>(); additionalKeyTrackedColumns = additionalKeyTrackedColumns ?? 
CollectionUtilities.EmptyArray<string>(); // The columns that exist in the store on disk may not be in sync with the columns being passed into the constructor HashSet<string> existingColumns; try { existingColumns = new HashSet<string>(RocksDb.ListColumnFamilies(m_defaults.DbOptions, m_storeDirectory)); } catch (RocksDbException) { // If there is no existing store, an exception will be thrown, ignore it existingColumns = new HashSet<string>(); } // In read-only mode, open all existing columns in the store without attempting to validate them against the expected column families if (readOnly) { var columnFamilies = new ColumnFamilies(); foreach (var name in existingColumns) { columnFamilies.Add(name, m_defaults.ColumnFamilyOptions); } m_store = RocksDb.OpenReadOnly(m_defaults.DbOptions, m_storeDirectory, columnFamilies, errIfLogFileExists: false); } else { // For read-write mode, column families may be added, so set up column families schema var columnsSchema = new HashSet<string>(additionalColumns); // Default column columnsSchema.Add(ColumnFamilies.DefaultName); // For key-tracked column families, create two columns: // 1: Normal column of { key : value } // 2: Key-tracking column of { key : empty-value } if (defaultColumnKeyTracked) { // To be robust to the RocksDB-selected default column name changing, // just name the default column's key-tracking column KeyColumnSuffix columnsSchema.Add(KeyColumnSuffix); } foreach (var name in additionalKeyTrackedColumns) { columnsSchema.Add(name); columnsSchema.Add(name + KeyColumnSuffix); } // Figure out which columns are not part of the schema var outsideSchemaColumns = new List<string>(existingColumns.Except(columnsSchema)); // RocksDB requires all columns in the store to be opened in read-write mode, so merge existing columns // with the columns schema that was passed into the constructor existingColumns.UnionWith(columnsSchema); var columnFamilies = new ColumnFamilies(); foreach (var name in existingColumns) { columnFamilies.Add(name, m_defaults.ColumnFamilyOptions); } m_store = RocksDb.Open(m_defaults.DbOptions, m_storeDirectory, columnFamilies); // Provide an opportunity to update the store to the new column family schema if (dropMismatchingColumns) { foreach (var name in outsideSchemaColumns) { m_store.DropColumnFamily(name); existingColumns.Remove(name); } } } var userFacingColumns = existingColumns.Where(name => !name.EndsWith(KeyColumnSuffix)); foreach (var name in userFacingColumns) { var isKeyTracked = existingColumns.Contains(name + KeyColumnSuffix); m_columns.Add(name, new ColumnFamilyInfo() { Handle = m_store.GetColumnFamily(name), UseKeyTracking = isKeyTracked, KeyHandle = isKeyTracked ? m_store.GetColumnFamily(name + KeyColumnSuffix) : null, }); } m_columns.TryGetValue(ColumnFamilies.DefaultName, out m_defaultColumnFamilyInfo); }
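The shadow key columns set up above exist so keys can be listed without touching values. A hypothetical enumeration helper (ColumnFamilyInfo and m_store come from the constructor; the method itself is a sketch):

// Iterate the key-tracking column when available, otherwise the normal one.
private IEnumerable<byte[]> EnumerateKeys(ColumnFamilyInfo column)
{
    var handle = column.UseKeyTracking ? column.KeyHandle : column.Handle;
    using (var iterator = m_store.NewIterator(handle))
    {
        for (iterator.SeekToFirst(); iterator.Valid(); iterator.Next())
        {
            yield return iterator.Key();
        }
    }
}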
public void FunctionalTest() { string temp = Path.GetTempPath(); var testdb = Path.Combine(temp, "functional_test"); string path = Environment.ExpandEnvironmentVariables(testdb); if (Directory.Exists(testdb)) { Directory.Delete(testdb, true); } var options = new DbOptions() .SetCreateIfMissing(true) .EnableStatistics(); // Using standard open using (var db = RocksDb.Open(options, path)) { // With strings string value = db.Get("key"); db.Put("key", "value"); Assert.AreEqual("value", db.Get("key")); Assert.IsNull(db.Get("non-existent-key")); db.Remove("key"); Assert.IsNull(db.Get("value")); // With bytes db.Put(Encoding.UTF8.GetBytes("key"), Encoding.UTF8.GetBytes("value")); Assert.IsTrue(BinaryComparer.Default.Equals(Encoding.UTF8.GetBytes("value"), db.Get(Encoding.UTF8.GetBytes("key")))); // non-existent kiey Assert.IsNull(db.Get(new byte[] { 0, 1, 2 })); db.Remove(Encoding.UTF8.GetBytes("key")); Assert.IsNull(db.Get(Encoding.UTF8.GetBytes("key"))); db.Put(Encoding.UTF8.GetBytes("key"), new byte[] { 0, 1, 2, 3, 4, 5, 6, 7 }); // With buffers var buffer = new byte[100]; long length = db.Get(Encoding.UTF8.GetBytes("key"), buffer, 0, buffer.Length); Assert.AreEqual(8, length); CollectionAssert.AreEqual(new byte[] { 0, 1, 2, 3, 4, 5, 6, 7 }, buffer.Take((int)length).ToList()); // Write batches // With strings using (WriteBatch batch = new WriteBatch() .Put("one", "uno") .Put("two", "deuce") .Put("two", "dos") .Put("three", "tres")) { db.Write(batch); } Assert.AreEqual("uno", db.Get("one")); // With bytes var utf8 = Encoding.UTF8; using (WriteBatch batch = new WriteBatch() .Put(utf8.GetBytes("four"), new byte[] { 4, 4, 4 }) .Put(utf8.GetBytes("five"), new byte[] { 5, 5, 5 })) { db.Write(batch); } Assert.IsTrue(BinaryComparer.Default.Equals(new byte[] { 4, 4, 4 }, db.Get(utf8.GetBytes("four")))); // Snapshots using (var snapshot = db.CreateSnapshot()) { var before = db.Get("one"); db.Put("one", "1"); var useSnapshot = new ReadOptions() .SetSnapshot(snapshot); // the database value was written Assert.AreEqual("1", db.Get("one")); // but the snapshot still sees the old version var after = db.Get("one", readOptions: useSnapshot); Assert.AreEqual(before, after); } var two = db.Get("two"); Assert.AreEqual("dos", two); // Iterators using (var iterator = db.NewIterator( readOptions: new ReadOptions() .SetIterateUpperBound("t") )) { iterator.Seek("k"); Assert.IsTrue(iterator.Valid()); Assert.AreEqual("key", iterator.StringKey()); iterator.Next(); Assert.IsTrue(iterator.Valid()); Assert.AreEqual("one", iterator.StringKey()); Assert.AreEqual("1", iterator.StringValue()); iterator.Next(); Assert.IsFalse(iterator.Valid()); } } // Test with column families var optionsCf = new DbOptions() .SetCreateIfMissing(true) .SetCreateMissingColumnFamilies(true); var columnFamilies = new ColumnFamilies { { "reverse", new ColumnFamilyOptions() }, }; using (var db = RocksDb.Open(optionsCf, path, columnFamilies)) { var reverse = db.GetColumnFamily("reverse"); db.Put("one", "uno"); db.Put("two", "dos"); db.Put("three", "tres"); db.Put("uno", "one", cf: reverse); db.Put("dos", "two", cf: reverse); db.Put("tres", "three", cf: reverse); } // Test reopen with column families using (var db = RocksDb.Open(optionsCf, path, columnFamilies)) { var reverse = db.GetColumnFamily("reverse"); Assert.AreEqual("uno", db.Get("one")); Assert.AreEqual("one", db.Get("uno", cf: reverse)); Assert.IsNull(db.Get("uno")); Assert.IsNull(db.Get("one", cf: reverse)); } // Test dropping and creating column family using (var db = RocksDb.Open(options, 
path, columnFamilies)) { db.DropColumnFamily("reverse"); var reverse = db.CreateColumnFamily(new ColumnFamilyOptions(), "reverse"); Assert.IsNull(db.Get("uno", cf: reverse)); db.Put("red", "rouge", cf: reverse); Assert.AreEqual("rouge", db.Get("red", cf: reverse)); } // Test reopen after drop and create using (var db = RocksDb.Open(options, path, columnFamilies)) { var reverse = db.GetColumnFamily("reverse"); Assert.IsNull(db.Get("uno", cf: reverse)); Assert.AreEqual("rouge", db.Get("red", cf: reverse)); } // Test read only using (var db = RocksDb.OpenReadOnly(options, path, columnFamilies, false)) { Assert.AreEqual("uno", db.Get("one")); } }
public static RocksDb OpenReadOnlyDb(string path) { var columnFamilies = GetColumnFamilies(path); return(RocksDb.OpenReadOnly(new DbOptions(), path, columnFamilies, false)); }
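GetColumnFamilies is not shown above; one plausible implementation, based on the ListColumnFamilies pattern used elsewhere in this section, would be:

private static ColumnFamilies GetColumnFamilies(string path)
{
    var columnFamilies = new ColumnFamilies();
    try
    {
        // Open every column family that already exists in the store.
        foreach (var name in RocksDb.ListColumnFamilies(new DbOptions(), path))
        {
            columnFamilies.Add(name, new ColumnFamilyOptions());
        }
    }
    catch (RocksDbException)
    {
        // No store exists at the path yet; fall back to just the default family.
    }
    return columnFamilies;
}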