Example #1
        public RocksDbContext(string path = "ChainLachain")
        {
            _dbpath       = path;
            _writeOptions = new WriteOptions();
            _writeOptions.DisableWal(0);  // 0 keeps the write-ahead log enabled
            _writeOptions.SetSync(true);  // sync the WAL to disk on every write

            var options = new DbOptions().SetCreateIfMissing();

            _rocksDb = RocksDb.Open(options, _dbpath);
        }
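
For reference, a minimal usage sketch of the RocksDbSharp calls this constructor relies on. The class and method names (RocksDbUsageSketch, RoundTrip) and the key/value contents are illustrative assumptions; RocksDb.Open, Put and Get are the library calls actually exercised:

        using System.Text;
        using RocksDbSharp;

        internal static class RocksDbUsageSketch
        {
            internal static byte[] RoundTrip()
            {
                // Open (or create) the database the same way the constructor above does.
                using var db = RocksDb.Open(new DbOptions().SetCreateIfMissing(), "ChainLachain");

                var writeOptions = new WriteOptions();
                writeOptions.SetSync(true);   // same durability choice as in the example

                // Synchronous, WAL-backed write followed by a read of the same key.
                db.Put(Encoding.UTF8.GetBytes("block:0"),
                       Encoding.UTF8.GetBytes("genesis"),
                       writeOptions: writeOptions);

                return db.Get(Encoding.UTF8.GetBytes("block:0"));
            }
        }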
Example #2
        private DbOptions BuildOptions(IDbConfig dbConfig)
        {
            var tableOptions = new BlockBasedTableOptions();

            tableOptions.SetBlockSize(16 * 1024);
            tableOptions.SetPinL0FilterAndIndexBlocksInCache(true);
            tableOptions.SetCacheIndexAndFilterBlocks(ReadConfig<bool>(dbConfig, nameof(dbConfig.CacheIndexAndFilterBlocks)));

            tableOptions.SetFilterPolicy(BloomFilterPolicy.Create(10, true));
            tableOptions.SetFormatVersion(2);

            var blockCacheSize = ReadConfig<ulong>(dbConfig, nameof(dbConfig.BlockCacheSize));
            var cache          = Native.Instance.rocksdb_cache_create_lru(new UIntPtr(blockCacheSize));

            tableOptions.SetBlockCache(cache);

            var options = new DbOptions();

            options.SetCreateIfMissing(true);
            options.SetAdviseRandomOnOpen(true);
            options.OptimizeForPointLookup(blockCacheSize); // I guess this should be the one option controlled by the DB size property - bind it to LRU cache size
            //options.SetCompression(CompressionTypeEnum.rocksdb_snappy_compression);
            //options.SetLevelCompactionDynamicLevelBytes(true);

            /*
             * Multi-Threaded Compactions
             * Compactions are needed to remove multiple copies of the same key that may occur if an application overwrites an existing key. Compactions also process deletions of keys. Compactions may occur in multiple threads if configured appropriately.
             * The entire database is stored in a set of sstfiles. When a memtable is full, its content is written out to a file in Level-0 (L0). RocksDB removes duplicate and overwritten keys in the memtable when it is flushed to a file in L0. Some files are periodically read in and merged to form larger files - this is called compaction.
             * The overall write throughput of an LSM database directly depends on the speed at which compactions can occur, especially when the data is stored in fast storage like SSD or RAM. RocksDB may be configured to issue concurrent compaction requests from multiple threads. It is observed that sustained write rates may increase by as much as a factor of 10 with multi-threaded compaction when the database is on SSDs, as compared to single-threaded compactions.
             * TKS: Observed 500MB/s compared to ~100MB/s between multithreaded and single thread compactions on my machine (processor count is returning 12 for 6 cores with hyperthreading)
             * TKS: CPU goes to an insane 30% usage while idle - the app is doing nothing but compacting
             */
            options.SetMaxBackgroundCompactions(Environment.ProcessorCount);

            //options.SetMaxOpenFiles(32);
            options.SetWriteBufferSize(ReadConfig<ulong>(dbConfig, nameof(dbConfig.WriteBufferSize)));
            options.SetMaxWriteBufferNumber((int)ReadConfig<uint>(dbConfig, nameof(dbConfig.WriteBufferNumber)));
            options.SetMinWriteBufferNumberToMerge(2);
            options.SetBlockBasedTableFactory(tableOptions);

            options.SetMaxBackgroundFlushes(Environment.ProcessorCount);
            options.IncreaseParallelism(Environment.ProcessorCount);
            options.SetRecycleLogFileNum(dbConfig.RecycleLogFileNum); // potential optimization for reusing allocated log files


            // options.SetLevelCompactionDynamicLevelBytes(true); // only switch this on for empty DBs
            _writeOptions = new WriteOptions();
            _writeOptions.SetSync(dbConfig.WriteAheadLogSync); // potential fix for corruption on hard process termination, may cause performance degradation

            return options;
        }
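
A minimal sketch of how the returned options might be consumed. BuildOptions, IDbConfig and _writeOptions come from the example above; the Init method name and its parameters are assumptions for illustration only:

        // Build the tuned options and open the database with them. BuildOptions also
        // initializes _writeOptions as a side effect, so the handle returned here is
        // meant to be used together with that field for writes.
        private RocksDb Init(IDbConfig dbConfig, string dbPath)
        {
            DbOptions options = BuildOptions(dbConfig);
            return RocksDb.Open(options, dbPath);   // SetCreateIfMissing(true) lets this create a new DB
        }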