Example #1
        private DbOptions BuildOptions(IDbConfig dbConfig)
        {
            var tableOptions = new BlockBasedTableOptions();

            tableOptions.SetBlockSize(16 * 1024);
            tableOptions.SetPinL0FilterAndIndexBlocksInCache(true);
            tableOptions.SetCacheIndexAndFilterBlocks(ReadConfig<bool>(dbConfig, nameof(dbConfig.CacheIndexAndFilterBlocks)));

            tableOptions.SetFilterPolicy(BloomFilterPolicy.Create(10, true));
            tableOptions.SetFormatVersion(2);

            var blockCacheSize = ReadConfig<ulong>(dbConfig, nameof(dbConfig.BlockCacheSize));
            var cache          = Native.Instance.rocksdb_cache_create_lru(new UIntPtr(blockCacheSize));

            tableOptions.SetBlockCache(cache);

            var options = new DbOptions();

            options.SetCreateIfMissing(true);
            options.SetAdviseRandomOnOpen(true);
            options.OptimizeForPointLookup(blockCacheSize); // I guess this should be the one option controlled by the DB size property - bind it to LRU cache size
            //options.SetCompression(CompressionTypeEnum.rocksdb_snappy_compression);
            //options.SetLevelCompactionDynamicLevelBytes(true);

            /*
             * Multi-Threaded Compactions
             * Compactions are needed to remove multiple copies of the same key that may occur if an application
             * overwrites an existing key. Compactions also process deletions of keys.
             * Compactions may occur in multiple threads if configured appropriately.
             * The entire database is stored in a set of sst files. When a memtable is full, its content is written
             * out to a file in Level-0 (L0). RocksDB removes duplicate and overwritten keys in the memtable when it
             * is flushed to a file in L0. Some files are periodically read in and merged to form larger files -
             * this is called compaction.
             * The overall write throughput of an LSM database directly depends on the speed at which compactions
             * can occur, especially when the data is stored in fast storage like SSD or RAM. RocksDB may be
             * configured to issue concurrent compaction requests from multiple threads. It is observed that
             * sustained write rates may increase by as much as a factor of 10 with multi-threaded compaction when
             * the database is on SSDs, as compared to single-threaded compactions.
             * TKS: Observed ~500MB/s with multi-threaded compactions compared to ~100MB/s single-threaded on my
             * machine (processor count returns 12 for 6 cores with hyperthreading)
             * TKS: CPU usage goes to an insane 30% on an otherwise idle, compaction-only app
             */
            options.SetMaxBackgroundCompactions(Environment.ProcessorCount);

            //options.SetMaxOpenFiles(32);
            options.SetWriteBufferSize(ReadConfig<ulong>(dbConfig, nameof(dbConfig.WriteBufferSize)));
            options.SetMaxWriteBufferNumber((int)ReadConfig<uint>(dbConfig, nameof(dbConfig.WriteBufferNumber)));
            options.SetMinWriteBufferNumberToMerge(2);
            options.SetBlockBasedTableFactory(tableOptions);

            options.SetMaxBackgroundFlushes(Environment.ProcessorCount);
            options.IncreaseParallelism(Environment.ProcessorCount);
            options.SetRecycleLogFileNum(dbConfig.RecycleLogFileNum); // potential optimization for reusing allocated log files


            //options.SetLevelCompactionDynamicLevelBytes(true); // only switch this on for empty DBs
            _writeOptions = new WriteOptions();
            _writeOptions.SetSync(dbConfig.WriteAheadLogSync); // potential fix for corruption on hard process termination, may cause performance degradation

            return options;
        }
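
A minimal usage sketch (not part of the original example): BuildOptions would typically feed straight into RocksDb.Open, exactly as Example #2 does below. The Init method name and the _db and _dbPath fields are hypothetical, used only for illustration.

        // Hypothetical caller: build the options from config and open the database at _dbPath.
        private void Init(IDbConfig dbConfig)
        {
            DbOptions options = BuildOptions(dbConfig);
            _db = RocksDb.Open(options, _dbPath);
        }
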
Example #2
        public FullDbOnTheRocks(string dbPath) // TODO: check column families
        {
            if (!Directory.Exists(dbPath))
            {
                Directory.CreateDirectory(dbPath);
            }

            // options are based mainly on EthereumJ at the moment

            BlockBasedTableOptions tableOptions = new BlockBasedTableOptions();

            //tableOptions.SetPinL0FilterAndIndexBlocksInCache(true);
            tableOptions.SetBlockSize(16 * 1024);
            //tableOptions.SetCacheIndexAndFilterBlocks(true);
            tableOptions.SetFilterPolicy(BloomFilterPolicy.Create(10, false));
            tableOptions.SetFormatVersion(2);

            DbOptions options = new DbOptions();

            options.SetCreateIfMissing(true);
            options.OptimizeForPointLookup(32);
            //options.SetCompression(CompressionTypeEnum.rocksdb_snappy_compression);
            //options.SetLevelCompactionDynamicLevelBytes(true);
            //options.SetMaxBackgroundCompactions(4);
            //options.SetMaxBackgroundFlushes(2);
            //options.SetMaxOpenFiles(32);
            //options.SetDbWriteBufferSize(1024 * 1024 * 16);
            options.SetWriteBufferSize(1024 * 1024 * 16);
            options.SetMaxWriteBufferNumber(6);
            options.SetMinWriteBufferNumberToMerge(2);
            options.SetBlockBasedTableFactory(tableOptions);

            //SliceTransform transform = SliceTransform.CreateFixedPrefix(16);
            //options.SetPrefixExtractor(transform);

            _db = DbsByPath.GetOrAdd(dbPath, path => RocksDb.Open(options, path));

            if (dbPath.EndsWith(DiscoveryNodesDbPath))
            {
                _dbInstance = DbInstance.DiscoveryNodes;
            }
            else if (dbPath.EndsWith(PeersDbPath))
            {
                _dbInstance = DbInstance.Peers;
            }
        }
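
Once the instance is opened (or fetched from DbsByPath), reads and writes go through the standard RocksDbSharp byte-array API. A minimal sketch, assuming simple Get/Set wrappers over the _db field; the wrapper names are illustrative and not part of the original class.

        // Hypothetical accessors over the opened _db instance.
        public byte[] Get(byte[] key)
        {
            return _db.Get(key);
        }

        public void Set(byte[] key, byte[] value)
        {
            _db.Put(key, value);
        }
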
Example #3
        /// <summary>
        /// Create the RocksDB configuration and open the RocksDB database.
        /// </summary>
        /// <param name="context">Processor context providing the state directory and the RocksDB configuration handler.</param>
        protected void OpenDatabase(ProcessorContext context)
        {
            DbOptions           dbOptions           = new DbOptions();
            ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();

            writeOptions = new WriteOptions();
            BlockBasedTableOptions tableConfig = new BlockBasedTableOptions();

            RocksDbOptions rocksDbOptions = new RocksDbOptions(dbOptions, columnFamilyOptions);

            tableConfig.SetBlockCache(RocksDbSharp.Cache.CreateLru(BLOCK_CACHE_SIZE));
            tableConfig.SetBlockSize(BLOCK_SIZE);
            tableConfig.SetFilterPolicy(BloomFilterPolicy.Create());

            rocksDbOptions.SetOptimizeFiltersForHits(1);
            rocksDbOptions.SetBlockBasedTableFactory(tableConfig);
            rocksDbOptions.SetCompression(COMPRESSION_TYPE);
            rocksDbOptions.SetWriteBufferSize(WRITE_BUFFER_SIZE);
            rocksDbOptions.SetCompactionStyle(COMPACTION_STYLE);
            rocksDbOptions.SetMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
            rocksDbOptions.SetCreateIfMissing(true);
            rocksDbOptions.SetErrorIfExists(false);
            rocksDbOptions.SetInfoLogLevel(RocksLogLevel.ERROR);
            // this is the recommended way to increase parallelism in RocksDB
            // note that the current implementation of IncreaseParallelism affects the number
            // of compaction threads but not flush threads (the latter remains one). Also,
            // the parallelism value needs to be at least two because the code at
            // https://github.com/facebook/rocksdb/blob/62ad0a9b19f0be4cefa70b6b32876e764b7f3c11/util/options.cc#L580
            // subtracts one from the value passed in to determine the number of compaction threads
            // (this could be a bug in the RocksDB code and their devs have been contacted).
            rocksDbOptions.IncreaseParallelism(Math.Max(Environment.ProcessorCount, 2));

            // TODO: wrap writeOptions in rocksDbOptions too
            writeOptions.DisableWal(1);

            context.Configuration.RocksDbConfigHandler?.Invoke(Name, rocksDbOptions);
            rocksDbOptions.SetMinWriteBufferNumberToMerge(2);

            DbDir = new DirectoryInfo(Path.Combine(context.StateDir, parentDir, Name));

            Directory.CreateDirectory(DbDir.FullName);

            OpenRocksDb(dbOptions, columnFamilyOptions);

            IsOpen = true;
        }
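
The RocksDbConfigHandler invocation above is the extension point for per-store tuning: application code can register a delegate that receives the store name and the RocksDbOptions wrapper before the database is opened. A hedged sketch of such a handler follows, assuming config refers to the stream configuration object exposing the RocksDbConfigHandler property seen above; the tuning values are purely illustrative.

        // Hypothetical handler registration; it adjusts the options built in OpenDatabase
        // before OpenRocksDb is called.
        config.RocksDbConfigHandler = (storeName, options) =>
        {
            options.SetWriteBufferSize(32 * 1024 * 1024); // illustrative: 32 MB memtables
            options.SetMaxWriteBufferNumber(4);           // illustrative: up to 4 memtables
        };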