/// <summary>
/// Base size of blocks of entries. As entries get written to a BlockStorage, they are buffered up to this size,
/// then sorted and written out. As blocks get merged into bigger blocks, this is still the size of the read buffer
/// for each block, no matter its size. Each thread has its own buffer when writing and each thread has
/// <seealso cref="mergeFactor"/> buffers when merging. Memory usage peaks during merge, and the total memory usage
/// can be estimated as:
///
/// blockSize * numberOfPopulationWorkers * <seealso cref="mergeFactor"/>
///
/// where typically <seealso cref="BatchingMultipleIndexPopulator"/> controls the number of population workers.
/// The setting `unsupported.dbms.multi_threaded_schema_index_population_enabled` controls whether or not the
/// multi-threaded <seealso cref="BatchingMultipleIndexPopulator"/> is used; otherwise a single-threaded populator
/// is used instead.
/// </summary>
public static int ParseBlockSize()
{
    long blockSize = ByteUnit.parse(FeatureToggles.getString(typeof(BlockBasedIndexPopulator), BLOCK_SIZE_NAME, "1M"));
    Preconditions.checkArgument(blockSize >= 20 && blockSize < int.MaxValue, "Block size needs to fit in an int. Was " + blockSize);
    return (int) blockSize;
}
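
// Illustrative sketch only (not part of the original source): shows how the peak-memory formula from the
// summary above could be computed. The method name EstimateMergeMemoryUsage and its parameters are
// hypothetical names introduced here purely for illustration.
internal static long EstimateMergeMemoryUsage(long blockSize, int numberOfPopulationWorkers, int mergeFactor)
{
    // Peak usage occurs during merge: each population worker holds mergeFactor read buffers of blockSize bytes.
    return blockSize * numberOfPopulationWorkers * mergeFactor;
}

// Example: with the default 1M block size, 8 population workers and a merge factor of 8,
// EstimateMergeMemoryUsage(1024 * 1024, 8, 8) evaluates to 64 MiB of peak buffer memory.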