Use a PlainTableConfig for the in-memory RocksDB database

Signed-off-by: Ameziane H. <ameziane.hamlat@consensys.net>
besu-for-fleet
Ameziane H. 3 weeks ago
parent f82bb7d6b7
commit 3f6bc95662
  1. 42
      plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/segmented/RocksDBColumnarKeyValueStorage.java

@@ -55,6 +55,7 @@ import org.rocksdb.DBOptions;
import org.rocksdb.Env; import org.rocksdb.Env;
import org.rocksdb.LRUCache; import org.rocksdb.LRUCache;
import org.rocksdb.Options; import org.rocksdb.Options;
import org.rocksdb.PlainTableConfig;
import org.rocksdb.ReadOptions; import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB; import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException; import org.rocksdb.RocksDBException;
@@ -70,8 +71,6 @@ import org.slf4j.LoggerFactory;
public abstract class RocksDBColumnarKeyValueStorage implements SegmentedKeyValueStorage { public abstract class RocksDBColumnarKeyValueStorage implements SegmentedKeyValueStorage {
private static final Logger LOG = LoggerFactory.getLogger(RocksDBColumnarKeyValueStorage.class); private static final Logger LOG = LoggerFactory.getLogger(RocksDBColumnarKeyValueStorage.class);
private static final int ROCKSDB_FORMAT_VERSION = 5;
private static final long ROCKSDB_BLOCK_SIZE = 32768;
/** RocksDb blockcache size when using the high spec option */ /** RocksDb blockcache size when using the high spec option */
protected static final long ROCKSDB_BLOCKCACHE_SIZE_HIGH_SPEC = 1_073_741_824L; protected static final long ROCKSDB_BLOCKCACHE_SIZE_HIGH_SPEC = 1_073_741_824L;
@@ -164,7 +163,7 @@ public abstract class RocksDBColumnarKeyValueStorage implements SegmentedKeyValu
.forEach(trimmedSegments::remove); .forEach(trimmedSegments::remove);
columnDescriptors = columnDescriptors =
trimmedSegments.stream() trimmedSegments.stream()
.map(segment -> createColumnDescriptor(segment, configuration)) .map(segment -> createColumnDescriptor(segment))
.collect(Collectors.toList()); .collect(Collectors.toList());
setGlobalOptions(configuration, stats); setGlobalOptions(configuration, stats);
@@ -181,19 +180,18 @@ public abstract class RocksDBColumnarKeyValueStorage implements SegmentedKeyValu
* options to apply to the corresponding Column Family * options to apply to the corresponding Column Family
* *
* @param segment the segment identifier * @param segment the segment identifier
* @param configuration RocksDB configuration
* @return a column family descriptor * @return a column family descriptor
*/ */
private ColumnFamilyDescriptor createColumnDescriptor( private ColumnFamilyDescriptor createColumnDescriptor(
final SegmentIdentifier segment, final RocksDBConfiguration configuration) { final SegmentIdentifier segment) {
BlockBasedTableConfig basedTableConfig = createBlockBasedTableConfig(segment, configuration); PlainTableConfig plainTableConfig = createPlainTableConfig();
final var options = final var options =
new ColumnFamilyOptions() new ColumnFamilyOptions()
.setTtl(0) .setTtl(0)
.setCompressionType(CompressionType.LZ4_COMPRESSION) .setCompressionType(CompressionType.LZ4_COMPRESSION)
.setTableFormatConfig(basedTableConfig); .setTableFormatConfig(plainTableConfig);
if (segment.containsStaticData()) { if (segment.containsStaticData()) {
options options
@@ -206,30 +204,16 @@ public abstract class RocksDBColumnarKeyValueStorage implements SegmentedKeyValu
return new ColumnFamilyDescriptor(segment.getId(), options); return new ColumnFamilyDescriptor(segment.getId(), options);
} }
/*** private PlainTableConfig createPlainTableConfig() {
* Create a Block Base Table configuration for each segment, depending on the configuration in place return new PlainTableConfig()
* and the segment itself .setBloomBitsPerKey(10) // Optional: can use bloom filters to speed up key lookups
* .setHashTableRatio(0.75) // Default hash table ratio
* @param segment The segment related to the column family .setIndexSparseness(16) // Adjust the density of the index
* @param config RocksDB configuration .setEncodingType(PlainTableConfig.DEFAULT_ENCODING_TYPE) // Use prefix encoding for efficient memory use
* @return Block Base Table configuration .setFullScanMode(false); // Optimized for in-memory full scan
*/
private BlockBasedTableConfig createBlockBasedTableConfig(
final SegmentIdentifier segment, final RocksDBConfiguration config) {
final LRUCache cache =
new LRUCache(
config.isHighSpec() && segment.isEligibleToHighSpecFlag()
? ROCKSDB_BLOCKCACHE_SIZE_HIGH_SPEC
: config.getCacheCapacity());
return new BlockBasedTableConfig()
.setFormatVersion(ROCKSDB_FORMAT_VERSION)
.setBlockCache(cache)
.setFilterPolicy(new BloomFilter(10, false))
.setPartitionFilters(true)
.setCacheIndexAndFilterBlocks(false)
.setBlockSize(ROCKSDB_BLOCK_SIZE);
} }
/*** /***
* Set Global options (DBOptions) * Set Global options (DBOptions)
* *

Loading…
Cancel
Save