Database metadata refactor (#6555)

Signed-off-by: Fabio Di Fabio <fabio.difabio@consensys.net>
Co-authored-by: Gabriel Fukushima <gabrielfukushima@gmail.com>
pull/6616/head
Fabio Di Fabio committed via GitHub 9 months ago
parent 3538c55a37
commit e0bedff962
Changed files (79), with the number of changed lines in parentheses:
  1. CHANGELOG.md (2)
  2. besu/src/main/java/org/hyperledger/besu/cli/BesuCommand.java (2)
  3. besu/src/main/java/org/hyperledger/besu/cli/options/stable/DataStorageOptions.java (2)
  4. besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/RevertMetadataSubCommand.java (135)
  5. besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/StorageSubCommand.java (3)
  6. besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogSubCommand.java (2)
  7. besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java (2)
  8. besu/src/main/java/org/hyperledger/besu/services/BesuConfigurationImpl.java (5)
  9. besu/src/test/java/org/hyperledger/besu/cli/BesuCommandTest.java (2)
  10. besu/src/test/java/org/hyperledger/besu/cli/options/stable/DataStorageOptionsTest.java (2)
  11. besu/src/test/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogHelperTest.java (2)
  12. besu/src/test/java/org/hyperledger/besu/controller/BesuControllerBuilderTest.java (2)
  13. besu/src/test/java/org/hyperledger/besu/services/TraceServiceImplTest.java (2)
  14. ethereum/api/src/test/java/org/hyperledger/besu/ethereum/api/graphql/AbstractEthGraphQLHttpServiceTest.java (2)
  15. ethereum/api/src/test/java/org/hyperledger/besu/ethereum/api/jsonrpc/AbstractJsonRpcHttpServiceTest.java (2)
  16. ethereum/api/src/test/java/org/hyperledger/besu/ethereum/api/jsonrpc/bonsai/DebugTraceJsonRpcHttpBySpecTest.java (2)
  17. ethereum/api/src/test/java/org/hyperledger/besu/ethereum/api/jsonrpc/bonsai/EthByzantiumJsonRpcHttpBySpecTest.java (2)
  18. ethereum/api/src/test/java/org/hyperledger/besu/ethereum/api/jsonrpc/bonsai/TraceJsonRpcHttpBySpecTest.java (2)
  19. ethereum/api/src/test/java/org/hyperledger/besu/ethereum/api/jsonrpc/forest/DebugTraceJsonRpcHttpBySpecTest.java (2)
  20. ethereum/api/src/test/java/org/hyperledger/besu/ethereum/api/jsonrpc/forest/EthByzantiumJsonRpcHttpBySpecTest.java (2)
  21. ethereum/api/src/test/java/org/hyperledger/besu/ethereum/api/jsonrpc/forest/TraceJsonRpcHttpBySpecTest.java (2)
  22. ethereum/api/src/test/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/filter/EthJsonRpcHttpServiceTest.java (2)
  23. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueSegmentIdentifier.java (44)
  24. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueStorageProvider.java (2)
  25. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/bonsai/storage/BonsaiWorldStateKeyValueStorage.java (2)
  26. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/common/GenesisWorldStateProvider.java (2)
  27. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/forest/storage/ForestWorldStateKeyValueStorage.java (2)
  28. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/DataStorageConfiguration.java (2)
  29. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/DataStorageFormat.java (41)
  30. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/WorldStateStorage.java (1)
  31. ethereum/core/src/test-support/java/org/hyperledger/besu/ethereum/core/BlockchainSetupUtil.java (2)
  32. ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/bonsai/AbstractIsolationTests.java (6)
  33. ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/bonsai/storage/BonsaiWorldStateKeyValueStorageTest.java (2)
  34. ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/bonsai/storage/flat/FlatDbStrategyProviderTest.java (2)
  35. ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/bonsai/trielog/TrieLogFactoryTests.java (2)
  36. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncDownloader.java (2)
  37. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldStateDownloader.java (2)
  38. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/manager/EthProtocolManagerTest.java (2)
  39. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/manager/EthProtocolManagerTestUtil.java (2)
  40. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/manager/ethtaskutils/AbstractMessageTaskTest.java (2)
  41. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/manager/task/SnapProtocolManagerTestUtil.java (2)
  42. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/AbstractBlockPropagationManagerTest.java (2)
  43. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/BonsaiBlockPropagationManagerTest.java (2)
  44. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/ChainHeadTrackerTest.java (2)
  45. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/DownloadHeadersStepTest.java (2)
  46. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/ForestBlockPropagationManagerTest.java (2)
  47. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/RangeHeadersFetcherTest.java (2)
  48. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/checkpointsync/CheckPointSyncChainDownloaderTest.java (2)
  49. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/DownloadReceiptsStepTest.java (2)
  50. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncActionsTest.java (2)
  51. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncChainDownloaderTest.java (2)
  52. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncDownloaderTest.java (2)
  53. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/PivotBlockConfirmerTest.java (2)
  54. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/PivotBlockRetrieverTest.java (2)
  55. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/worldstate/FastWorldDownloadStateTest.java (2)
  56. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/fullsync/FullSyncChainDownloaderTest.java (2)
  57. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/fullsync/FullSyncChainDownloaderTotalTerminalDifficultyTest.java (2)
  58. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/fullsync/FullSyncDownloaderTest.java (2)
  59. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/fullsync/FullSyncTargetManagerTest.java (2)
  60. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldDownloadStateTest.java (2)
  61. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/heal/StorageTrieNodeHealingRequestTest.java (2)
  62. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/tasks/PersistBlockTaskTest.java (2)
  63. plugin-api/build.gradle (2)
  64. plugin-api/src/main/java/org/hyperledger/besu/plugin/services/BesuConfiguration.java (7)
  65. plugin-api/src/main/java/org/hyperledger/besu/plugin/services/storage/DataStorageFormat.java (23)
  66. plugin-api/src/main/java/org/hyperledger/besu/plugin/services/storage/SegmentIdentifier.java (8)
  67. plugins/rocksdb/build.gradle (1)
  68. plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValuePrivacyStorageFactory.java (132)
  69. plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValueStorageFactory.java (281)
  70. plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/BaseVersionedStorageFormat.java (78)
  71. plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/DatabaseMetadata.java (248)
  72. plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/PrivacyVersionedStorageFormat.java (84)
  73. plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/VersionedStorageFormat.java (43)
  74. plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValuePrivacyStorageFactoryTest.java (70)
  75. plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/RocksDBKeyValueStorageFactoryTest.java (162)
  76. plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/Utils.java (91)
  77. plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/configuration/DatabaseMetadataTest.java (55)
  78. plugins/rocksdb/src/test/java/org/hyperledger/besu/plugin/services/storage/rocksdb/segmented/RocksDBColumnarKeyValueStorageTest.java (97)
  79. testutil/src/main/java/org/hyperledger/besu/kvstore/AbstractKeyValueStorageTest.java (507)

@ -3,11 +3,13 @@
## 24.2.1-SNAPSHOT
### Breaking Changes
- RocksDB database metadata format has changed to be more expressive; an existing metadata file is migrated to the new format automatically at startup. Before downgrading to a previous version, you must first revert to the original format with the subcommand `besu --data-path=/path/to/besu/datadir storage revert-metadata v2-to-v1`.
### Deprecations
### Additions and Improvements
- Extend `Blockchain` service [#6592](https://github.com/hyperledger/besu/pull/6592)
- RocksDB database metadata refactoring [#6555](https://github.com/hyperledger/besu/pull/6555)
### Bug fixes

@ -141,7 +141,6 @@ import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProviderBuilder;
import org.hyperledger.besu.ethereum.trie.forest.pruner.PrunerConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.evm.precompile.AbstractAltBnPrecompiledContract;
import org.hyperledger.besu.evm.precompile.BigIntegerModularExponentiationPrecompiledContract;
import org.hyperledger.besu.evm.precompile.KZGPointEvalPrecompiledContract;
@ -172,6 +171,7 @@ import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.metrics.MetricCategory;
import org.hyperledger.besu.plugin.services.metrics.MetricCategoryRegistry;
import org.hyperledger.besu.plugin.services.securitymodule.SecurityModule;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.PrivacyKeyValueStorageFactory;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBPlugin;
import org.hyperledger.besu.plugin.services.txselection.PluginTransactionSelectorFactory;

@ -25,8 +25,8 @@ import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.
import org.hyperledger.besu.cli.options.CLIOptions;
import org.hyperledger.besu.cli.util.CommandLineUtils;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.List;

@ -0,0 +1,135 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli.subcommands.storage;
import org.hyperledger.besu.cli.util.VersionProvider;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.OptionalInt;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.ParentCommand;
/** The revert metadata to v1 subcommand. */
@Command(
name = "revert-metadata",
description = "Revert database metadata to previous format",
mixinStandardHelpOptions = true,
versionProvider = VersionProvider.class,
subcommands = RevertMetadataSubCommand.v2ToV1.class)
public class RevertMetadataSubCommand implements Runnable {
private static final Logger LOG = LoggerFactory.getLogger(RevertMetadataSubCommand.class);
private static final String METADATA_FILENAME = "DATABASE_METADATA.json";
private static final ObjectMapper MAPPER =
new ObjectMapper()
.registerModule(new Jdk8Module())
.setSerializationInclusion(JsonInclude.Include.NON_ABSENT)
.enable(SerializationFeature.INDENT_OUTPUT);
@SuppressWarnings("unused")
@ParentCommand
private StorageSubCommand parentCommand;
@SuppressWarnings("unused")
@CommandLine.Spec
private CommandLine.Model.CommandSpec spec;
@Override
public void run() {
spec.commandLine().usage(System.out);
}
@Command(
name = "v2-to-v1",
description = "Revert a database metadata v2 format to v1 format",
mixinStandardHelpOptions = true,
versionProvider = VersionProvider.class)
static class v2ToV1 implements Runnable {
@SuppressWarnings("unused")
@CommandLine.Spec
private CommandLine.Model.CommandSpec spec;
@SuppressWarnings("unused")
@ParentCommand
private RevertMetadataSubCommand parentCommand;
@Override
public void run() {
final Path dataDir = parentCommand.parentCommand.besuCommand.dataDir();
final File dbMetadata = dataDir.resolve(METADATA_FILENAME).toFile();
if (!dbMetadata.exists()) {
String errMsg =
String.format(
"Could not find database metadata file %s, check your data dir %s",
dbMetadata, dataDir);
LOG.error(errMsg);
throw new IllegalArgumentException(errMsg);
}
try {
final var root = MAPPER.readTree(dbMetadata);
if (!root.has("v2")) {
String errMsg =
String.format("Database metadata file %s is not in v2 format", dbMetadata);
LOG.error(errMsg);
throw new IllegalArgumentException(errMsg);
}
final var v2Obj = root.get("v2");
if (!v2Obj.has("format")) {
String errMsg =
String.format(
"Database metadata file %s is malformed, \"format\" field not found", dbMetadata);
LOG.error(errMsg);
throw new IllegalArgumentException(errMsg);
}
final var formatField = v2Obj.get("format").asText();
final OptionalInt maybePrivacyVersion =
v2Obj.has("privacyVersion")
? OptionalInt.of(v2Obj.get("privacyVersion").asInt())
: OptionalInt.empty();
final DataStorageFormat dataStorageFormat = DataStorageFormat.valueOf(formatField);
final int v1Version =
switch (dataStorageFormat) {
case FOREST -> 1;
case BONSAI -> 2;
};
@JsonSerialize
record V1(int version, OptionalInt privacyVersion) {}
MAPPER.writeValue(dbMetadata, new V1(v1Version, maybePrivacyVersion));
LOG.info("Successfully reverted database metadata from v2 to v1 in {}", dbMetadata);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
}
}
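For illustration only, assuming the on-disk layout matches the fields read and written above (the field names are inferred from the code, not from a real metadata file): a Bonsai node's v2 metadata file such as
{"v2":{"format":"BONSAI","privacyVersion":1}}
would be rewritten by this subcommand to the legacy v1 layout
{"version":2,"privacyVersion":1}
with privacyVersion omitted entirely when it is absent, since the mapper is configured with JsonInclude.Include.NON_ABSENT.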

@ -48,7 +48,8 @@ import picocli.CommandLine.Spec;
subcommands = {
StorageSubCommand.RevertVariablesStorage.class,
RocksDbSubCommand.class,
TrieLogSubCommand.class
TrieLogSubCommand.class,
RevertMetadataSubCommand.class
})
public class StorageSubCommand implements Runnable {

@ -28,7 +28,7 @@ import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.ethereum.trie.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.bonsai.trielog.TrieLogPruner;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.io.IOException;
import java.io.PrintWriter;

@ -91,7 +91,6 @@ import org.hyperledger.besu.ethereum.trie.forest.pruner.MarkSweepPruner;
import org.hyperledger.besu.ethereum.trie.forest.pruner.Pruner;
import org.hyperledger.besu.ethereum.trie.forest.pruner.PrunerConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.WorldStatePreimageStorage;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
@ -99,6 +98,7 @@ import org.hyperledger.besu.evm.internal.EvmConfiguration;
import org.hyperledger.besu.metrics.ObservableMetricsSystem;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.permissioning.NodeMessagePermissioningProvider;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.txselection.PluginTransactionSelectorFactory;
import org.hyperledger.besu.plugin.services.txvalidator.PluginTransactionValidatorFactory;

@ -18,6 +18,7 @@ import org.hyperledger.besu.datatypes.Wei;
import org.hyperledger.besu.ethereum.core.MiningParameters;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.nio.file.Path;
@ -58,8 +59,8 @@ public class BesuConfigurationImpl implements BesuConfiguration {
}
@Override
public int getDatabaseVersion() {
return dataStorageConfiguration.getDataStorageFormat().getDatabaseVersion();
public DataStorageFormat getDatabaseFormat() {
return dataStorageConfiguration.getDataStorageFormat();
}
@Override

@ -32,8 +32,8 @@ import static org.hyperledger.besu.ethereum.p2p.config.DefaultDiscoveryConfigura
import static org.hyperledger.besu.ethereum.p2p.config.DefaultDiscoveryConfiguration.GOERLI_DISCOVERY_URL;
import static org.hyperledger.besu.ethereum.p2p.config.DefaultDiscoveryConfiguration.MAINNET_BOOTSTRAP_NODES;
import static org.hyperledger.besu.ethereum.p2p.config.DefaultDiscoveryConfiguration.MAINNET_DISCOVERY_URL;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageFormat.BONSAI;
import static org.hyperledger.besu.nat.kubernetes.KubernetesNatManager.DEFAULT_BESU_SERVICE_NAME_FILTER;
import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.BONSAI;
import static org.junit.Assume.assumeThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.contains;

@ -20,8 +20,8 @@ import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.
import org.hyperledger.besu.cli.options.AbstractCLIOptionsTest;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.junit.jupiter.api.Test;

@ -19,7 +19,7 @@ import static java.util.Collections.singletonList;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageFormat.BONSAI;
import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.BONSAI;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;

@ -48,7 +48,6 @@ import org.hyperledger.besu.ethereum.trie.bonsai.storage.BonsaiWorldStateKeyValu
import org.hyperledger.besu.ethereum.trie.bonsai.worldview.BonsaiWorldState;
import org.hyperledger.besu.ethereum.trie.forest.pruner.PrunerConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.WorldStatePreimageStorage;
@ -56,6 +55,7 @@ import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.evm.internal.EvmConfiguration;
import org.hyperledger.besu.metrics.ObservableMetricsSystem;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;
import java.math.BigInteger;

@ -31,7 +31,6 @@ import org.hyperledger.besu.ethereum.api.query.BlockchainQueries;
import org.hyperledger.besu.ethereum.chain.MutableBlockchain;
import org.hyperledger.besu.ethereum.core.Block;
import org.hyperledger.besu.ethereum.core.BlockchainSetupUtil;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.evm.log.Log;
import org.hyperledger.besu.evm.worldstate.WorldView;
@ -40,6 +39,7 @@ import org.hyperledger.besu.plugin.data.BlockHeader;
import org.hyperledger.besu.plugin.data.BlockTraceResult;
import org.hyperledger.besu.plugin.data.TransactionTraceResult;
import org.hyperledger.besu.plugin.services.TraceService;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.tracer.BlockAwareOperationTracer;
import java.util.HashSet;

@ -35,8 +35,8 @@ import org.hyperledger.besu.ethereum.eth.transactions.TransactionPool;
import org.hyperledger.besu.ethereum.mainnet.ValidationResult;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.Capability;
import org.hyperledger.besu.ethereum.transaction.TransactionInvalidReason;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.plugin.data.SyncStatus;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.nio.file.Path;
import java.util.Collections;

@ -45,10 +45,10 @@ import org.hyperledger.besu.ethereum.mainnet.ValidationResult;
import org.hyperledger.besu.ethereum.p2p.network.P2PNetwork;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.Capability;
import org.hyperledger.besu.ethereum.transaction.TransactionInvalidReason;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.metrics.prometheus.MetricsConfiguration;
import org.hyperledger.besu.nat.NatService;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.testutil.BlockTestUtil.ChainResources;
import java.math.BigInteger;

@ -16,7 +16,7 @@ package org.hyperledger.besu.ethereum.api.jsonrpc.bonsai;
import org.hyperledger.besu.ethereum.api.jsonrpc.AbstractJsonRpcHttpBySpecTest;
import org.hyperledger.besu.ethereum.core.BlockchainSetupUtil;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.junit.jupiter.api.BeforeEach;

@ -16,7 +16,7 @@ package org.hyperledger.besu.ethereum.api.jsonrpc.bonsai;
import org.hyperledger.besu.ethereum.api.jsonrpc.AbstractJsonRpcHttpBySpecTest;
import org.hyperledger.besu.ethereum.core.BlockchainSetupUtil;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.junit.jupiter.api.BeforeEach;

@ -16,7 +16,7 @@ package org.hyperledger.besu.ethereum.api.jsonrpc.bonsai;
import org.hyperledger.besu.ethereum.api.jsonrpc.AbstractJsonRpcHttpBySpecTest;
import org.hyperledger.besu.ethereum.core.BlockchainSetupUtil;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.junit.jupiter.api.BeforeEach;

@ -16,7 +16,7 @@ package org.hyperledger.besu.ethereum.api.jsonrpc.forest;
import org.hyperledger.besu.ethereum.api.jsonrpc.AbstractJsonRpcHttpBySpecTest;
import org.hyperledger.besu.ethereum.core.BlockchainSetupUtil;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.junit.jupiter.api.BeforeEach;

@ -16,7 +16,7 @@ package org.hyperledger.besu.ethereum.api.jsonrpc.forest;
import org.hyperledger.besu.ethereum.api.jsonrpc.AbstractJsonRpcHttpBySpecTest;
import org.hyperledger.besu.ethereum.core.BlockchainSetupUtil;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.junit.jupiter.api.BeforeEach;

@ -16,7 +16,7 @@ package org.hyperledger.besu.ethereum.api.jsonrpc.forest;
import org.hyperledger.besu.ethereum.api.jsonrpc.AbstractJsonRpcHttpBySpecTest;
import org.hyperledger.besu.ethereum.core.BlockchainSetupUtil;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.junit.jupiter.api.BeforeEach;

@ -21,7 +21,7 @@ import org.hyperledger.besu.ethereum.api.jsonrpc.AbstractJsonRpcHttpServiceTest;
import org.hyperledger.besu.ethereum.core.Block;
import org.hyperledger.besu.ethereum.core.BlockchainSetupUtil;
import org.hyperledger.besu.ethereum.core.Transaction;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.io.IOException;

@ -14,24 +14,27 @@
*/
package org.hyperledger.besu.ethereum.storage.keyvalue;
import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.BONSAI;
import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.FOREST;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import java.nio.charset.StandardCharsets;
import org.bouncycastle.util.Arrays;
import java.util.EnumSet;
public enum KeyValueSegmentIdentifier implements SegmentIdentifier {
DEFAULT("default".getBytes(StandardCharsets.UTF_8)),
BLOCKCHAIN(new byte[] {1}, true, true),
WORLD_STATE(new byte[] {2}, new int[] {0, 1}, false, true, false),
WORLD_STATE(new byte[] {2}, EnumSet.of(FOREST), false, true, false),
PRIVATE_TRANSACTIONS(new byte[] {3}),
PRIVATE_STATE(new byte[] {4}),
PRUNING_STATE(new byte[] {5}, new int[] {0, 1}),
ACCOUNT_INFO_STATE(new byte[] {6}, new int[] {2}, false, true, false),
CODE_STORAGE(new byte[] {7}, new int[] {2}),
ACCOUNT_STORAGE_STORAGE(new byte[] {8}, new int[] {2}, false, true, false),
TRIE_BRANCH_STORAGE(new byte[] {9}, new int[] {2}, false, true, false),
TRIE_LOG_STORAGE(new byte[] {10}, new int[] {2}, true, false, true),
PRUNING_STATE(new byte[] {5}, EnumSet.of(FOREST)),
ACCOUNT_INFO_STATE(new byte[] {6}, EnumSet.of(BONSAI), false, true, false),
CODE_STORAGE(new byte[] {7}, EnumSet.of(BONSAI)),
ACCOUNT_STORAGE_STORAGE(new byte[] {8}, EnumSet.of(BONSAI), false, true, false),
TRIE_BRANCH_STORAGE(new byte[] {9}, EnumSet.of(BONSAI), false, true, false),
TRIE_LOG_STORAGE(new byte[] {10}, EnumSet.of(BONSAI), true, false, true),
VARIABLES(new byte[] {11}), // formerly GOQUORUM_PRIVATE_WORLD_STATE
// previously supported GoQuorum private states
@ -46,32 +49,37 @@ public enum KeyValueSegmentIdentifier implements SegmentIdentifier {
CHAIN_PRUNER_STATE(new byte[] {18});
private final byte[] id;
private final int[] versionList;
private final EnumSet<DataStorageFormat> formats;
private final boolean containsStaticData;
private final boolean eligibleToHighSpecFlag;
private final boolean staticDataGarbageCollectionEnabled;
KeyValueSegmentIdentifier(final byte[] id) {
this(id, new int[] {0, 1, 2});
this(id, EnumSet.allOf(DataStorageFormat.class));
}
KeyValueSegmentIdentifier(
final byte[] id, final boolean containsStaticData, final boolean eligibleToHighSpecFlag) {
this(id, new int[] {0, 1, 2}, containsStaticData, eligibleToHighSpecFlag, false);
this(
id,
EnumSet.allOf(DataStorageFormat.class),
containsStaticData,
eligibleToHighSpecFlag,
false);
}
KeyValueSegmentIdentifier(final byte[] id, final int[] versionList) {
this(id, versionList, false, false, false);
KeyValueSegmentIdentifier(final byte[] id, final EnumSet<DataStorageFormat> formats) {
this(id, formats, false, false, false);
}
KeyValueSegmentIdentifier(
final byte[] id,
final int[] versionList,
final EnumSet<DataStorageFormat> formats,
final boolean containsStaticData,
final boolean eligibleToHighSpecFlag,
final boolean staticDataGarbageCollectionEnabled) {
this.id = id;
this.versionList = versionList;
this.formats = formats;
this.containsStaticData = containsStaticData;
this.eligibleToHighSpecFlag = eligibleToHighSpecFlag;
this.staticDataGarbageCollectionEnabled = staticDataGarbageCollectionEnabled;
@ -103,7 +111,7 @@ public enum KeyValueSegmentIdentifier implements SegmentIdentifier {
}
@Override
public boolean includeInDatabaseVersion(final int version) {
return Arrays.contains(versionList, version);
public boolean includeInDatabaseFormat(final DataStorageFormat format) {
return formats.contains(format);
}
}
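A minimal sketch, not part of the PR, of how the new format-based lookup can be used to pick the segments that apply to a given database format; SegmentsByFormatExample is a hypothetical class, while KeyValueSegmentIdentifier.includeInDatabaseFormat and DataStorageFormat are the APIs shown above.

import java.util.List;
import java.util.stream.Stream;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;

class SegmentsByFormatExample {

  // Collect the segments that belong to the given database format.
  static List<KeyValueSegmentIdentifier> segmentsFor(final DataStorageFormat format) {
    return Stream.of(KeyValueSegmentIdentifier.values())
        .filter(segment -> segment.includeInDatabaseFormat(format))
        .toList();
  }

  public static void main(final String[] args) {
    // FOREST keeps e.g. WORLD_STATE and PRUNING_STATE; BONSAI keeps the Bonsai-only segments.
    System.out.println(segmentsFor(DataStorageFormat.FOREST));
    System.out.println(segmentsFor(DataStorageFormat.BONSAI));
  }
}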

@ -22,10 +22,10 @@ import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.ethereum.trie.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.forest.storage.ForestWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStatePreimageStorage;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.ObservableMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;

@ -27,12 +27,12 @@ import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.trie.bonsai.storage.flat.FlatDbStrategy;
import org.hyperledger.besu.ethereum.trie.bonsai.storage.flat.FlatDbStrategyProvider;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.evm.account.AccountStorageEntry;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;

@ -27,9 +27,9 @@ import org.hyperledger.besu.ethereum.trie.bonsai.worldview.BonsaiWorldState;
import org.hyperledger.besu.ethereum.trie.forest.storage.ForestWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.forest.worldview.ForestMutableWorldState;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.evm.internal.EvmConfiguration;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;
import org.hyperledger.besu.services.kvstore.SegmentedInMemoryKeyValueStorage;

@ -16,9 +16,9 @@ package org.hyperledger.besu.ethereum.trie.forest.storage;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
import org.hyperledger.besu.util.Subscribers;

@ -16,6 +16,8 @@
package org.hyperledger.besu.ethereum.worldstate;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.immutables.value.Value;
@Value.Immutable

@ -1,41 +0,0 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*
*/
package org.hyperledger.besu.ethereum.worldstate;
public enum DataStorageFormat {
FOREST(1), // Original format. Store all tries
BONSAI(2); // New format. Store one trie, and trie logs to roll forward and backward.
private final int databaseVersion;
DataStorageFormat(final int databaseVersion) {
this.databaseVersion = databaseVersion;
}
public int getDatabaseVersion() {
return databaseVersion;
}
public static String getName(final int databaseVersion) {
for (DataStorageFormat format : DataStorageFormat.values()) {
if (format.getDatabaseVersion() == databaseVersion) {
return format.name();
}
}
return "Unknown";
}
}

@ -15,6 +15,7 @@
package org.hyperledger.besu.ethereum.worldstate;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.Collection;
import java.util.Collections;

@ -36,10 +36,10 @@ import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSpec;
import org.hyperledger.besu.ethereum.mainnet.ScheduleBasedBlockHeaderFunctions;
import org.hyperledger.besu.ethereum.util.RawBlockIterator;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.evm.internal.EvmConfiguration;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.testutil.BlockTestUtil;
import org.hyperledger.besu.testutil.BlockTestUtil.ChainResources;

@ -72,6 +72,7 @@ import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.evm.internal.EvmConfiguration;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBKeyValueStorageFactory;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBMetricsFactory;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.RocksDBFactoryConfiguration;
@ -189,7 +190,6 @@ public abstract class AbstractIsolationTests {
8388608 /*CACHE_CAPACITY*/,
false),
Arrays.asList(KeyValueSegmentIdentifier.values()),
2,
RocksDBMetricsFactory.PUBLIC_ROCKS_DB_METRICS))
.withCommonConfiguration(
new BesuConfiguration() {
@ -205,8 +205,8 @@ public abstract class AbstractIsolationTests {
}
@Override
public int getDatabaseVersion() {
return 2;
public DataStorageFormat getDatabaseFormat() {
return DataStorageFormat.BONSAI;
}
@Override

@ -38,11 +38,11 @@ import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.trie.StorageEntriesCollector;
import org.hyperledger.besu.ethereum.trie.patricia.StoredMerklePatriciaTrie;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;

@ -21,10 +21,10 @@ import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;
import org.hyperledger.besu.services.kvstore.SegmentedInMemoryKeyValueStorage;

@ -24,8 +24,8 @@ import org.hyperledger.besu.datatypes.Wei;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.BlockHeaderTestFixture;
import org.hyperledger.besu.ethereum.core.BlockchainSetupUtil;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.trielogs.TrieLog;
import org.hyperledger.besu.plugin.services.trielogs.TrieLogFactory;

@ -21,8 +21,8 @@ import org.hyperledger.besu.ethereum.eth.sync.ChainDownloader;
import org.hyperledger.besu.ethereum.eth.sync.TrailingPeerRequirements;
import org.hyperledger.besu.ethereum.eth.sync.worldstate.StalledDownloadException;
import org.hyperledger.besu.ethereum.eth.sync.worldstate.WorldStateDownloader;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.services.tasks.TaskCollection;
import org.hyperledger.besu.util.ExceptionUtils;

@ -28,10 +28,10 @@ import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.AccountRangeDataR
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.worldstate.WorldStateDownloader;
import org.hyperledger.besu.ethereum.trie.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.services.tasks.InMemoryTasksPriorityQueues;
import java.time.Clock;

@ -71,10 +71,10 @@ import org.hyperledger.besu.ethereum.p2p.rlpx.wire.DefaultMessage;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.MessageData;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.RawMessage;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.messages.DisconnectMessage.DisconnectReason;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.testutil.TestClock;
import java.math.BigInteger;

@ -35,9 +35,9 @@ import org.hyperledger.besu.ethereum.forkid.ForkIdManager;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.DefaultMessage;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.MessageData;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.testutil.DeterministicEthScheduler;
import org.hyperledger.besu.testutil.DeterministicEthScheduler.TimeoutPolicy;
import org.hyperledger.besu.testutil.TestClock;

@ -42,9 +42,9 @@ import org.hyperledger.besu.ethereum.eth.transactions.TransactionPool;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPoolConfiguration;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPoolFactory;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.testutil.DeterministicEthScheduler;
import org.hyperledger.besu.testutil.TestClock;

@ -20,8 +20,8 @@ import org.hyperledger.besu.ethereum.eth.manager.EthPeers;
import org.hyperledger.besu.ethereum.eth.manager.EthProtocolManager;
import org.hyperledger.besu.ethereum.eth.manager.RespondingEthPeer;
import org.hyperledger.besu.ethereum.eth.manager.snap.SnapProtocolManager;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.Collections;

@ -58,9 +58,9 @@ import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
import org.hyperledger.besu.ethereum.mainnet.MainnetBlockHeaderFunctions;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSpec;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.testutil.TestClock;
import java.util.Collections;

@ -16,7 +16,7 @@ package org.hyperledger.besu.ethereum.eth.sync;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.BlockchainSetupUtil;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;

@ -29,9 +29,9 @@ import org.hyperledger.besu.ethereum.eth.manager.EthProtocolManagerTestUtil;
import org.hyperledger.besu.ethereum.eth.manager.RespondingEthPeer;
import org.hyperledger.besu.ethereum.eth.manager.RespondingEthPeer.Responder;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.evm.internal.EvmConfiguration;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.stream.Stream;

@ -30,8 +30,8 @@ import org.hyperledger.besu.ethereum.eth.sync.range.RangeHeaders;
import org.hyperledger.besu.ethereum.eth.sync.range.SyncTargetRange;
import org.hyperledger.besu.ethereum.mainnet.HeaderValidationMode;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.ArrayList;
import java.util.Collections;

@ -16,7 +16,7 @@ package org.hyperledger.besu.ethereum.eth.sync;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.BlockchainSetupUtil;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;

@ -33,9 +33,9 @@ import org.hyperledger.besu.ethereum.eth.sync.fastsync.FastSyncState;
import org.hyperledger.besu.ethereum.eth.sync.range.RangeHeadersFetcher;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPool;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.List;
import java.util.concurrent.CompletableFuture;

@ -36,9 +36,9 @@ import org.hyperledger.besu.ethereum.eth.sync.fastsync.checkpoint.Checkpoint;
import org.hyperledger.besu.ethereum.eth.sync.fastsync.checkpoint.ImmutableCheckpoint;
import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;

@ -31,8 +31,8 @@ import org.hyperledger.besu.ethereum.eth.manager.EthProtocolManager;
import org.hyperledger.besu.ethereum.eth.manager.EthProtocolManagerTestUtil;
import org.hyperledger.besu.ethereum.eth.manager.RespondingEthPeer;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPool;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.List;
import java.util.concurrent.CompletableFuture;

@ -40,10 +40,10 @@ import org.hyperledger.besu.ethereum.eth.sync.SyncMode;
import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.ArrayList;
import java.util.List;

@ -35,9 +35,9 @@ import org.hyperledger.besu.ethereum.eth.sync.ChainDownloader;
import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.locks.LockSupport;

@ -34,8 +34,8 @@ import org.hyperledger.besu.ethereum.eth.sync.fastsync.worldstate.FastWorldState
import org.hyperledger.besu.ethereum.eth.sync.fastsync.worldstate.NodeDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.worldstate.StalledDownloadException;
import org.hyperledger.besu.ethereum.eth.sync.worldstate.WorldStateDownloader;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.services.tasks.TaskCollection;
import java.nio.file.Path;

@ -32,9 +32,9 @@ import org.hyperledger.besu.ethereum.eth.manager.RespondingEthPeer.Responder;
import org.hyperledger.besu.ethereum.eth.sync.fastsync.PivotBlockConfirmer.ContestedPivotBlockException;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPool;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;

@ -34,9 +34,9 @@ import org.hyperledger.besu.ethereum.eth.manager.RespondingEthPeer.Responder;
import org.hyperledger.besu.ethereum.eth.peervalidation.PeerValidator;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPool;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.util.ExceptionUtils;
import java.util.Optional;

@ -29,9 +29,9 @@ import org.hyperledger.besu.ethereum.eth.sync.worldstate.WorldStateDownloadProce
import org.hyperledger.besu.ethereum.trie.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.forest.storage.ForestWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;
import org.hyperledger.besu.services.tasks.InMemoryTasksPriorityQueues;
import org.hyperledger.besu.testutil.TestClock;

@ -42,9 +42,9 @@ import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.MessageData;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.messages.DisconnectMessage.DisconnectReason;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.ArrayList;
import java.util.List;

@ -31,9 +31,9 @@ import org.hyperledger.besu.ethereum.eth.sync.ChainDownloader;
import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Stream;

@ -29,9 +29,9 @@ import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.TrailingPeerRequirements;
import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.stream.Stream;

@ -35,9 +35,9 @@ import org.hyperledger.besu.ethereum.eth.manager.RespondingEthPeer;
import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.state.SyncTarget;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;

@ -41,9 +41,9 @@ import org.hyperledger.besu.ethereum.eth.sync.worldstate.WorldStateDownloadProce
import org.hyperledger.besu.ethereum.trie.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.forest.storage.ForestWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;
import org.hyperledger.besu.services.tasks.InMemoryTasksPriorityQueues;
import org.hyperledger.besu.testutil.TestClock;

@ -25,10 +25,10 @@ import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.trie.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.forest.storage.ForestWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;
import java.util.List;

@ -29,9 +29,9 @@ import org.hyperledger.besu.ethereum.eth.manager.EthProtocolManagerTestUtil;
import org.hyperledger.besu.ethereum.eth.sync.tasks.exceptions.InvalidBlockException;
import org.hyperledger.besu.ethereum.mainnet.HeaderValidationMode;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.Arrays;
import java.util.Collections;

@ -69,7 +69,7 @@ Calculated : ${currentHash}
tasks.register('checkAPIChanges', FileStateChecker) {
description = "Checks that the API for the Plugin-API project does not change without deliberate thought"
files = sourceSets.main.allJava.files
knownHash = 'Pi7Veo9W9kmjDJJNB89UTUXbYyRmoN6osK/tD163h3E='
knownHash = 'jvIsInEUQ/NaxWCGkQIb72tuZsx5288Ownn2F6rsvjQ='
}
check.dependsOn('checkAPIChanges')

@ -16,6 +16,7 @@ package org.hyperledger.besu.plugin.services;
import org.hyperledger.besu.datatypes.Wei;
import org.hyperledger.besu.plugin.Unstable;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.nio.file.Path;
@ -37,12 +38,12 @@ public interface BesuConfiguration extends BesuService {
Path getDataPath();
/**
* Database version. This sets the list of segmentIdentifiers that should be initialized.
* Database format. This sets the list of segmentIdentifiers that should be initialized.
*
* @return Database version.
* @return Database format.
*/
@Unstable
int getDatabaseVersion();
DataStorageFormat getDatabaseFormat();
/**
* The runtime value of the min gas price

@ -0,0 +1,23 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.plugin.services.storage;
/** Supported database storage format */
public enum DataStorageFormat {
/** Original format. Store all tries */
FOREST,
/** New format. Store one trie, and trie logs to roll forward and backward */
BONSAI;
}
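
A minimal usage sketch (hypothetical, not part of this change, and assuming the Besu plugin-api is on the classpath): a plugin can now branch on the configured storage format returned by BesuConfiguration.getDatabaseFormat() instead of comparing integer database versions.

import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;

/** Hypothetical example only: reads the configured storage format from BesuConfiguration. */
public class DatabaseFormatExample {

  static String describe(final BesuConfiguration configuration) {
    final DataStorageFormat format = configuration.getDatabaseFormat();
    // Exhaustive switch: the enum only defines FOREST and BONSAI.
    return switch (format) {
      case FOREST -> "FOREST: all tries are stored";
      case BONSAI -> "BONSAI: a single trie plus trie logs";
    };
  }
}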

@ -39,12 +39,12 @@ public interface SegmentIdentifier {
/**
* Not all segments are in all DB versions. This queries the segment to see if it is in the DB
* version.
* format.
*
* @param version Version of the DB
* @return true if the segment is in that DB version
* @param format Version of the DB
* @return true if the segment is in that DB format
*/
default boolean includeInDatabaseVersion(final int version) {
default boolean includeInDatabaseFormat(final DataStorageFormat format) {
return true;
}
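
For illustration (hypothetical helper, not part of this change), callers can narrow a list of configured segments to those valid for the detected format, which is how the RocksDB factory uses this method further below.

import java.util.List;

import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;

/** Hypothetical example only: keeps the segments that belong to the given database format. */
final class SegmentFormatFilter {

  static List<SegmentIdentifier> forFormat(
      final List<SegmentIdentifier> configuredSegments, final DataStorageFormat format) {
    return configuredSegments.stream()
        .filter(segment -> segment.includeInDatabaseFormat(format))
        .toList();
  }
}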

@ -41,6 +41,7 @@ dependencies {
implementation project(':util')
implementation 'com.fasterxml.jackson.core:jackson-databind'
implementation 'com.fasterxml.jackson.datatype:jackson-datatype-jdk8'
implementation 'com.google.guava:guava'
implementation 'info.picocli:picocli'
implementation 'io.opentelemetry:opentelemetry-api'

@ -22,11 +22,15 @@ import org.hyperledger.besu.plugin.services.storage.PrivacyKeyValueStorageFactor
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.DatabaseMetadata;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.PrivacyVersionedStorageFormat;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.VersionedStorageFormat;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.EnumSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import org.slf4j.Logger;
@ -39,12 +43,13 @@ import org.slf4j.LoggerFactory;
public class RocksDBKeyValuePrivacyStorageFactory implements PrivacyKeyValueStorageFactory {
private static final Logger LOG =
LoggerFactory.getLogger(RocksDBKeyValuePrivacyStorageFactory.class);
private static final int DEFAULT_VERSION = 1;
private static final Set<Integer> SUPPORTED_VERSIONS = Set.of(1);
private static final Set<PrivacyVersionedStorageFormat> SUPPORTED_VERSIONS =
EnumSet.of(
PrivacyVersionedStorageFormat.FOREST_WITH_VARIABLES,
PrivacyVersionedStorageFormat.BONSAI_WITH_VARIABLES);
private static final String PRIVATE_DATABASE_PATH = "private";
private final RocksDBKeyValueStorageFactory publicFactory;
private Integer databaseVersion;
private DatabaseMetadata databaseMetadata;
/**
* Instantiates a new RocksDb key value privacy storage factory.
@ -66,9 +71,9 @@ public class RocksDBKeyValuePrivacyStorageFactory implements PrivacyKeyValueStor
final BesuConfiguration commonConfiguration,
final MetricsSystem metricsSystem)
throws StorageException {
if (databaseVersion == null) {
if (databaseMetadata == null) {
try {
databaseVersion = readDatabaseVersion(commonConfiguration);
databaseMetadata = readDatabaseMetadata(commonConfiguration);
} catch (final IOException e) {
throw new StorageException("Failed to retrieve the RocksDB database meta version", e);
}
@ -83,9 +88,9 @@ public class RocksDBKeyValuePrivacyStorageFactory implements PrivacyKeyValueStor
final BesuConfiguration commonConfiguration,
final MetricsSystem metricsSystem)
throws StorageException {
if (databaseVersion == null) {
if (databaseMetadata == null) {
try {
databaseVersion = readDatabaseVersion(commonConfiguration);
databaseMetadata = readDatabaseMetadata(commonConfiguration);
} catch (final IOException e) {
throw new StorageException("Failed to retrieve the RocksDB database meta version", e);
}
@ -114,37 +119,118 @@ public class RocksDBKeyValuePrivacyStorageFactory implements PrivacyKeyValueStor
* private database exists there may be a "privacyVersion" field in the metadata file otherwise
* use the default version
*/
private int readDatabaseVersion(final BesuConfiguration commonConfiguration) throws IOException {
private DatabaseMetadata readDatabaseMetadata(final BesuConfiguration commonConfiguration)
throws IOException {
final Path dataDir = commonConfiguration.getDataPath();
final boolean privacyDatabaseExists =
commonConfiguration.getStoragePath().resolve(PRIVATE_DATABASE_PATH).toFile().exists();
final int privacyDatabaseVersion;
if (privacyDatabaseExists) {
privacyDatabaseVersion = DatabaseMetadata.lookUpFrom(dataDir).maybePrivacyVersion().orElse(1);
LOG.info(
"Existing private database detected at {}. Version {}", dataDir, privacyDatabaseVersion);
final boolean privacyMetadataExists = DatabaseMetadata.isPresent(dataDir);
DatabaseMetadata privacyMetadata;
if (privacyDatabaseExists && !privacyMetadataExists) {
throw new StorageException(
"Privacy database exists but metadata file not found, without it there is no safe way to open the database");
}
if (privacyMetadataExists) {
final var existingPrivacyMetadata = DatabaseMetadata.lookUpFrom(dataDir);
final var maybeExistingPrivacyVersion =
existingPrivacyMetadata.getVersionedStorageFormat().getPrivacyVersion();
if (maybeExistingPrivacyVersion.isEmpty()) {
privacyMetadata = existingPrivacyMetadata.upgradeToPrivacy();
privacyMetadata.writeToDirectory(dataDir);
LOG.info(
"Upgraded existing database at {} to privacy database. Metadata {}",
dataDir,
existingPrivacyMetadata);
} else {
privacyMetadata = existingPrivacyMetadata;
final int existingPrivacyVersion = maybeExistingPrivacyVersion.getAsInt();
final var runtimeVersion =
PrivacyVersionedStorageFormat.defaultForNewDB(commonConfiguration.getDatabaseFormat());
if (existingPrivacyVersion > runtimeVersion.getPrivacyVersion().getAsInt()) {
final var maybeDowngradedMetadata =
handleVersionDowngrade(dataDir, privacyMetadata, runtimeVersion);
if (maybeDowngradedMetadata.isPresent()) {
privacyMetadata = maybeDowngradedMetadata.get();
privacyMetadata.writeToDirectory(dataDir);
}
} else if (existingPrivacyVersion < runtimeVersion.getPrivacyVersion().getAsInt()) {
final var maybeUpgradedMetadata =
handleVersionUpgrade(dataDir, privacyMetadata, runtimeVersion);
if (maybeUpgradedMetadata.isPresent()) {
privacyMetadata = maybeUpgradedMetadata.get();
privacyMetadata.writeToDirectory(dataDir);
}
} else {
LOG.info("Existing privacy database at {}. Metadata {}", dataDir, privacyMetadata);
}
}
} else {
privacyDatabaseVersion = DEFAULT_VERSION;
privacyMetadata = DatabaseMetadata.defaultForNewPrivateDb();
LOG.info(
"No existing private database detected at {}. Using version {}",
"No existing private database at {}. Using default metadata for new db {}",
dataDir,
privacyDatabaseVersion);
privacyMetadata);
Files.createDirectories(dataDir);
new DatabaseMetadata(publicFactory.getDefaultVersion(), privacyDatabaseVersion)
.writeToDirectory(dataDir);
privacyMetadata.writeToDirectory(dataDir);
}
if (!SUPPORTED_VERSIONS.contains(privacyDatabaseVersion)) {
final String message = "Unsupported RocksDB Metadata version of: " + privacyDatabaseVersion;
if (!SUPPORTED_VERSIONS.contains(privacyMetadata.getVersionedStorageFormat())) {
final String message = "Unsupported RocksDB Metadata version of: " + privacyMetadata;
LOG.error(message);
throw new StorageException(message);
}
return privacyDatabaseVersion;
return privacyMetadata;
}
private Optional<DatabaseMetadata> handleVersionDowngrade(
final Path dataDir,
final DatabaseMetadata existingPrivacyMetadata,
final VersionedStorageFormat runtimeVersion) {
// here we put the code, or the messages, to perform an automated, or manual, downgrade of the
// database, if supported, otherwise we just prevent Besu from starting since it will not
// recognize the newer version.
// In case we do an automated downgrade, then we also need to update the metadata on disk to
// reflect the change to the runtime version, and return it.
// for the moment there are no supported automated downgrades, so we just fail.
String error =
String.format(
"Database unsafe downgrade detect: DB at %s is %s with version %s but version %s is expected. "
+ "Please check your config and review release notes for supported downgrade procedures.",
dataDir,
existingPrivacyMetadata.getVersionedStorageFormat().getFormat().name(),
existingPrivacyMetadata.getVersionedStorageFormat().getVersion(),
runtimeVersion.getVersion());
throw new StorageException(error);
}
private Optional<DatabaseMetadata> handleVersionUpgrade(
final Path dataDir,
final DatabaseMetadata existingPrivacyMetadata,
final VersionedStorageFormat runtimeVersion) {
// here we put the code, or the messages, to perform an automated, or manual, upgrade of the
// database.
// In case we do an automated upgrade, then we also need to update the metadata on disk to
// reflect the change to the runtime version, and return it.
// for the moment there are no planned automated upgrades, so we just fail.
String error =
String.format(
"Database unsafe upgrade detect: DB at %s is %s with version %s but version %s is expected. "
+ "Please check your config and review release notes for supported upgrade procedures.",
dataDir,
existingPrivacyMetadata.getVersionedStorageFormat().getFormat().name(),
existingPrivacyMetadata.getVersionedStorageFormat().getVersion(),
runtimeVersion.getVersion());
throw new StorageException(error);
}
@Override
public int getVersion() {
return databaseVersion;
return databaseMetadata.getVersionedStorageFormat().getPrivacyVersion().getAsInt();
}
}

@ -14,7 +14,9 @@
*/
package org.hyperledger.besu.plugin.services.storage.rocksdb;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import static org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.BaseVersionedStorageFormat.BONSAI_WITH_VARIABLES;
import static org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.BaseVersionedStorageFormat.FOREST_WITH_VARIABLES;
import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.exception.StorageException;
@ -22,10 +24,12 @@ import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageFactory;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.BaseVersionedStorageFormat;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.DatabaseMetadata;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.RocksDBConfiguration;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.RocksDBConfigurationBuilder;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.RocksDBFactoryConfiguration;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.VersionedStorageFormat;
import org.hyperledger.besu.plugin.services.storage.rocksdb.segmented.OptimisticRocksDBColumnarKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.rocksdb.segmented.RocksDBColumnarKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.rocksdb.segmented.TransactionDBRocksDBColumnarKeyValueStorage;
@ -34,8 +38,9 @@ import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorageAdapter;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.EnumSet;
import java.util.List;
import java.util.Set;
import java.util.Optional;
import java.util.function.Supplier;
import java.util.stream.Collectors;
@ -49,13 +54,11 @@ import org.slf4j.LoggerFactory;
public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {
private static final Logger LOG = LoggerFactory.getLogger(RocksDBKeyValueStorageFactory.class);
private static final int DEFAULT_VERSION = 1;
private static final Set<Integer> SUPPORTED_VERSIONS = Set.of(1, 2);
private static final EnumSet<BaseVersionedStorageFormat> SUPPORTED_VERSIONED_FORMATS =
EnumSet.of(FOREST_WITH_VARIABLES, BONSAI_WITH_VARIABLES);
private static final String NAME = "rocksdb";
private final RocksDBMetricsFactory rocksDBMetricsFactory;
private final int defaultVersion;
private Integer databaseVersion;
private DatabaseMetadata databaseMetadata;
private RocksDBColumnarKeyValueStorage segmentedStorage;
private RocksDBConfiguration rocksDBConfiguration;
@ -69,19 +72,16 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {
* @param configuration the configuration
* @param configuredSegments the segments
* @param ignorableSegments the ignorable segments
* @param defaultVersion the default version
* @param rocksDBMetricsFactory the rocks db metrics factory
*/
public RocksDBKeyValueStorageFactory(
final Supplier<RocksDBFactoryConfiguration> configuration,
final List<SegmentIdentifier> configuredSegments,
final List<SegmentIdentifier> ignorableSegments,
final int defaultVersion,
final RocksDBMetricsFactory rocksDBMetricsFactory) {
this.configuration = configuration;
this.configuredSegments = configuredSegments;
this.ignorableSegments = ignorableSegments;
this.defaultVersion = defaultVersion;
this.rocksDBMetricsFactory = rocksDBMetricsFactory;
}
@ -90,59 +90,13 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {
*
* @param configuration the configuration
* @param configuredSegments the segments
* @param defaultVersion the default version
* @param rocksDBMetricsFactory the rocks db metrics factory
*/
public RocksDBKeyValueStorageFactory(
final Supplier<RocksDBFactoryConfiguration> configuration,
final List<SegmentIdentifier> configuredSegments,
final int defaultVersion,
final RocksDBMetricsFactory rocksDBMetricsFactory) {
this(configuration, configuredSegments, List.of(), defaultVersion, rocksDBMetricsFactory);
}
/**
* Instantiates a new Rocks db key value storage factory.
*
* @param configuration the configuration
* @param configuredSegments the segments
* @param ignorableSegments the ignorable segments
* @param rocksDBMetricsFactory the rocks db metrics factory
*/
public RocksDBKeyValueStorageFactory(
final Supplier<RocksDBFactoryConfiguration> configuration,
final List<SegmentIdentifier> configuredSegments,
final List<SegmentIdentifier> ignorableSegments,
final RocksDBMetricsFactory rocksDBMetricsFactory) {
this(
configuration,
configuredSegments,
ignorableSegments,
DEFAULT_VERSION,
rocksDBMetricsFactory);
}
/**
* Instantiates a new Rocks db key value storage factory.
*
* @param configuration the configuration
* @param configuredSegments the segments
* @param rocksDBMetricsFactory the rocks db metrics factory
*/
public RocksDBKeyValueStorageFactory(
final Supplier<RocksDBFactoryConfiguration> configuration,
final List<SegmentIdentifier> configuredSegments,
final RocksDBMetricsFactory rocksDBMetricsFactory) {
this(configuration, configuredSegments, List.of(), DEFAULT_VERSION, rocksDBMetricsFactory);
}
/**
* Gets default version.
*
* @return the default version
*/
int getDefaultVersion() {
return defaultVersion;
this(configuration, configuredSegments, List.of(), rocksDBMetricsFactory);
}
@Override
@ -166,8 +120,6 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {
final BesuConfiguration commonConfiguration,
final MetricsSystem metricsSystem)
throws StorageException {
final boolean isForestStorageFormat =
DataStorageFormat.FOREST.getDatabaseVersion() == commonConfiguration.getDatabaseVersion();
if (requiresInit()) {
init(commonConfiguration);
}
@ -182,43 +134,43 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {
.collect(Collectors.joining(", ")));
}
// It's probably a good idea for the creation logic to be entirely dependent on the database
// version. Introducing intermediate booleans that represent database properties and dispatching
// creation logic based on them is error-prone.
switch (databaseVersion) {
case 1, 2 -> {
if (segmentedStorage == null) {
final List<SegmentIdentifier> segmentsForVersion =
configuredSegments.stream()
.filter(segmentId -> segmentId.includeInDatabaseVersion(databaseVersion))
.collect(Collectors.toList());
if (isForestStorageFormat) {
LOG.debug("FOREST mode detected, using TransactionDB.");
segmentedStorage =
new TransactionDBRocksDBColumnarKeyValueStorage(
rocksDBConfiguration,
segmentsForVersion,
ignorableSegments,
metricsSystem,
rocksDBMetricsFactory);
} else {
LOG.debug("Using OptimisticTransactionDB.");
segmentedStorage =
new OptimisticRocksDBColumnarKeyValueStorage(
rocksDBConfiguration,
segmentsForVersion,
ignorableSegments,
metricsSystem,
rocksDBMetricsFactory);
}
if (segmentedStorage == null) {
final List<SegmentIdentifier> segmentsForFormat =
configuredSegments.stream()
.filter(
segmentId ->
segmentId.includeInDatabaseFormat(
databaseMetadata.getVersionedStorageFormat().getFormat()))
.toList();
// It's probably a good idea for the creation logic to be entirely dependent on the database
// format. Introducing intermediate booleans that represent database properties and
// dispatching creation logic based on them is error-prone.
switch (databaseMetadata.getVersionedStorageFormat().getFormat()) {
case FOREST -> {
LOG.debug("FOREST mode detected, using TransactionDB.");
segmentedStorage =
new TransactionDBRocksDBColumnarKeyValueStorage(
rocksDBConfiguration,
segmentsForFormat,
ignorableSegments,
metricsSystem,
rocksDBMetricsFactory);
}
case BONSAI -> {
LOG.debug("BONSAI mode detected, Using OptimisticTransactionDB.");
segmentedStorage =
new OptimisticRocksDBColumnarKeyValueStorage(
rocksDBConfiguration,
segmentsForFormat,
ignorableSegments,
metricsSystem,
rocksDBMetricsFactory);
}
return segmentedStorage;
}
default -> throw new IllegalStateException(
String.format(
"Developer error: A supported database version (%d) was detected but there is no associated creation logic.",
databaseVersion));
}
return segmentedStorage;
}
/**
@ -233,7 +185,7 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {
private void init(final BesuConfiguration commonConfiguration) {
try {
databaseVersion = readDatabaseVersion(commonConfiguration);
databaseMetadata = readDatabaseMetadata(commonConfiguration);
} catch (final IOException e) {
final String message =
"Failed to retrieve the RocksDB database meta version: "
@ -251,45 +203,134 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {
return segmentedStorage == null;
}
private int readDatabaseVersion(final BesuConfiguration commonConfiguration) throws IOException {
private DatabaseMetadata readDatabaseMetadata(final BesuConfiguration commonConfiguration)
throws IOException {
final Path dataDir = commonConfiguration.getDataPath();
final boolean databaseExists = commonConfiguration.getStoragePath().toFile().exists();
final boolean dataDirExists = dataDir.toFile().exists();
final int databaseVersion;
if (databaseExists) {
databaseVersion = DatabaseMetadata.lookUpFrom(dataDir).getVersion();
if (databaseVersion != commonConfiguration.getDatabaseVersion()) {
String error =
String.format(
"Mismatch: DB at %s is %s (Version %s) but config expects %s (Version %s). Please check your config.",
dataDir,
DataStorageFormat.getName(databaseVersion),
databaseVersion,
DataStorageFormat.getName(commonConfiguration.getDatabaseVersion()),
commonConfiguration.getDatabaseVersion());
throw new StorageException(error);
final boolean databaseExists = commonConfiguration.getStoragePath().toFile().exists();
final boolean metadataExists = DatabaseMetadata.isPresent(dataDir);
DatabaseMetadata metadata;
if (databaseExists && !metadataExists) {
throw new StorageException(
"Database exists but metadata file not found, without it there is no safe way to open the database");
}
if (metadataExists) {
metadata = DatabaseMetadata.lookUpFrom(dataDir);
if (!metadata
.getVersionedStorageFormat()
.getFormat()
.equals(commonConfiguration.getDatabaseFormat())) {
handleFormatMismatch(commonConfiguration, dataDir, metadata);
}
LOG.info(
"Existing database detected at {}. Version {}. Compacting database...",
dataDir,
databaseVersion);
final var runtimeVersion =
BaseVersionedStorageFormat.defaultForNewDB(commonConfiguration.getDatabaseFormat());
if (metadata.getVersionedStorageFormat().getVersion() > runtimeVersion.getVersion()) {
final var maybeDowngradedMetadata =
handleVersionDowngrade(dataDir, metadata, runtimeVersion);
if (maybeDowngradedMetadata.isPresent()) {
metadata = maybeDowngradedMetadata.get();
metadata.writeToDirectory(dataDir);
}
}
if (metadata.getVersionedStorageFormat().getVersion() < runtimeVersion.getVersion()) {
final var maybeUpgradedMetadata = handleVersionUpgrade(dataDir, metadata, runtimeVersion);
if (maybeUpgradedMetadata.isPresent()) {
metadata = maybeUpgradedMetadata.get();
metadata.writeToDirectory(dataDir);
}
}
LOG.info("Existing database at {}. Metadata {}. Processing WAL...", dataDir, metadata);
} else {
databaseVersion = commonConfiguration.getDatabaseVersion();
LOG.info("No existing database detected at {}. Using version {}", dataDir, databaseVersion);
metadata = DatabaseMetadata.defaultForNewDb(commonConfiguration.getDatabaseFormat());
LOG.info(
"No existing database at {}. Using default metadata for new db {}", dataDir, metadata);
if (!dataDirExists) {
Files.createDirectories(dataDir);
}
new DatabaseMetadata(databaseVersion).writeToDirectory(dataDir);
metadata.writeToDirectory(dataDir);
}
if (!SUPPORTED_VERSIONS.contains(databaseVersion)) {
final String message = "Unsupported RocksDB Metadata version of: " + databaseVersion;
if (!isSupportedVersionedFormat(metadata.getVersionedStorageFormat())) {
final String message = "Unsupported RocksDB metadata: " + metadata;
LOG.error(message);
throw new StorageException(message);
}
return databaseVersion;
return metadata;
}
private static void handleFormatMismatch(
final BesuConfiguration commonConfiguration,
final Path dataDir,
final DatabaseMetadata existingMetadata) {
String error =
String.format(
"Database format mismatch: DB at %s is %s but config expects %s. "
+ "Please check your config.",
dataDir,
existingMetadata.getVersionedStorageFormat().getFormat().name(),
commonConfiguration.getDatabaseFormat());
throw new StorageException(error);
}
private Optional<DatabaseMetadata> handleVersionDowngrade(
final Path dataDir,
final DatabaseMetadata existingMetadata,
final BaseVersionedStorageFormat runtimeVersion) {
// here we put the code, or the messages, to perform an automated, or manual, downgrade of the
// database, if supported, otherwise we just prevent Besu from starting since it will not
// recognize the newer version.
// In case we do an automated downgrade, then we also need to update the metadata on disk to
// reflect the change to the runtime version, and return it.
// for the moment there are no supported automated downgrades, so we just fail.
String error =
String.format(
"Database unsafe downgrade detect: DB at %s is %s with version %s but version %s is expected. "
+ "Please check your config and review release notes for supported downgrade procedures.",
dataDir,
existingMetadata.getVersionedStorageFormat().getFormat().name(),
existingMetadata.getVersionedStorageFormat().getVersion(),
runtimeVersion.getVersion());
throw new StorageException(error);
}
private Optional<DatabaseMetadata> handleVersionUpgrade(
final Path dataDir,
final DatabaseMetadata existingMetadata,
final BaseVersionedStorageFormat runtimeVersion) {
// here we put the code, or the messages, to perform an automated, or manual, upgrade of the
// database.
// In case we do an automated upgrade, then we also need to update the metadata on disk to
// reflect the change to the runtime version, and return it.
// for the moment there are no planned automated upgrades, so we just fail.
String error =
String.format(
"Database unsafe downgrade detect: DB at %s is %s with version %s but version %s is expected. "
+ "Please check your config and review release notes for supported downgrade procedures.",
dataDir,
existingMetadata.getVersionedStorageFormat().getFormat().name(),
existingMetadata.getVersionedStorageFormat().getVersion(),
runtimeVersion.getVersion());
throw new StorageException(error);
}
private boolean isSupportedVersionedFormat(final VersionedStorageFormat versionedStorageFormat) {
return SUPPORTED_VERSIONED_FORMATS.stream()
.anyMatch(
vsf ->
vsf.getFormat().equals(versionedStorageFormat.getFormat())
&& vsf.getVersion() == versionedStorageFormat.getVersion());
}
@Override

@ -0,0 +1,78 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.plugin.services.storage.rocksdb.configuration;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.OptionalInt;
/** Base versioned data storage format */
public enum BaseVersionedStorageFormat implements VersionedStorageFormat {
/** Original Forest version, not used since replaced by FOREST_WITH_VARIABLES */
FOREST_ORIGINAL(DataStorageFormat.FOREST, 1),
/**
* Current Forest version, with blockchain variables in a dedicated column family, in order to
* make BlobDB more effective
*/
FOREST_WITH_VARIABLES(DataStorageFormat.FOREST, 2),
/** Original Bonsai version, not used since replaced by BONSAI_WITH_VARIABLES */
BONSAI_ORIGINAL(DataStorageFormat.BONSAI, 1),
/**
* Current Bonsai version, with blockchain variables in a dedicated column family, in order to
* make BlobDB more effective
*/
BONSAI_WITH_VARIABLES(DataStorageFormat.BONSAI, 2);
private final DataStorageFormat format;
private final int version;
BaseVersionedStorageFormat(final DataStorageFormat format, final int version) {
this.format = format;
this.version = version;
}
/**
* Return the default version for new db for a specific format
*
* @param format data storage format
* @return the version to use for new db
*/
public static BaseVersionedStorageFormat defaultForNewDB(final DataStorageFormat format) {
return switch (format) {
case FOREST -> FOREST_WITH_VARIABLES;
case BONSAI -> BONSAI_WITH_VARIABLES;
};
}
@Override
public DataStorageFormat getFormat() {
return format;
}
@Override
public int getVersion() {
return version;
}
@Override
public OptionalInt getPrivacyVersion() {
return OptionalInt.empty();
}
@Override
public String toString() {
return "BaseVersionedStorageFormat{" + "format=" + format + ", version=" + version + '}';
}
}
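
A hypothetical snippet (not part of this change) showing how the version carried by this enum feeds the upgrade/downgrade checks in the RocksDB factory: the on-disk entry is compared with the default for a new database of the same format.

import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.BaseVersionedStorageFormat;

/** Hypothetical example only: compares an on-disk versioned format with the runtime default. */
public class VersionComparisonExample {

  public static void main(final String[] args) {
    final BaseVersionedStorageFormat onDisk = BaseVersionedStorageFormat.FOREST_ORIGINAL; // version 1
    final BaseVersionedStorageFormat runtime =
        BaseVersionedStorageFormat.defaultForNewDB(DataStorageFormat.FOREST); // version 2
    if (onDisk.getVersion() < runtime.getVersion()) {
      System.out.println("Upgrade needed: " + onDisk + " -> " + runtime);
    } else if (onDisk.getVersion() > runtime.getVersion()) {
      System.out.println("Unsafe downgrade: " + onDisk + " -> " + runtime);
    } else {
      System.out.println("Versions match: " + runtime);
    }
  }
}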

@ -14,19 +14,23 @@
*/
package org.hyperledger.besu.plugin.services.storage.rocksdb.configuration;
import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Optional;
import java.util.Arrays;
import java.util.OptionalInt;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonGetter;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonSetter;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.DatabindException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -35,79 +39,43 @@ public class DatabaseMetadata {
private static final Logger LOG = LoggerFactory.getLogger(DatabaseMetadata.class);
private static final String METADATA_FILENAME = "DATABASE_METADATA.json";
private static final ObjectMapper MAPPER = new ObjectMapper();
private final int version;
private Optional<Integer> privacyVersion;
/**
* Instantiates a new Database metadata.
*
* @param version the version
*/
@JsonCreator
public DatabaseMetadata(@JsonProperty("version") final int version) {
this(version, Optional.empty());
}
/**
* Instantiates a new Database metadata.
*
* @param version the version
* @param privacyVersion the privacy version
*/
public DatabaseMetadata(final int version, final Optional<Integer> privacyVersion) {
this.version = version;
this.privacyVersion = privacyVersion;
}
/**
* Instantiates a new Database metadata.
*
* @param version the version
* @param privacyVersion the privacy version
*/
public DatabaseMetadata(final int version, final int privacyVersion) {
this(version, Optional.of(privacyVersion));
private static final ObjectMapper MAPPER =
new ObjectMapper()
.registerModule(new Jdk8Module())
.setSerializationInclusion(JsonInclude.Include.NON_ABSENT)
.enable(SerializationFeature.INDENT_OUTPUT);
private final VersionedStorageFormat versionedStorageFormat;
private DatabaseMetadata(final VersionedStorageFormat versionedStorageFormat) {
this.versionedStorageFormat = versionedStorageFormat;
}
/**
* Gets version.
* Return the default metadata for new db for a specific format
*
* @return the version
* @param dataStorageFormat data storage format
* @return the metadata to use for new db
*/
public int getVersion() {
return version;
public static DatabaseMetadata defaultForNewDb(final DataStorageFormat dataStorageFormat) {
return new DatabaseMetadata(BaseVersionedStorageFormat.defaultForNewDB(dataStorageFormat));
}
/**
* Sets privacy version.
* Return the default metadata for new db when the privacy feature is enabled
*
* @param privacyVersion the privacy version
* @return the metadata to use for new db
*/
@JsonSetter("privacyVersion")
public void setPrivacyVersion(final int privacyVersion) {
this.privacyVersion = Optional.of(privacyVersion);
public static DatabaseMetadata defaultForNewPrivateDb() {
return new DatabaseMetadata(PrivacyVersionedStorageFormat.FOREST_WITH_VARIABLES);
}
/**
* Gets privacy version.
* Return the versioned storage format contained in this metadata
*
* @return the privacy version
* @return the versioned storage format
*/
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonGetter("privacyVersion")
public Integer getPrivacyVersion() {
return privacyVersion.orElse(null);
}
/**
* Maybe privacy version.
*
* @return the optional
*/
public Optional<Integer> maybePrivacyVersion() {
return privacyVersion;
public VersionedStorageFormat getVersionedStorageFormat() {
return versionedStorageFormat;
}
/**
@ -122,6 +90,17 @@ public class DatabaseMetadata {
return resolveDatabaseMetadata(getDefaultMetadataFile(dataDir));
}
/**
* Is the metadata file present in the specified data dir?
*
* @param dataDir the dir to search for the metadata file
* @return true if the metadata file exists, false otherwise
* @throws IOException if there is an error trying to access the metadata file
*/
public static boolean isPresent(final Path dataDir) throws IOException {
return getDefaultMetadataFile(dataDir).exists();
}
/**
* Write to directory.
*
@ -129,16 +108,17 @@ public class DatabaseMetadata {
* @throws IOException the io exception
*/
public void writeToDirectory(final Path dataDir) throws IOException {
try {
final DatabaseMetadata currentMetadata =
MAPPER.readValue(getDefaultMetadataFile(dataDir), DatabaseMetadata.class);
if (currentMetadata.maybePrivacyVersion().isPresent()) {
setPrivacyVersion(currentMetadata.getPrivacyVersion());
}
MAPPER.writeValue(getDefaultMetadataFile(dataDir), this);
} catch (FileNotFoundException fnfe) {
MAPPER.writeValue(getDefaultMetadataFile(dataDir), this);
}
writeToFile(getDefaultMetadataFile(dataDir));
}
private void writeToFile(final File file) throws IOException {
MAPPER.writeValue(
file,
new V2(
new MetadataV2(
versionedStorageFormat.getFormat(),
versionedStorageFormat.getVersion(),
versionedStorageFormat.getPrivacyVersion())));
}
private static File getDefaultMetadataFile(final Path dataDir) {
@ -147,15 +127,129 @@ public class DatabaseMetadata {
private static DatabaseMetadata resolveDatabaseMetadata(final File metadataFile)
throws IOException {
DatabaseMetadata databaseMetadata;
try {
databaseMetadata = MAPPER.readValue(metadataFile, DatabaseMetadata.class);
try {
return tryReadAndMigrateV1(metadataFile);
} catch (DatabindException dbe) {
return tryReadV2(metadataFile);
}
} catch (FileNotFoundException fnfe) {
databaseMetadata = new DatabaseMetadata(1, 1);
throw new StorageException(
"Database exists but metadata file "
+ metadataFile.toString()
+ " not found, without it there is no safe way to open the database",
fnfe);
} catch (JsonProcessingException jpe) {
throw new IllegalStateException(
String.format("Invalid metadata file %s", metadataFile.getAbsolutePath()), jpe);
}
return databaseMetadata;
}
private static DatabaseMetadata tryReadAndMigrateV1(final File metadataFile) throws IOException {
final V1 v1 = MAPPER.readValue(metadataFile, V1.class);
// when migrating from v1, this version will automatically migrate the db to the variables
// storage, so we use the `_WITH_VARIABLES` variants
final VersionedStorageFormat versionedStorageFormat;
if (v1.privacyVersion().isEmpty()) {
versionedStorageFormat =
switch (v1.version()) {
case 1 -> BaseVersionedStorageFormat.FOREST_WITH_VARIABLES;
case 2 -> BaseVersionedStorageFormat.BONSAI_WITH_VARIABLES;
default -> throw new StorageException("Unsupported db version: " + v1.version());
};
} else {
versionedStorageFormat =
switch (v1.privacyVersion().getAsInt()) {
case 1 -> switch (v1.version()) {
case 1 -> PrivacyVersionedStorageFormat.FOREST_WITH_VARIABLES;
case 2 -> PrivacyVersionedStorageFormat.BONSAI_WITH_VARIABLES;
default -> throw new StorageException("Unsupported db version: " + v1.version());
};
default -> throw new StorageException(
"Unsupported db privacy version: " + v1.privacyVersion().getAsInt());
};
}
final DatabaseMetadata metadataV2 = new DatabaseMetadata(versionedStorageFormat);
// writing the metadata will migrate to v2
metadataV2.writeToFile(metadataFile);
return metadataV2;
}
private static DatabaseMetadata tryReadV2(final File metadataFile) throws IOException {
final V2 v2 = MAPPER.readValue(metadataFile, V2.class);
return new DatabaseMetadata(fromV2(v2.v2));
}
private static VersionedStorageFormat fromV2(final MetadataV2 metadataV2) {
if (metadataV2.privacyVersion().isEmpty()) {
return Arrays.stream(BaseVersionedStorageFormat.values())
.filter(
vsf ->
vsf.getFormat().equals(metadataV2.format())
&& vsf.getVersion() == metadataV2.version())
.findFirst()
.orElseThrow(
() -> {
final String message = "Unsupported RocksDB metadata: " + metadataV2;
LOG.error(message);
throw new StorageException(message);
});
}
return Arrays.stream(PrivacyVersionedStorageFormat.values())
.filter(
vsf ->
vsf.getFormat().equals(metadataV2.format())
&& vsf.getVersion() == metadataV2.version()
&& vsf.getPrivacyVersion().equals(metadataV2.privacyVersion()))
.findFirst()
.orElseThrow(
() -> {
final String message = "Unsupported RocksDB metadata: " + metadataV2;
LOG.error(message);
throw new StorageException(message);
});
}
/**
* Update an existing base storage to support the privacy feature
*
* @return the updated metadata with privacy support
*/
public DatabaseMetadata upgradeToPrivacy() {
return new DatabaseMetadata(
switch (versionedStorageFormat.getFormat()) {
case FOREST -> switch (versionedStorageFormat.getVersion()) {
case 1 -> PrivacyVersionedStorageFormat.FOREST_ORIGINAL;
case 2 -> PrivacyVersionedStorageFormat.FOREST_WITH_VARIABLES;
default -> throw new StorageException(
"Unsupported database with format FOREST and version "
+ versionedStorageFormat.getVersion());
};
case BONSAI -> switch (versionedStorageFormat.getVersion()) {
case 1 -> PrivacyVersionedStorageFormat.BONSAI_ORIGINAL;
case 2 -> PrivacyVersionedStorageFormat.BONSAI_WITH_VARIABLES;
default -> throw new StorageException(
"Unsupported database with format BONSAI and version "
+ versionedStorageFormat.getVersion());
};
});
}
@Override
public String toString() {
return "versionedStorageFormat=" + versionedStorageFormat;
}
@JsonSerialize
@SuppressWarnings("unused")
private record V1(int version, OptionalInt privacyVersion) {}
@JsonSerialize
@SuppressWarnings("unused")
private record V2(MetadataV2 v2) {}
@JsonSerialize
@SuppressWarnings("unused")
private record MetadataV2(DataStorageFormat format, int version, OptionalInt privacyVersion) {}
}
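
As a hedged illustration of the on-disk layout written by the v2 serializer above (the records below are local stand-ins for the private ones, the format field is simplified to a String, and a Jackson version with record support is assumed), serializing a wrapper whose single field is named v2 with the same mapper settings produces the new metadata shape, for example a pretty-printed object containing "format" : "BONSAI" and "version" : 2.

import java.util.OptionalInt;

import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.datatype.jdk8.Jdk8Module;

/** Hypothetical example only: mirrors the v2 metadata shape with local stand-in records. */
public class MetadataJsonExample {

  record MetadataV2(String format, int version, OptionalInt privacyVersion) {}

  record Wrapper(MetadataV2 v2) {}

  public static void main(final String[] args) throws Exception {
    final ObjectMapper mapper =
        new ObjectMapper()
            .registerModule(new Jdk8Module())
            .setSerializationInclusion(JsonInclude.Include.NON_ABSENT)
            .enable(SerializationFeature.INDENT_OUTPUT);
    // The empty privacyVersion is omitted thanks to NON_ABSENT; a private DB would carry it.
    System.out.println(
        mapper.writeValueAsString(new Wrapper(new MetadataV2("BONSAI", 2, OptionalInt.empty()))));
  }
}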

@ -0,0 +1,84 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.plugin.services.storage.rocksdb.configuration;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.OptionalInt;
/** Privacy enabled versioned data storage format */
public enum PrivacyVersionedStorageFormat implements VersionedStorageFormat {
/** Original Forest version, not used since replaced by FOREST_WITH_VARIABLES */
FOREST_ORIGINAL(BaseVersionedStorageFormat.FOREST_ORIGINAL, 1),
/**
* Current Forest version, with blockchain variables in a dedicated column family, in order to
* make BlobDB more effective
*/
FOREST_WITH_VARIABLES(BaseVersionedStorageFormat.FOREST_WITH_VARIABLES, 1),
/** Original Bonsai version, not used since replaced by BONSAI_WITH_VARIABLES */
BONSAI_ORIGINAL(BaseVersionedStorageFormat.BONSAI_ORIGINAL, 1),
/**
* Current Bonsai version, with blockchain variables in a dedicated column family, in order to
* make BlobDB more effective
*/
BONSAI_WITH_VARIABLES(BaseVersionedStorageFormat.BONSAI_WITH_VARIABLES, 1);
private final VersionedStorageFormat baseVersionedStorageFormat;
private final OptionalInt privacyVersion;
PrivacyVersionedStorageFormat(
final VersionedStorageFormat baseVersionedStorageFormat, final int privacyVersion) {
this.baseVersionedStorageFormat = baseVersionedStorageFormat;
this.privacyVersion = OptionalInt.of(privacyVersion);
}
/**
* Return the default version for new db for a specific format
*
* @param format data storage format
* @return the version to use for new db
*/
public static VersionedStorageFormat defaultForNewDB(final DataStorageFormat format) {
return switch (format) {
case FOREST -> FOREST_WITH_VARIABLES;
case BONSAI -> BONSAI_WITH_VARIABLES;
};
}
@Override
public DataStorageFormat getFormat() {
return baseVersionedStorageFormat.getFormat();
}
@Override
public int getVersion() {
return baseVersionedStorageFormat.getVersion();
}
@Override
public OptionalInt getPrivacyVersion() {
return privacyVersion;
}
@Override
public String toString() {
return "PrivateVersionedStorageFormat{"
+ "versionedStorageFormat="
+ baseVersionedStorageFormat
+ ", privacyVersion="
+ privacyVersion
+ '}';
}
}

@ -0,0 +1,43 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.plugin.services.storage.rocksdb.configuration;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.OptionalInt;
/** Represent a specific version of a data storage format */
public interface VersionedStorageFormat {
/**
* Get the data storage format
*
* @return the data storage format
*/
DataStorageFormat getFormat();
/**
* Get the version of the data storage format
*
* @return the version of the data storage format
*/
int getVersion();
/**
* Get the version of the privacy db if the privacy feature is enabled, or empty otherwise
*
* @return the optional privacy version
*/
OptionalInt getPrivacyVersion();
}
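
A hypothetical snippet (not part of this change) contrasting the two implementations of this interface: the base variant reports an empty privacy version, while the privacy variant carries one.

import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.BaseVersionedStorageFormat;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.PrivacyVersionedStorageFormat;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.VersionedStorageFormat;

/** Hypothetical example only: inspects the two VersionedStorageFormat implementations. */
public class VersionedStorageFormatExample {

  public static void main(final String[] args) {
    final VersionedStorageFormat publicFormat = BaseVersionedStorageFormat.BONSAI_WITH_VARIABLES;
    final VersionedStorageFormat privacyFormat = PrivacyVersionedStorageFormat.BONSAI_WITH_VARIABLES;
    // Both report BONSAI version 2; only the privacy variant exposes a privacy version (1).
    System.out.println(
        publicFormat.getFormat() + " v" + publicFormat.getVersion() + " privacy=" + publicFormat.getPrivacyVersion());
    System.out.println(
        privacyFormat.getFormat() + " v" + privacyFormat.getVersion() + " privacy=" + privacyFormat.getPrivacyVersion());
  }
}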

@ -15,6 +15,7 @@
package org.hyperledger.besu.plugin.services.storage.rocksdb;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.FOREST;
import static org.hyperledger.besu.plugin.services.storage.rocksdb.segmented.RocksDBColumnarKeyValueStorageTest.TestSegment;
import static org.mockito.Mockito.when;
@ -22,7 +23,9 @@ import org.hyperledger.besu.metrics.ObservableMetricsSystem;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.BaseVersionedStorageFormat;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.DatabaseMetadata;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.PrivacyVersionedStorageFormat;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.RocksDBFactoryConfiguration;
import java.nio.file.Files;
@ -37,9 +40,6 @@ import org.mockito.junit.jupiter.MockitoExtension;
@ExtendWith(MockitoExtension.class)
public class RocksDBKeyValuePrivacyStorageFactoryTest {
private static final int DEFAULT_VERSION = 1;
private static final int DEFAULT_PRIVACY_VERSION = 1;
@Mock private RocksDBFactoryConfiguration rocksDbConfiguration;
@Mock private BesuConfiguration commonConfiguration;
@TempDir private Path temporaryFolder;
@ -48,15 +48,16 @@ public class RocksDBKeyValuePrivacyStorageFactoryTest {
private final List<SegmentIdentifier> segments = List.of(TestSegment.DEFAULT, segment);
@Test
public void shouldDetectVersion1DatabaseIfNoMetadataFileFound() throws Exception {
public void shouldDetectVersion1MetadataIfPresent() throws Exception {
final Path tempDataDir = temporaryFolder.resolve("data");
final Path tempDatabaseDir = temporaryFolder.resolve("db");
final Path tempPrivateDatabaseDir = tempDatabaseDir.resolve("private");
Files.createDirectories(tempPrivateDatabaseDir);
Files.createDirectories(tempDataDir);
when(commonConfiguration.getStoragePath()).thenReturn(tempDatabaseDir);
when(commonConfiguration.getDataPath()).thenReturn(tempDataDir);
when(commonConfiguration.getDatabaseVersion()).thenReturn(DEFAULT_VERSION);
mockCommonConfiguration(tempDataDir, tempDatabaseDir);
Utils.createDatabaseMetadataV1Privacy(
tempDataDir, PrivacyVersionedStorageFormat.FOREST_ORIGINAL);
final RocksDBKeyValuePrivacyStorageFactory storageFactory =
new RocksDBKeyValuePrivacyStorageFactory(
@ -66,23 +67,18 @@ public class RocksDBKeyValuePrivacyStorageFactoryTest {
RocksDBMetricsFactory.PRIVATE_ROCKS_DB_METRICS));
// Side effect is creation of the Metadata version file
storageFactory.create(segment, commonConfiguration, metricsSystem);
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).maybePrivacyVersion()).isNotEmpty();
try (final var storage = storageFactory.create(segment, commonConfiguration, metricsSystem)) {
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersion()).isEqualTo(DEFAULT_VERSION);
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).maybePrivacyVersion().get())
.isEqualTo(DEFAULT_VERSION);
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersionedStorageFormat())
.isEqualTo(PrivacyVersionedStorageFormat.FOREST_WITH_VARIABLES);
}
}
@Test
public void shouldCreateCorrectMetadataFileForLatestVersion() throws Exception {
final Path tempDataDir = temporaryFolder.resolve("data");
final Path tempDatabaseDir = temporaryFolder.resolve("db");
when(commonConfiguration.getStoragePath()).thenReturn(tempDatabaseDir);
when(commonConfiguration.getDataPath()).thenReturn(tempDataDir);
when(commonConfiguration.getDatabaseVersion()).thenReturn(DEFAULT_VERSION);
mockCommonConfiguration(tempDataDir, tempDatabaseDir);
final RocksDBKeyValuePrivacyStorageFactory storageFactory =
new RocksDBKeyValuePrivacyStorageFactory(
@ -92,42 +88,44 @@ public class RocksDBKeyValuePrivacyStorageFactoryTest {
RocksDBMetricsFactory.PRIVATE_ROCKS_DB_METRICS));
// Side effect is creation of the Metadata version file
storageFactory.create(segment, commonConfiguration, metricsSystem);
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).maybePrivacyVersion()).isNotEmpty();
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersion()).isEqualTo(DEFAULT_VERSION);
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).maybePrivacyVersion().get())
.isEqualTo(DEFAULT_PRIVACY_VERSION);
try (final var storage = storageFactory.create(segment, commonConfiguration, metricsSystem)) {
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersionedStorageFormat())
.isEqualTo(PrivacyVersionedStorageFormat.FOREST_WITH_VARIABLES);
}
}
@Test
public void shouldUpdateCorrectMetadataFileForLatestVersion() throws Exception {
final Path tempDataDir = temporaryFolder.resolve("data");
final Path tempDatabaseDir = temporaryFolder.resolve("db");
when(commonConfiguration.getStoragePath()).thenReturn(tempDatabaseDir);
when(commonConfiguration.getDataPath()).thenReturn(tempDataDir);
when(commonConfiguration.getDatabaseVersion()).thenReturn(DEFAULT_VERSION);
mockCommonConfiguration(tempDataDir, tempDatabaseDir);
final RocksDBKeyValueStorageFactory storageFactory =
new RocksDBKeyValueStorageFactory(
() -> rocksDbConfiguration, segments, RocksDBMetricsFactory.PRIVATE_ROCKS_DB_METRICS);
storageFactory.create(segment, commonConfiguration, metricsSystem);
try (final var storage = storageFactory.create(segment, commonConfiguration, metricsSystem)) {
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).maybePrivacyVersion()).isEmpty();
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersion()).isEqualTo(DEFAULT_VERSION);
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersionedStorageFormat())
.isEqualTo(BaseVersionedStorageFormat.FOREST_WITH_VARIABLES);
}
storageFactory.close();
final RocksDBKeyValuePrivacyStorageFactory privacyStorageFactory =
new RocksDBKeyValuePrivacyStorageFactory(storageFactory);
privacyStorageFactory.create(segment, commonConfiguration, metricsSystem);
try (final var storage =
privacyStorageFactory.create(segment, commonConfiguration, metricsSystem)) {
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).maybePrivacyVersion()).isNotEmpty();
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersionedStorageFormat())
.isEqualTo(PrivacyVersionedStorageFormat.FOREST_WITH_VARIABLES);
}
privacyStorageFactory.close();
}
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).maybePrivacyVersion().get())
.isEqualTo(DEFAULT_PRIVACY_VERSION);
private void mockCommonConfiguration(final Path tempDataDir, final Path tempDatabaseDir) {
when(commonConfiguration.getStoragePath()).thenReturn(tempDatabaseDir);
when(commonConfiguration.getDataPath()).thenReturn(tempDataDir);
when(commonConfiguration.getDatabaseFormat()).thenReturn(FOREST);
}
}

@ -17,24 +17,30 @@ package org.hyperledger.besu.plugin.services.storage.rocksdb;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatCode;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.assertj.core.api.Assertions.fail;
import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.BONSAI;
import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.FOREST;
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.when;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.metrics.ObservableMetricsSystem;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.BaseVersionedStorageFormat;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.DatabaseMetadata;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.RocksDBFactoryConfiguration;
import org.hyperledger.besu.plugin.services.storage.rocksdb.segmented.RocksDBColumnarKeyValueStorageTest.TestSegment;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.condition.DisabledOnOs;
import org.junit.jupiter.api.condition.OS;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.io.TempDir;
import org.mockito.Mock;
@ -43,9 +49,6 @@ import org.mockito.junit.jupiter.MockitoExtension;
@ExtendWith(MockitoExtension.class)
public class RocksDBKeyValueStorageFactoryTest {
private static final String METADATA_FILENAME = "DATABASE_METADATA.json";
private static final int DEFAULT_VERSION = 1;
@Mock private RocksDBFactoryConfiguration rocksDbConfiguration;
@Mock private BesuConfiguration commonConfiguration;
@TempDir public Path temporaryFolder;
@ -54,80 +57,89 @@ public class RocksDBKeyValueStorageFactoryTest {
private final List<SegmentIdentifier> segments = List.of(TestSegment.DEFAULT, segment);
@Test
public void shouldCreateCorrectMetadataFileForLatestVersion() throws Exception {
public void shouldCreateCorrectMetadataFileForLatestVersionForNewDb() throws Exception {
final Path tempDataDir = temporaryFolder.resolve("data");
final Path tempDatabaseDir = temporaryFolder.resolve("db");
mockCommonConfiguration(tempDataDir, tempDatabaseDir);
mockCommonConfiguration(tempDataDir, tempDatabaseDir, FOREST);
final RocksDBKeyValueStorageFactory storageFactory =
new RocksDBKeyValueStorageFactory(
() -> rocksDbConfiguration, segments, RocksDBMetricsFactory.PUBLIC_ROCKS_DB_METRICS);
// Side effect is creation of the Metadata version file
storageFactory.create(segment, commonConfiguration, metricsSystem);
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersion()).isEqualTo(DEFAULT_VERSION);
}
private void mockCommonConfiguration(final Path tempDataDir, final Path tempDatabaseDir) {
when(commonConfiguration.getStoragePath()).thenReturn(tempDatabaseDir);
when(commonConfiguration.getDataPath()).thenReturn(tempDataDir);
when(commonConfiguration.getDatabaseVersion()).thenReturn(DEFAULT_VERSION);
try (final var storage = storageFactory.create(segment, commonConfiguration, metricsSystem)) {
// Side effect is creation of the Metadata version file
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersionedStorageFormat())
.isEqualTo(BaseVersionedStorageFormat.FOREST_WITH_VARIABLES);
}
}
@Test
public void shouldDetectVersion1DatabaseIfNoMetadataFileFound() throws Exception {
public void shouldFailIfDbExistsAndNoMetadataFileFound() throws Exception {
final Path tempDataDir = temporaryFolder.resolve("data");
final Path tempDatabaseDir = temporaryFolder.resolve("db");
Files.createDirectories(tempDatabaseDir);
Files.createDirectories(tempDataDir);
mockCommonConfiguration(tempDataDir, tempDatabaseDir);
mockCommonConfiguration(tempDataDir, tempDatabaseDir, FOREST);
final RocksDBKeyValueStorageFactory storageFactory =
new RocksDBKeyValueStorageFactory(
() -> rocksDbConfiguration, segments, RocksDBMetricsFactory.PUBLIC_ROCKS_DB_METRICS);
storageFactory.create(segment, commonConfiguration, metricsSystem);
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersion()).isEqualTo(DEFAULT_VERSION);
try (final var storage = storageFactory.create(segment, commonConfiguration, metricsSystem)) {
fail("Must fail if db is present but metadata is not");
} catch (StorageException se) {
assertThat(se)
.hasMessage(
"Database exists but metadata file not found, without it there is no safe way to open the database");
}
}
@Test
public void shouldDetectCorrectVersionIfMetadataFileExists() throws Exception {
public void shouldDetectCorrectMetadataV1() throws Exception {
final Path tempDataDir = temporaryFolder.resolve("data");
final Path tempDatabaseDir = temporaryFolder.resolve("db");
Files.createDirectories(tempDataDir);
mockCommonConfiguration(tempDataDir, tempDatabaseDir);
mockCommonConfiguration(tempDataDir, tempDatabaseDir, BONSAI);
Utils.createDatabaseMetadataV1(tempDataDir, BONSAI);
final RocksDBKeyValueStorageFactory storageFactory =
new RocksDBKeyValueStorageFactory(
() -> rocksDbConfiguration, segments, RocksDBMetricsFactory.PUBLIC_ROCKS_DB_METRICS);
storageFactory.create(segment, commonConfiguration, metricsSystem);
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersion()).isEqualTo(DEFAULT_VERSION);
assertThat(storageFactory.isSegmentIsolationSupported()).isTrue();
try (final var storage = storageFactory.create(segment, commonConfiguration, metricsSystem)) {
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersionedStorageFormat())
.isEqualTo(BaseVersionedStorageFormat.BONSAI_WITH_VARIABLES);
assertThat(storageFactory.isSegmentIsolationSupported()).isTrue();
}
}
@Test
public void shouldDetectCorrectVersionInCaseOfRollback() throws Exception {
public void shouldFailInCaseOfUnmanagedRollback() throws Exception {
final Path tempDataDir = temporaryFolder.resolve("data");
final Path tempDatabaseDir = temporaryFolder.resolve("db");
Files.createDirectories(tempDatabaseDir);
Files.createDirectories(tempDataDir);
mockCommonConfiguration(tempDataDir, tempDatabaseDir);
mockCommonConfiguration(tempDataDir, tempDatabaseDir, BONSAI);
Utils.createDatabaseMetadataV1(tempDataDir, BONSAI);
final RocksDBKeyValueStorageFactory storageFactory =
new RocksDBKeyValueStorageFactory(
() -> rocksDbConfiguration, segments, 2, RocksDBMetricsFactory.PUBLIC_ROCKS_DB_METRICS);
() -> rocksDbConfiguration, segments, RocksDBMetricsFactory.PUBLIC_ROCKS_DB_METRICS);
storageFactory.create(segment, commonConfiguration, metricsSystem);
storageFactory.close();
Utils.createDatabaseMetadataV2(tempDataDir, BONSAI, 1);
final RocksDBKeyValueStorageFactory rolledbackStorageFactory =
new RocksDBKeyValueStorageFactory(
() -> rocksDbConfiguration, segments, 1, RocksDBMetricsFactory.PUBLIC_ROCKS_DB_METRICS);
rolledbackStorageFactory.create(segment, commonConfiguration, metricsSystem);
() -> rocksDbConfiguration, segments, RocksDBMetricsFactory.PUBLIC_ROCKS_DB_METRICS);
assertThatThrownBy(
() -> rolledbackStorageFactory.create(segment, commonConfiguration, metricsSystem))
.isInstanceOf(StorageException.class)
.hasMessageStartingWith("Database unsafe downgrade detect");
}
@Test
@ -136,9 +148,9 @@ public class RocksDBKeyValueStorageFactoryTest {
final Path tempDatabaseDir = temporaryFolder.resolve("db");
Files.createDirectories(tempDatabaseDir);
Files.createDirectories(tempDataDir);
mockCommonConfiguration(tempDataDir, tempDatabaseDir);
mockCommonConfiguration(tempDataDir, tempDatabaseDir, FOREST);
new DatabaseMetadata(-1).writeToDirectory(tempDataDir);
Utils.createDatabaseMetadataV1(tempDataDir, 99);
assertThatThrownBy(
() ->
new RocksDBKeyValueStorageFactory(
@ -146,33 +158,23 @@ public class RocksDBKeyValueStorageFactoryTest {
segments,
RocksDBMetricsFactory.PUBLIC_ROCKS_DB_METRICS)
.create(segment, commonConfiguration, metricsSystem))
.isInstanceOf(StorageException.class);
.isInstanceOf(StorageException.class)
.hasMessageStartingWith("Unsupported db version");
}
@Test
public void shouldThrowExceptionWhenExistingDatabaseVersionDifferentFromConfig()
throws Exception {
public void shouldThrowExceptionWhenExistingDatabaseFormatDiffersFromConfig() throws Exception {
final int actualDatabaseVersion = DataStorageFormat.FOREST.getDatabaseVersion();
final int expectedDatabaseVersion = DataStorageFormat.BONSAI.getDatabaseVersion();
final DataStorageFormat actualDatabaseFormat = FOREST;
final DataStorageFormat expectedDatabaseFormat = BONSAI;
final Path tempDataDir = temporaryFolder.resolve("data");
final Path tempDatabaseDir = temporaryFolder.resolve("db");
Files.createDirectories(tempDatabaseDir);
Files.createDirectories(tempDataDir);
mockCommonConfiguration(tempDataDir, tempDatabaseDir);
when(commonConfiguration.getDatabaseVersion()).thenReturn(expectedDatabaseVersion);
new DatabaseMetadata(actualDatabaseVersion).writeToDirectory(tempDataDir);
mockCommonConfiguration(tempDataDir, tempDatabaseDir, BONSAI);
String exceptionMessage =
String.format(
"Mismatch: DB at %s is %s (Version %s) but config expects %s (Version %s). Please check your config.",
tempDataDir.toAbsolutePath(),
DataStorageFormat.getName(actualDatabaseVersion),
actualDatabaseVersion,
DataStorageFormat.getName(expectedDatabaseVersion),
expectedDatabaseVersion);
Utils.createDatabaseMetadataV2(tempDataDir, FOREST, 2);
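// On-disk metadata declares FOREST while the mocked config expects BONSAI, so create() must fail with a format mismatch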
assertThatThrownBy(
() ->
@ -182,22 +184,31 @@ public class RocksDBKeyValueStorageFactoryTest {
RocksDBMetricsFactory.PUBLIC_ROCKS_DB_METRICS)
.create(segment, commonConfiguration, metricsSystem))
.isInstanceOf(StorageException.class)
.hasMessage(exceptionMessage);
.hasMessage(
"Database format mismatch: DB at %s is %s but config expects %s. "
+ "Please check your config.",
tempDataDir.toAbsolutePath(), actualDatabaseFormat, expectedDatabaseFormat);
}
@Test
public void shouldSetSegmentationFieldDuringCreation() throws Exception {
public void shouldDetectCorrectMetadataV2AndSetSegmentationFieldDuringCreation()
throws Exception {
final Path tempDataDir = temporaryFolder.resolve("data");
final Path tempDatabaseDir = temporaryFolder.resolve("db");
Files.createDirectories(tempDatabaseDir);
Files.createDirectories(tempDataDir);
mockCommonConfiguration(tempDataDir, tempDatabaseDir);
mockCommonConfiguration(tempDataDir, tempDatabaseDir, FOREST);
Utils.createDatabaseMetadataV2(tempDataDir, FOREST, 2);
final RocksDBKeyValueStorageFactory storageFactory =
new RocksDBKeyValueStorageFactory(
() -> rocksDbConfiguration, segments, RocksDBMetricsFactory.PUBLIC_ROCKS_DB_METRICS);
storageFactory.create(segment, commonConfiguration, metricsSystem);
assertThatCode(storageFactory::isSegmentIsolationSupported).doesNotThrowAnyException();
try (final var storage = storageFactory.create(segment, commonConfiguration, metricsSystem)) {
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersionedStorageFormat())
.isEqualTo(BaseVersionedStorageFormat.FOREST_WITH_VARIABLES);
assertThatCode(storageFactory::isSegmentIsolationSupported).doesNotThrowAnyException();
}
}
@Test
@ -206,11 +217,9 @@ public class RocksDBKeyValueStorageFactoryTest {
final Path tempDatabaseDir = temporaryFolder.resolve("db");
Files.createDirectories(tempDatabaseDir);
Files.createDirectories(tempDataDir);
mockCommonConfiguration(tempDataDir, tempDatabaseDir);
mockCommonConfiguration(tempDataDir, tempDatabaseDir, FOREST);
final String badVersion = "{\"🦄\":1}";
Files.write(
tempDataDir.resolve(METADATA_FILENAME), badVersion.getBytes(Charset.defaultCharset()));
Utils.createDatabaseMetadataRaw(tempDataDir, "{\"🦄\":1}");
assertThatThrownBy(
() ->
@ -219,11 +228,11 @@ public class RocksDBKeyValueStorageFactoryTest {
segments,
RocksDBMetricsFactory.PUBLIC_ROCKS_DB_METRICS)
.create(segment, commonConfiguration, metricsSystem))
.isInstanceOf(IllegalStateException.class);
.isInstanceOf(IllegalStateException.class)
.hasMessageStartingWith("Invalid metadata file");
;
final String badValue = "{\"version\":\"iomedae\"}";
Files.write(
tempDatabaseDir.resolve(METADATA_FILENAME), badValue.getBytes(Charset.defaultCharset()));
Utils.createDatabaseMetadataRaw(tempDataDir, "{\"version\"=1}");
assertThatThrownBy(
() ->
@ -232,16 +241,21 @@ public class RocksDBKeyValueStorageFactoryTest {
segments,
RocksDBMetricsFactory.PUBLIC_ROCKS_DB_METRICS)
.create(segment, commonConfiguration, metricsSystem))
.isInstanceOf(IllegalStateException.class);
.isInstanceOf(IllegalStateException.class)
.hasMessageStartingWith("Invalid metadata file");
;
}
@Test
@DisabledOnOs(OS.WINDOWS)
public void shouldCreateDBCorrectlyIfSymlink() throws Exception {
final Path tempRealDataDir = Files.createDirectories(temporaryFolder.resolve("real-data-dir"));
final Path tempSymLinkDataDir =
Files.createSymbolicLink(temporaryFolder.resolve("symlink-data-dir"), tempRealDataDir);
final Path tempDatabaseDir = temporaryFolder.resolve("db");
mockCommonConfiguration(tempSymLinkDataDir, tempDatabaseDir);
mockCommonConfiguration(tempSymLinkDataDir, tempDatabaseDir, FOREST);
Utils.createDatabaseMetadataV2(tempSymLinkDataDir, FOREST, 2);
final RocksDBKeyValueStorageFactory storageFactory =
new RocksDBKeyValueStorageFactory(
@ -249,8 +263,16 @@ public class RocksDBKeyValueStorageFactoryTest {
// Ensure that, having created everything via a symlinked data dir, the DB metadata has
// been created correctly
storageFactory.create(segment, commonConfiguration, metricsSystem);
assertThat(DatabaseMetadata.lookUpFrom(tempRealDataDir).getVersion())
.isEqualTo(DEFAULT_VERSION);
try (final var storage = storageFactory.create(segment, commonConfiguration, metricsSystem)) {
assertThat(DatabaseMetadata.lookUpFrom(tempRealDataDir).getVersionedStorageFormat())
.isEqualTo(BaseVersionedStorageFormat.FOREST_WITH_VARIABLES);
}
}
private void mockCommonConfiguration(
final Path tempDataDir, final Path tempDatabaseDir, final DataStorageFormat format) {
when(commonConfiguration.getStoragePath()).thenReturn(tempDatabaseDir);
when(commonConfiguration.getDataPath()).thenReturn(tempDataDir);
lenient().when(commonConfiguration.getDatabaseFormat()).thenReturn(format);
}
}
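For orientation, a minimal sketch of the lookup these tests drive. It assumes DatabaseMetadata lives in the rocksdb.configuration package referenced below and that the versioned storage format exposes getFormat(), as the privacy variant does in the test Utils; exact signatures may differ.
import java.nio.file.Path;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.DatabaseMetadata;
final class MetadataLookupSketch {
  // Reads DATABASE_METADATA.json from the node's data directory and returns the detected format.
  // lookUpFrom() is expected to throw a StorageException for unsupported or downgraded metadata.
  static DataStorageFormat detectFormat(final Path dataDir) throws Exception {
    return DatabaseMetadata.lookUpFrom(dataDir).getVersionedStorageFormat().getFormat();
  }
}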

@ -0,0 +1,91 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.plugin.services.storage.rocksdb;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.PrivacyVersionedStorageFormat;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Path;
public class Utils {
public static final String METADATA_FILENAME = "DATABASE_METADATA.json";
public static void createDatabaseMetadataV1(
final Path tempDataDir, final DataStorageFormat dataStorageFormat) throws IOException {
createDatabaseMetadataV1(tempDataDir, dataStorageFormatToV1(dataStorageFormat));
}
public static void createDatabaseMetadataV1(final Path tempDataDir, final int version)
throws IOException {
final String content = "{\"version\":" + version + "}";
Files.write(tempDataDir.resolve(METADATA_FILENAME), content.getBytes(Charset.defaultCharset()));
}
public static void createDatabaseMetadataV1Privacy(
final Path tempDataDir, final PrivacyVersionedStorageFormat privacyVersionedStorageFormat)
throws IOException {
createDatabaseMetadataV1Privacy(
tempDataDir,
dataStorageFormatToV1(privacyVersionedStorageFormat.getFormat()),
privacyVersionedStorageFormat.getPrivacyVersion().getAsInt());
}
public static void createDatabaseMetadataV1Privacy(
final Path tempDataDir, final int version, final int privacyVersion) throws IOException {
final String content =
"{\"version\":" + version + ",\"privacyVersion\":" + privacyVersion + "}";
Files.write(tempDataDir.resolve(METADATA_FILENAME), content.getBytes(Charset.defaultCharset()));
}
public static void createDatabaseMetadataV2(
final Path tempDataDir, final DataStorageFormat dataStorageFormat, final int version)
throws IOException {
final String content =
"{\"v2\":{\"format\":\"" + dataStorageFormat + "\",\"version\":" + version + "}}";
Files.write(tempDataDir.resolve(METADATA_FILENAME), content.getBytes(Charset.defaultCharset()));
}
public static void createDatabaseMetadataV2Privacy(
final Path tempDataDir,
final DataStorageFormat dataStorageFormat,
final int version,
final int privacyVersion)
throws IOException {
final String content =
"{\"v2\":{\"format\":\""
+ dataStorageFormat
+ "\",\"version\":"
+ version
+ ",\"privacyVersion\":"
+ privacyVersion
+ "}}";
Files.write(tempDataDir.resolve(METADATA_FILENAME), content.getBytes(Charset.defaultCharset()));
}
public static void createDatabaseMetadataRaw(final Path tempDataDir, final String content)
throws IOException {
Files.write(tempDataDir.resolve(METADATA_FILENAME), content.getBytes(Charset.defaultCharset()));
}
private static int dataStorageFormatToV1(final DataStorageFormat dataStorageFormat) {
return switch (dataStorageFormat) {
case FOREST -> 1;
case BONSAI -> 2;
};
}
}
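A quick usage sketch of the two layouts these helpers write (the temporary directory is illustrative; each call overwrites DATABASE_METADATA.json in that directory):
final Path dataDir = Files.createTempDirectory("besu-data");
// v1 (legacy) layout, serialized as {"version":2}, which maps to BONSAI:
Utils.createDatabaseMetadataV1(dataDir, DataStorageFormat.BONSAI);
// v2 layout, serialized as {"v2":{"format":"BONSAI","version":2}}:
Utils.createDatabaseMetadataV2(dataDir, DataStorageFormat.BONSAI, 2);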

@ -17,6 +17,8 @@ package org.hyperledger.besu.plugin.services.storage.rocksdb.configuration;
import static org.assertj.core.api.Assertions.assertThat;
import org.hyperledger.besu.plugin.services.exception.StorageException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
@ -28,31 +30,56 @@ class DatabaseMetadataTest {
@TempDir public Path temporaryFolder;
@Test
void getVersion() {
final DatabaseMetadata databaseMetadata = new DatabaseMetadata(42);
assertThat(databaseMetadata).isNotNull();
assertThat(databaseMetadata.getVersion()).isEqualTo(42);
void readingMetadataV1() throws Exception {
final Path tempDataDir = createAndWrite("data", "DATABASE_METADATA.json", "{\"version\":2}");
final DatabaseMetadata databaseMetadata = DatabaseMetadata.lookUpFrom(tempDataDir);
assertThat(databaseMetadata.getVersionedStorageFormat())
.isEqualTo(BaseVersionedStorageFormat.BONSAI_WITH_VARIABLES);
}
@Test
void readingMetadataV1Privacy() throws Exception {
final Path tempDataDir =
createAndWrite("data", "DATABASE_METADATA.json", "{\"version\":1,\"privacyVersion\":1}");
final DatabaseMetadata databaseMetadata = DatabaseMetadata.lookUpFrom(tempDataDir);
assertThat(databaseMetadata.getVersionedStorageFormat())
.isEqualTo(PrivacyVersionedStorageFormat.FOREST_WITH_VARIABLES);
}
@Test
void metaFileShouldMayContain() throws Exception {
void readingMetadataV2() throws Exception {
final Path tempDataDir =
createAndWrite(
"data", "DATABASE_METADATA.json", "{\"version\":42 , \"privacyVersion\":55}");
"data", "DATABASE_METADATA.json", "{\"v2\":{\"format\":\"FOREST\",\"version\":2}}");
final DatabaseMetadata databaseMetadata = DatabaseMetadata.lookUpFrom(tempDataDir);
assertThat(databaseMetadata).isNotNull();
assertThat(databaseMetadata.getVersion()).isEqualTo(42);
assertThat(databaseMetadata.maybePrivacyVersion()).isNotEmpty();
assertThat(databaseMetadata.maybePrivacyVersion().get()).isEqualTo(55);
assertThat(databaseMetadata.getVersionedStorageFormat())
.isEqualTo(BaseVersionedStorageFormat.FOREST_WITH_VARIABLES);
}
@Test
void metaFileShouldBeSoughtIntoDataDirFirst() throws Exception {
final Path tempDataDir = createAndWrite("data", "DATABASE_METADATA.json", "{\"version\":42}");
void readingMetadataV2Privacy() throws Exception {
final Path tempDataDir =
createAndWrite(
"data",
"DATABASE_METADATA.json",
"{\"v2\":{\"format\":\"FOREST\",\"version\":2,\"privacyVersion\":1}}");
final DatabaseMetadata databaseMetadata = DatabaseMetadata.lookUpFrom(tempDataDir);
assertThat(databaseMetadata).isNotNull();
assertThat(databaseMetadata.getVersion()).isEqualTo(42);
assertThat(databaseMetadata.getVersionedStorageFormat())
.isEqualTo(PrivacyVersionedStorageFormat.FOREST_WITH_VARIABLES);
}
@Test
void unsupportedMetadata() throws Exception {
final Path tempDataDir = createAndWrite("data", "DATABASE_METADATA.json", "{\"version\":42}");
try {
DatabaseMetadata.lookUpFrom(tempDataDir);
} catch (final StorageException se) {
assertThat(se).hasMessage("Unsupported db version: 42");
}
}
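Taken together, the assertions above summarize how the metadata file is interpreted:
// {"version":1}                                   -> legacy v1, FOREST
// {"version":2}                                   -> legacy v1, BONSAI (BONSAI_WITH_VARIABLES)
// {"v2":{"format":"FOREST","version":2}}          -> BaseVersionedStorageFormat.FOREST_WITH_VARIABLES
// a "privacyVersion" field in either layout       -> the matching PrivacyVersionedStorageFormat variant
// any other version (e.g. {"version":42})         -> StorageException("Unsupported db version: 42")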
private Path createAndWrite(final String dir, final String file, final String content)

@ -308,58 +308,59 @@ public abstract class RocksDBColumnarKeyValueStorageTest extends AbstractKeyValu
// Actual call
final SegmentedKeyValueStorage store =
try (final SegmentedKeyValueStorage store =
createSegmentedStore(
folder,
metricsSystemMock,
List.of(TestSegment.DEFAULT, TestSegment.FOO),
List.of(TestSegment.EXPERIMENTAL));
KeyValueStorage keyValueStorage = new SegmentedKeyValueStorageAdapter(TestSegment.FOO, store);
// Assertions
assertThat(keyValueStorage).isNotNull();
verify(metricsSystemMock, times(4))
.createLabelledTimer(
eq(BesuMetricCategory.KVSTORE_ROCKSDB),
labelledTimersMetricsNameArgs.capture(),
labelledTimersHelpArgs.capture(),
any());
assertThat(labelledTimersMetricsNameArgs.getAllValues())
.containsExactly(
"read_latency_seconds",
"remove_latency_seconds",
"write_latency_seconds",
"commit_latency_seconds");
assertThat(labelledTimersHelpArgs.getAllValues())
.containsExactly(
"Latency for read from RocksDB.",
"Latency of remove requests from RocksDB.",
"Latency for write to RocksDB.",
"Latency for commits to RocksDB.");
verify(metricsSystemMock, times(2))
.createLongGauge(
eq(BesuMetricCategory.KVSTORE_ROCKSDB),
longGaugesMetricsNameArgs.capture(),
longGaugesHelpArgs.capture(),
any(LongSupplier.class));
assertThat(longGaugesMetricsNameArgs.getAllValues())
.containsExactly("rocks_db_table_readers_memory_bytes", "rocks_db_files_size_bytes");
assertThat(longGaugesHelpArgs.getAllValues())
.containsExactly(
"Estimated memory used for RocksDB index and filter blocks in bytes",
"Estimated database size in bytes");
verify(metricsSystemMock)
.createLabelledCounter(
eq(BesuMetricCategory.KVSTORE_ROCKSDB),
labelledCountersMetricsNameArgs.capture(),
labelledCountersHelpArgs.capture(),
any());
assertThat(labelledCountersMetricsNameArgs.getValue()).isEqualTo("rollback_count");
assertThat(labelledCountersHelpArgs.getValue())
.isEqualTo("Number of RocksDB transactions rolled back.");
List.of(TestSegment.EXPERIMENTAL))) {
KeyValueStorage keyValueStorage = new SegmentedKeyValueStorageAdapter(TestSegment.FOO, store);
// Assertions
assertThat(keyValueStorage).isNotNull();
verify(metricsSystemMock, times(4))
.createLabelledTimer(
eq(BesuMetricCategory.KVSTORE_ROCKSDB),
labelledTimersMetricsNameArgs.capture(),
labelledTimersHelpArgs.capture(),
any());
assertThat(labelledTimersMetricsNameArgs.getAllValues())
.containsExactly(
"read_latency_seconds",
"remove_latency_seconds",
"write_latency_seconds",
"commit_latency_seconds");
assertThat(labelledTimersHelpArgs.getAllValues())
.containsExactly(
"Latency for read from RocksDB.",
"Latency of remove requests from RocksDB.",
"Latency for write to RocksDB.",
"Latency for commits to RocksDB.");
verify(metricsSystemMock, times(2))
.createLongGauge(
eq(BesuMetricCategory.KVSTORE_ROCKSDB),
longGaugesMetricsNameArgs.capture(),
longGaugesHelpArgs.capture(),
any(LongSupplier.class));
assertThat(longGaugesMetricsNameArgs.getAllValues())
.containsExactly("rocks_db_table_readers_memory_bytes", "rocks_db_files_size_bytes");
assertThat(longGaugesHelpArgs.getAllValues())
.containsExactly(
"Estimated memory used for RocksDB index and filter blocks in bytes",
"Estimated database size in bytes");
verify(metricsSystemMock)
.createLabelledCounter(
eq(BesuMetricCategory.KVSTORE_ROCKSDB),
labelledCountersMetricsNameArgs.capture(),
labelledCountersHelpArgs.capture(),
any());
assertThat(labelledCountersMetricsNameArgs.getValue()).isEqualTo("rollback_count");
assertThat(labelledCountersHelpArgs.getValue())
.isEqualTo("Number of RocksDB transactions rolled back.");
}
}
public enum TestSegment implements SegmentIdentifier {

@ -56,18 +56,20 @@ public abstract class AbstractKeyValueStorageTest {
*/
@Test
public void twoStoresAreIndependent() throws Exception {
final KeyValueStorage store1 = createStore();
final KeyValueStorage store2 = createStore();
try (final KeyValueStorage store1 = createStore()) {
try (final KeyValueStorage store2 = createStore()) {
final KeyValueStorageTransaction tx = store1.startTransaction();
final byte[] key = bytesFromHexString("0001");
final byte[] value = bytesFromHexString("0FFF");
final KeyValueStorageTransaction tx = store1.startTransaction();
final byte[] key = bytesFromHexString("0001");
final byte[] value = bytesFromHexString("0FFF");
tx.put(key, value);
tx.commit();
tx.put(key, value);
tx.commit();
final Optional<byte[]> result = store2.get(key);
assertThat(result).isEmpty();
final Optional<byte[]> result = store2.get(key);
assertThat(result).isEmpty();
}
}
}
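The rest of the tests in this class follow the same conversion: the store is opened in a try-with-resources block so it is always closed, even when an assertion fails, instead of relying on an explicit store.close() at the end of the test. The shape is:
try (final KeyValueStorage store = createStore()) {
  final KeyValueStorageTransaction tx = store.startTransaction();
  // ... exercise the store ...
  tx.commit();
} // store.close() is invoked automatically here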
/**
@ -77,20 +79,21 @@ public abstract class AbstractKeyValueStorageTest {
*/
@Test
public void put() throws Exception {
final KeyValueStorage store = createStore();
final byte[] key = bytesFromHexString("0F");
final byte[] firstValue = bytesFromHexString("0ABC");
final byte[] secondValue = bytesFromHexString("0DEF");
KeyValueStorageTransaction tx = store.startTransaction();
tx.put(key, firstValue);
tx.commit();
assertThat(store.get(key)).contains(firstValue);
tx = store.startTransaction();
tx.put(key, secondValue);
tx.commit();
assertThat(store.get(key)).contains(secondValue);
try (final KeyValueStorage store = createStore()) {
final byte[] key = bytesFromHexString("0F");
final byte[] firstValue = bytesFromHexString("0ABC");
final byte[] secondValue = bytesFromHexString("0DEF");
KeyValueStorageTransaction tx = store.startTransaction();
tx.put(key, firstValue);
tx.commit();
assertThat(store.get(key)).contains(firstValue);
tx = store.startTransaction();
tx.put(key, secondValue);
tx.commit();
assertThat(store.get(key)).contains(secondValue);
}
}
/**
@ -100,16 +103,17 @@ public abstract class AbstractKeyValueStorageTest {
*/
@Test
public void streamKeys() throws Exception {
final KeyValueStorage store = createStore();
final KeyValueStorageTransaction tx = store.startTransaction();
final List<byte[]> keys =
Stream.of("0F", "10", "11", "12")
.map(this::bytesFromHexString)
.collect(toUnmodifiableList());
keys.forEach(key -> tx.put(key, bytesFromHexString("0ABC")));
tx.commit();
assertThat(store.stream().map(Pair::getKey).collect(toUnmodifiableSet()))
.containsExactlyInAnyOrder(keys.toArray(new byte[][] {}));
try (final KeyValueStorage store = createStore()) {
final KeyValueStorageTransaction tx = store.startTransaction();
final List<byte[]> keys =
Stream.of("0F", "10", "11", "12")
.map(this::bytesFromHexString)
.collect(toUnmodifiableList());
keys.forEach(key -> tx.put(key, bytesFromHexString("0ABC")));
tx.commit();
assertThat(store.stream().map(Pair::getKey).collect(toUnmodifiableSet()))
.containsExactlyInAnyOrder(keys.toArray(new byte[][] {}));
}
}
/**
@ -119,18 +123,19 @@ public abstract class AbstractKeyValueStorageTest {
*/
@Test
public void getAllKeysThat() throws Exception {
final KeyValueStorage store = createStore();
final KeyValueStorageTransaction tx = store.startTransaction();
tx.put(bytesFromHexString("0F"), bytesFromHexString("0ABC"));
tx.put(bytesFromHexString("10"), bytesFromHexString("0ABC"));
tx.put(bytesFromHexString("11"), bytesFromHexString("0ABC"));
tx.put(bytesFromHexString("12"), bytesFromHexString("0ABC"));
tx.commit();
Set<byte[]> keys = store.getAllKeysThat(bv -> Bytes.wrap(bv).toString().contains("1"));
assertThat(keys.size()).isEqualTo(3);
assertThat(keys)
.containsExactlyInAnyOrder(
bytesFromHexString("10"), bytesFromHexString("11"), bytesFromHexString("12"));
try (final KeyValueStorage store = createStore()) {
final KeyValueStorageTransaction tx = store.startTransaction();
tx.put(bytesFromHexString("0F"), bytesFromHexString("0ABC"));
tx.put(bytesFromHexString("10"), bytesFromHexString("0ABC"));
tx.put(bytesFromHexString("11"), bytesFromHexString("0ABC"));
tx.put(bytesFromHexString("12"), bytesFromHexString("0ABC"));
tx.commit();
Set<byte[]> keys = store.getAllKeysThat(bv -> Bytes.wrap(bv).toString().contains("1"));
assertThat(keys.size()).isEqualTo(3);
assertThat(keys)
.containsExactlyInAnyOrder(
bytesFromHexString("10"), bytesFromHexString("11"), bytesFromHexString("12"));
}
}
/**
@ -140,17 +145,18 @@ public abstract class AbstractKeyValueStorageTest {
*/
@Test
public void containsKey() throws Exception {
final KeyValueStorage store = createStore();
final byte[] key = bytesFromHexString("ABCD");
final byte[] value = bytesFromHexString("DEFF");
try (final KeyValueStorage store = createStore()) {
final byte[] key = bytesFromHexString("ABCD");
final byte[] value = bytesFromHexString("DEFF");
assertThat(store.containsKey(key)).isFalse();
assertThat(store.containsKey(key)).isFalse();
final KeyValueStorageTransaction transaction = store.startTransaction();
transaction.put(key, value);
transaction.commit();
final KeyValueStorageTransaction transaction = store.startTransaction();
transaction.put(key, value);
transaction.commit();
assertThat(store.containsKey(key)).isTrue();
assertThat(store.containsKey(key)).isTrue();
}
}
/**
@ -160,18 +166,19 @@ public abstract class AbstractKeyValueStorageTest {
*/
@Test
public void removeExisting() throws Exception {
final KeyValueStorage store = createStore();
final byte[] key = bytesFromHexString("0F");
final byte[] value = bytesFromHexString("0ABC");
KeyValueStorageTransaction tx = store.startTransaction();
tx.put(key, value);
tx.commit();
tx = store.startTransaction();
tx.remove(key);
tx.commit();
assertThat(store.get(key)).isEmpty();
try (final KeyValueStorage store = createStore()) {
final byte[] key = bytesFromHexString("0F");
final byte[] value = bytesFromHexString("0ABC");
KeyValueStorageTransaction tx = store.startTransaction();
tx.put(key, value);
tx.commit();
tx = store.startTransaction();
tx.remove(key);
tx.commit();
assertThat(store.get(key)).isEmpty();
}
}
/**
@ -181,15 +188,16 @@ public abstract class AbstractKeyValueStorageTest {
*/
@Test
public void removeExistingSameTransaction() throws Exception {
final KeyValueStorage store = createStore();
final byte[] key = bytesFromHexString("0F");
final byte[] value = bytesFromHexString("0ABC");
KeyValueStorageTransaction tx = store.startTransaction();
tx.put(key, value);
tx.remove(key);
tx.commit();
assertThat(store.get(key)).isEmpty();
try (final KeyValueStorage store = createStore()) {
final byte[] key = bytesFromHexString("0F");
final byte[] value = bytesFromHexString("0ABC");
KeyValueStorageTransaction tx = store.startTransaction();
tx.put(key, value);
tx.remove(key);
tx.commit();
assertThat(store.get(key)).isEmpty();
}
}
/**
@ -199,13 +207,14 @@ public abstract class AbstractKeyValueStorageTest {
*/
@Test
public void removeNonExistent() throws Exception {
final KeyValueStorage store = createStore();
final byte[] key = bytesFromHexString("0F");
try (final KeyValueStorage store = createStore()) {
final byte[] key = bytesFromHexString("0F");
KeyValueStorageTransaction tx = store.startTransaction();
tx.remove(key);
tx.commit();
assertThat(store.get(key)).isEmpty();
KeyValueStorageTransaction tx = store.startTransaction();
tx.remove(key);
tx.commit();
assertThat(store.get(key)).isEmpty();
}
}
/**
@ -216,39 +225,38 @@ public abstract class AbstractKeyValueStorageTest {
@Test
public void concurrentUpdate() throws Exception {
final int keyCount = 1000;
final KeyValueStorage store = createStore();
final CountDownLatch finishedLatch = new CountDownLatch(2);
final Function<byte[], Thread> updater =
(value) ->
new Thread(
() -> {
try {
for (int i = 0; i < keyCount; i++) {
KeyValueStorageTransaction tx = store.startTransaction();
tx.put(Bytes.minimalBytes(i).toArrayUnsafe(), value);
tx.commit();
try (final KeyValueStorage store = createStore()) {
final CountDownLatch finishedLatch = new CountDownLatch(2);
final Function<byte[], Thread> updater =
(value) ->
new Thread(
() -> {
try {
for (int i = 0; i < keyCount; i++) {
KeyValueStorageTransaction tx = store.startTransaction();
tx.put(Bytes.minimalBytes(i).toArrayUnsafe(), value);
tx.commit();
}
} finally {
finishedLatch.countDown();
}
} finally {
finishedLatch.countDown();
}
});
});
// Run 2 concurrent transactions that write a bunch of values to the same keys
final byte[] a = Bytes.of(10).toArrayUnsafe();
final byte[] b = Bytes.of(20).toArrayUnsafe();
updater.apply(a).start();
updater.apply(b).start();
// Run 2 concurrent transactions that write a bunch of values to the same keys
final byte[] a = Bytes.of(10).toArrayUnsafe();
final byte[] b = Bytes.of(20).toArrayUnsafe();
updater.apply(a).start();
updater.apply(b).start();
finishedLatch.await();
finishedLatch.await();
for (int i = 0; i < keyCount; i++) {
final byte[] key = Bytes.minimalBytes(i).toArrayUnsafe();
final byte[] actual = store.get(key).get();
assertThat(Arrays.equals(actual, a) || Arrays.equals(actual, b)).isTrue();
for (int i = 0; i < keyCount; i++) {
final byte[] key = Bytes.minimalBytes(i).toArrayUnsafe();
final byte[] actual = store.get(key).get();
assertThat(Arrays.equals(actual, a) || Arrays.equals(actual, b)).isTrue();
}
}
store.close();
}
/**
@ -258,34 +266,35 @@ public abstract class AbstractKeyValueStorageTest {
*/
@Test
public void transactionCommit() throws Exception {
final KeyValueStorage store = createStore();
// Add some values
KeyValueStorageTransaction tx = store.startTransaction();
tx.put(bytesOf(1), bytesOf(1));
tx.put(bytesOf(2), bytesOf(2));
tx.put(bytesOf(3), bytesOf(3));
tx.commit();
// Start transaction that adds, modifies, and removes some values
tx = store.startTransaction();
tx.put(bytesOf(2), bytesOf(3));
tx.put(bytesOf(2), bytesOf(4));
tx.remove(bytesOf(3));
tx.put(bytesOf(4), bytesOf(8));
// Check values before committing have not changed
assertThat(store.get(bytesOf(1))).contains(bytesOf(1));
assertThat(store.get(bytesOf(2))).contains(bytesOf(2));
assertThat(store.get(bytesOf(3))).contains(bytesOf(3));
assertThat(store.get(bytesOf(4))).isEmpty();
tx.commit();
// Check that values have been updated after commit
assertThat(store.get(bytesOf(1))).contains(bytesOf(1));
assertThat(store.get(bytesOf(2))).contains(bytesOf(4));
assertThat(store.get(bytesOf(3))).isEmpty();
assertThat(store.get(bytesOf(4))).contains(bytesOf(8));
try (final KeyValueStorage store = createStore()) {
// Add some values
KeyValueStorageTransaction tx = store.startTransaction();
tx.put(bytesOf(1), bytesOf(1));
tx.put(bytesOf(2), bytesOf(2));
tx.put(bytesOf(3), bytesOf(3));
tx.commit();
// Start transaction that adds, modifies, and removes some values
tx = store.startTransaction();
tx.put(bytesOf(2), bytesOf(3));
tx.put(bytesOf(2), bytesOf(4));
tx.remove(bytesOf(3));
tx.put(bytesOf(4), bytesOf(8));
// Check values before committing have not changed
assertThat(store.get(bytesOf(1))).contains(bytesOf(1));
assertThat(store.get(bytesOf(2))).contains(bytesOf(2));
assertThat(store.get(bytesOf(3))).contains(bytesOf(3));
assertThat(store.get(bytesOf(4))).isEmpty();
tx.commit();
// Check that values have been updated after commit
assertThat(store.get(bytesOf(1))).contains(bytesOf(1));
assertThat(store.get(bytesOf(2))).contains(bytesOf(4));
assertThat(store.get(bytesOf(3))).isEmpty();
assertThat(store.get(bytesOf(4))).contains(bytesOf(8));
}
}
/**
@ -295,34 +304,35 @@ public abstract class AbstractKeyValueStorageTest {
*/
@Test
public void transactionRollback() throws Exception {
final KeyValueStorage store = createStore();
// Add some values
KeyValueStorageTransaction tx = store.startTransaction();
tx.put(bytesOf(1), bytesOf(1));
tx.put(bytesOf(2), bytesOf(2));
tx.put(bytesOf(3), bytesOf(3));
tx.commit();
// Start transaction that adds, modifies, and removes some values
tx = store.startTransaction();
tx.put(bytesOf(2), bytesOf(3));
tx.put(bytesOf(2), bytesOf(4));
tx.remove(bytesOf(3));
tx.put(bytesOf(4), bytesOf(8));
// Check values before committing have not changed
assertThat(store.get(bytesOf(1))).contains(bytesOf(1));
assertThat(store.get(bytesOf(2))).contains(bytesOf(2));
assertThat(store.get(bytesOf(3))).contains(bytesOf(3));
assertThat(store.get(bytesOf(4))).isEmpty();
tx.rollback();
// Check that values have not changed after rollback
assertThat(store.get(bytesOf(1))).contains(bytesOf(1));
assertThat(store.get(bytesOf(2))).contains(bytesOf(2));
assertThat(store.get(bytesOf(3))).contains(bytesOf(3));
assertThat(store.get(bytesOf(4))).isEmpty();
try (final KeyValueStorage store = createStore()) {
// Add some values
KeyValueStorageTransaction tx = store.startTransaction();
tx.put(bytesOf(1), bytesOf(1));
tx.put(bytesOf(2), bytesOf(2));
tx.put(bytesOf(3), bytesOf(3));
tx.commit();
// Start transaction that adds, modifies, and removes some values
tx = store.startTransaction();
tx.put(bytesOf(2), bytesOf(3));
tx.put(bytesOf(2), bytesOf(4));
tx.remove(bytesOf(3));
tx.put(bytesOf(4), bytesOf(8));
// Check values before committing have not changed
assertThat(store.get(bytesOf(1))).contains(bytesOf(1));
assertThat(store.get(bytesOf(2))).contains(bytesOf(2));
assertThat(store.get(bytesOf(3))).contains(bytesOf(3));
assertThat(store.get(bytesOf(4))).isEmpty();
tx.rollback();
// Check that values have not changed after rollback
assertThat(store.get(bytesOf(1))).contains(bytesOf(1));
assertThat(store.get(bytesOf(2))).contains(bytesOf(2));
assertThat(store.get(bytesOf(3))).contains(bytesOf(3));
assertThat(store.get(bytesOf(4))).isEmpty();
}
}
/**
@ -332,9 +342,10 @@ public abstract class AbstractKeyValueStorageTest {
*/
@Test
public void transactionCommitEmpty() throws Exception {
final KeyValueStorage store = createStore();
final KeyValueStorageTransaction tx = store.startTransaction();
tx.commit();
try (final KeyValueStorage store = createStore()) {
final KeyValueStorageTransaction tx = store.startTransaction();
tx.commit();
}
}
/**
@ -344,143 +355,120 @@ public abstract class AbstractKeyValueStorageTest {
*/
@Test
public void transactionRollbackEmpty() throws Exception {
final KeyValueStorage store = createStore();
final KeyValueStorageTransaction tx = store.startTransaction();
tx.rollback();
try (final KeyValueStorage store = createStore()) {
final KeyValueStorageTransaction tx = store.startTransaction();
tx.rollback();
}
}
/**
* Transaction put after commit.
*
* @throws Exception the exception
*/
/** Transaction put after commit. */
@Test
public void transactionPutAfterCommit() throws Exception {
public void transactionPutAfterCommit() {
Assertions.assertThatThrownBy(
() -> {
final KeyValueStorage store = createStore();
final KeyValueStorageTransaction tx = store.startTransaction();
tx.commit();
tx.put(bytesOf(1), bytesOf(1));
try (final KeyValueStorage store = createStore()) {
final KeyValueStorageTransaction tx = store.startTransaction();
tx.commit();
tx.put(bytesOf(1), bytesOf(1));
}
})
.isInstanceOf(IllegalStateException.class);
}
/**
* Transaction remove after commit.
*
* @throws Exception the exception
*/
/** Transaction remove after commit. */
@Test
public void transactionRemoveAfterCommit() throws Exception {
public void transactionRemoveAfterCommit() {
Assertions.assertThatThrownBy(
() -> {
final KeyValueStorage store = createStore();
final KeyValueStorageTransaction tx = store.startTransaction();
tx.commit();
tx.remove(bytesOf(1));
try (final KeyValueStorage store = createStore()) {
final KeyValueStorageTransaction tx = store.startTransaction();
tx.commit();
tx.remove(bytesOf(1));
}
})
.isInstanceOf(IllegalStateException.class);
}
/**
* Transaction put after rollback.
*
* @throws Exception the exception
*/
/** Transaction put after rollback. */
@Test
public void transactionPutAfterRollback() throws Exception {
public void transactionPutAfterRollback() {
Assertions.assertThatThrownBy(
() -> {
final KeyValueStorage store = createStore();
final KeyValueStorageTransaction tx = store.startTransaction();
tx.rollback();
tx.put(bytesOf(1), bytesOf(1));
try (final KeyValueStorage store = createStore()) {
final KeyValueStorageTransaction tx = store.startTransaction();
tx.rollback();
tx.put(bytesOf(1), bytesOf(1));
}
})
.isInstanceOf(IllegalStateException.class);
}
/**
* Transaction remove after rollback.
*
* @throws Exception the exception
*/
/** Transaction remove after rollback. */
@Test
public void transactionRemoveAfterRollback() throws Exception {
public void transactionRemoveAfterRollback() {
Assertions.assertThatThrownBy(
() -> {
final KeyValueStorage store = createStore();
final KeyValueStorageTransaction tx = store.startTransaction();
tx.rollback();
tx.remove(bytesOf(1));
try (final KeyValueStorage store = createStore()) {
final KeyValueStorageTransaction tx = store.startTransaction();
tx.rollback();
tx.remove(bytesOf(1));
}
})
.isInstanceOf(IllegalStateException.class);
}
/**
* Transaction commit after rollback.
*
* @throws Exception the exception
*/
/** Transaction commit after rollback. */
@Test
public void transactionCommitAfterRollback() throws Exception {
public void transactionCommitAfterRollback() {
Assertions.assertThatThrownBy(
() -> {
final KeyValueStorage store = createStore();
final KeyValueStorageTransaction tx = store.startTransaction();
tx.rollback();
tx.commit();
try (final KeyValueStorage store = createStore()) {
final KeyValueStorageTransaction tx = store.startTransaction();
tx.rollback();
tx.commit();
}
})
.isInstanceOf(IllegalStateException.class);
}
/**
* Transaction commit twice.
*
* @throws Exception the exception
*/
/** Transaction commit twice. */
@Test
public void transactionCommitTwice() throws Exception {
public void transactionCommitTwice() {
Assertions.assertThatThrownBy(
() -> {
final KeyValueStorage store = createStore();
final KeyValueStorageTransaction tx = store.startTransaction();
tx.commit();
tx.commit();
try (final KeyValueStorage store = createStore()) {
final KeyValueStorageTransaction tx = store.startTransaction();
tx.commit();
tx.commit();
}
})
.isInstanceOf(IllegalStateException.class);
}
/**
* Transaction rollback after commit.
*
* @throws Exception the exception
*/
/** Transaction rollback after commit. */
@Test
public void transactionRollbackAfterCommit() throws Exception {
public void transactionRollbackAfterCommit() {
Assertions.assertThatThrownBy(
() -> {
final KeyValueStorage store = createStore();
final KeyValueStorageTransaction tx = store.startTransaction();
tx.commit();
tx.rollback();
try (final KeyValueStorage store = createStore()) {
final KeyValueStorageTransaction tx = store.startTransaction();
tx.commit();
tx.rollback();
}
})
.isInstanceOf(IllegalStateException.class);
}
/**
* Transaction rollback twice.
*
* @throws Exception the exception
*/
/** Transaction rollback twice. */
@Test
public void transactionRollbackTwice() throws Exception {
public void transactionRollbackTwice() {
Assertions.assertThatThrownBy(
() -> {
final KeyValueStorage store = createStore();
final KeyValueStorageTransaction tx = store.startTransaction();
tx.rollback();
tx.rollback();
try (final KeyValueStorage store = createStore()) {
final KeyValueStorageTransaction tx = store.startTransaction();
tx.rollback();
tx.rollback();
}
})
.isInstanceOf(IllegalStateException.class);
}
@ -492,19 +480,20 @@ public abstract class AbstractKeyValueStorageTest {
*/
@Test
public void twoTransactions() throws Exception {
final KeyValueStorage store = createStore();
try (final KeyValueStorage store = createStore()) {
final KeyValueStorageTransaction tx1 = store.startTransaction();
final KeyValueStorageTransaction tx2 = store.startTransaction();
final KeyValueStorageTransaction tx1 = store.startTransaction();
final KeyValueStorageTransaction tx2 = store.startTransaction();
tx1.put(bytesOf(1), bytesOf(1));
tx2.put(bytesOf(2), bytesOf(2));
tx1.put(bytesOf(1), bytesOf(1));
tx2.put(bytesOf(2), bytesOf(2));
tx1.commit();
tx2.commit();
tx1.commit();
tx2.commit();
assertThat(store.get(bytesOf(1))).contains(bytesOf(1));
assertThat(store.get(bytesOf(2))).contains(bytesOf(2));
assertThat(store.get(bytesOf(1))).contains(bytesOf(1));
assertThat(store.get(bytesOf(2))).contains(bytesOf(2));
}
}
/**
