Healing Mechanism for Flat Database in Besu (#5319)

The proposed pull request introduces a mechanism for healing the flat database: flat database entries are streamed in ranges, and each range is validated against a proof generated from the trie structure. If a proof turns out to be invalid, the code traverses the trie to repair that range. To avoid checking the entire flat database, the PR also adds optimizations such as tracking, during SnapSync, the accounts that need to be repaired. Together, these changes aim to significantly reduce the time and resources required to repair the flat database.
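At a high level, the account-side check boils down to the following sketch. It is illustrative only: the helper name `healAccountRange` is hypothetical, the real logic lives in the new AccountFlatDatabaseHealingRangeRequest, and the signatures of `streamFlatAccounts`, `getAccountProofRelatedNodes` and `isValidRangeProof` are taken from the diff below (128 is the default account-heal count per request).

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.proof.WorldStateProofProvider;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;

class FlatDbAccountHealingSketch {
  // Hypothetical helper: verify one flat-database account range against the local trie.
  void healAccountRange(
      final WorldStateStorage worldStateStorage,
      final WorldStateProofProvider proofProvider,
      final Hash worldStateRootHash,
      final Bytes32 startKeyHash,
      final Bytes32 endKeyHash) {
    // 1. Stream a bounded slice of the flat database (default: 128 accounts per request).
    final TreeMap<Bytes32, Bytes> accounts =
        new TreeMap<>(worldStateStorage.streamFlatAccounts(startKeyHash, endKeyHash, 128));
    if (accounts.isEmpty()) {
      return; // nothing to verify in this range
    }
    // 2. Generate a local range proof from the trie for the first and last keys of the slice.
    final List<Bytes> proofs =
        new ArrayList<>(
            proofProvider.getAccountProofRelatedNodes(worldStateRootHash, accounts.firstKey()));
    proofs.addAll(
        proofProvider.getAccountProofRelatedNodes(worldStateRootHash, accounts.lastKey()));
    // 3. If the flat-database slice does not match the trie, the trie is traversed for that
    //    range and the offending flat entries are repaired.
    if (!proofProvider.isValidRangeProof(
        startKeyHash, endKeyHash, worldStateRootHash, proofs, accounts)) {
      // queue a repair of [startKeyHash, endKeyHash] from the trie
    }
  }
}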

Signed-off-by: Karim TAAM <karim.t2am@gmail.com>
pull/5601/head
matkt 1 year ago committed by GitHub
parent ddacdc37c2
commit 180c75197c
Changed files (change counts in parentheses):

  1. CHANGELOG.md (2)
  2. besu/src/main/java/org/hyperledger/besu/cli/BesuCommand.java (8)
  3. besu/src/main/java/org/hyperledger/besu/cli/options/unstable/SynchronizerOptions.java (74)
  4. besu/src/test/java/org/hyperledger/besu/cli/BesuCommandTest.java (26)
  5. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/bonsai/BonsaiWorldStateProvider.java (8)
  6. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/bonsai/storage/BonsaiSnapshotWorldStateKeyValueStorage.java (2)
  7. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/bonsai/storage/BonsaiWorldStateKeyValueStorage.java (253)
  8. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/bonsai/storage/BonsaiWorldStateLayerStorage.java (6)
  9. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/bonsai/storage/flat/FlatDbReaderStrategy.java (174)
  10. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/bonsai/storage/flat/FullFlatDbReaderStrategy.java (98)
  11. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/bonsai/storage/flat/PartialFlatDbReaderStrategy.java (140)
  12. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/proof/WorldStateProofProvider.java (38)
  13. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/WorldStateKeyValueStorage.java (17)
  14. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/FlatDbMode.java (57)
  15. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/WorldStateStorage.java (35)
  16. ethereum/core/src/test/java/org/hyperledger/besu/ethereum/bonsai/BonsaiWorldStateKeyValueStorageTest.java (136)
  17. ethereum/core/src/test/java/org/hyperledger/besu/ethereum/bonsai/LogRollingTests.java (58)
  18. ethereum/core/src/test/java/org/hyperledger/besu/ethereum/bonsai/RollingImport.java (13)
  19. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/DefaultSynchronizer.java (6)
  20. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/checkpointsync/CheckpointDownloaderFactory.java (17)
  21. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncDownloader.java (4)
  22. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/worldstate/AccountTrieNodeDataRequest.java (5)
  23. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/worldstate/StorageTrieNodeDataRequest.java (3)
  24. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/CompleteTaskStep.java (9)
  25. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/DynamicPivotBlockSelector.java (15)
  26. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/LoadLocalDataStep.java (17)
  27. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/PersistDataStep.java (53)
  28. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/RangeManager.java (14)
  29. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/RequestDataStep.java (107)
  30. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapDownloaderFactory.java (14)
  31. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapSyncConfiguration.java (33)
  32. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapSyncDownloader.java (2)
  33. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapSyncProcessState.java (29)
  34. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldDownloadState.java (144)
  35. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldStateDownloadProcess.java (132)
  36. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldStateDownloader.java (52)
  37. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapsyncMetricsManager.java (143)
  38. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/StackTrie.java (27)
  39. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/context/SnapSyncStatePersistenceManager.java (35)
  40. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/AccountRangeDataRequest.java (33)
  41. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/BytecodeRequest.java (14)
  42. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/SnapDataRequest.java (58)
  43. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/StorageRangeDataRequest.java (57)
  44. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/heal/AccountFlatDatabaseHealingRangeRequest.java (210)
  45. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/heal/AccountTrieNodeHealingRequest.java (24)
  46. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/heal/StorageFlatDatabaseHealingRangeRequest.java (190)
  47. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/heal/StorageTrieNodeHealingRequest.java (47)
  48. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/heal/TrieNodeHealingRequest.java (28)
  49. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/state/SyncState.java (2)
  50. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncDownloaderTest.java (2)
  51. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/CompleteTaskStepTest.java (2)
  52. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/DynamicPivotBlockManagerTest.java (6)
  53. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/LoadLocalDataStepTest.java (17)
  54. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/PersistDataStepTest.java (6)
  55. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/RangeManagerTest.java (37)
  56. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldDownloadStateTest.java (105)
  57. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/heal/AccountFlatDatabaseHealingRangeRequestTest.java (297)
  58. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/heal/StorageFlatDatabaseHealingRangeRequestTest.java (325)
  59. ethereum/trie/src/main/java/org/hyperledger/besu/ethereum/trie/RangeStorageEntriesCollector.java (4)
  60. ethereum/trie/src/main/java/org/hyperledger/besu/ethereum/trie/TrieIterator.java (17)
  61. ethereum/trie/src/test/java/org/hyperledger/besu/ethereum/trie/RangeStorageEntriesCollectorTest.java (85)
  62. ethereum/trie/src/test/java/org/hyperledger/besu/ethereum/trie/TrieIteratorTest.java (1)
  63. plugin-api/build.gradle (2)
  64. plugin-api/src/main/java/org/hyperledger/besu/plugin/services/storage/KeyValueStorage.java (11)
  65. plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/segmented/RocksDBColumnarKeyValueSnapshot.java (6)
  66. plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/segmented/RocksDBColumnarKeyValueStorage.java (8)
  67. plugins/rocksdb/src/main/java/org/hyperledger/besu/plugin/services/storage/rocksdb/unsegmented/RocksDBKeyValueStorage.java (6)
  68. services/kvstore/src/main/java/org/hyperledger/besu/services/kvstore/InMemoryKeyValueStorage.java (5)
  69. services/kvstore/src/main/java/org/hyperledger/besu/services/kvstore/LayeredKeyValueStorage.java (5)
  70. services/kvstore/src/main/java/org/hyperledger/besu/services/kvstore/LimitedInMemoryKeyValueStorage.java (5)
  71. services/kvstore/src/main/java/org/hyperledger/besu/services/kvstore/SegmentedKeyValueStorage.java (11)
  72. services/kvstore/src/main/java/org/hyperledger/besu/services/kvstore/SegmentedKeyValueStorageAdapter.java (5)
  73. services/pipeline/src/main/java/org/hyperledger/besu/services/pipeline/Pipeline.java (2)

@ -14,6 +14,8 @@ and in case a rollback is needed, before installing a previous version, the migr
- Tidy DEBUG logs by moving engine API full logging to TRACE [#5529](https://github.com/hyperledger/besu/pull/5529)
- remove PoW validation if merge is enabled as it is not needed anymore [#5538](https://github.com/hyperledger/besu/pull/5538)
- Use BlobDB for blockchain storage to reduce initial sync time and write amplification [#5475](https://github.com/hyperledger/besu/pull/5475)
- Update to Tuweni 2.4.1. [#5513](https://github.com/hyperledger/besu/pull/5513)
- Add healing flat db mechanism with experimental CLI options `--Xsnapsync-synchronizer-flat-db-healing-enabled=true` [#5319](https://github.com/hyperledger/besu/pull/5319)
### Bug Fixes
- check to ensure storage and transactions are not closed prior to reading/writing [#5527](https://github.com/hyperledger/besu/pull/5527)

@ -2168,6 +2168,14 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
SyncMode.X_CHECKPOINT.equals(getDefaultSyncModeIfNotSet()),
singletonList("--Xcheckpoint-post-merge-enabled"));
CommandLineUtils.failIfOptionDoesntMeetRequirement(
commandLine,
"--Xsnapsync-synchronizer-flat option can only be used when -Xsnapsync-synchronizer-flat-db-healing-enabled is true",
unstableSynchronizerOptions.isSnapsyncFlatDbHealingEnabled(),
asList(
"--Xsnapsync-synchronizer-flat-account-healed-count-per-request",
"--Xsnapsync-synchronizer-flat-slot-healed-count-per-request"));
if (!securityModuleName.equals(DEFAULT_SECURITY_MODULE)
&& nodePrivateKeyFileOption.getNodePrivateKeyFile() != null) {
logger.warn(

@ -73,6 +73,15 @@ public class SynchronizerOptions implements CLIOptions<SynchronizerConfiguration
private static final String SNAP_TRIENODE_COUNT_PER_REQUEST_FLAG =
"--Xsnapsync-synchronizer-trienode-count-per-request";
private static final String SNAP_FLAT_ACCOUNT_HEALED_COUNT_PER_REQUEST_FLAG =
"--Xsnapsync-synchronizer-flat-account-healed-count-per-request";
private static final String SNAP_FLAT_STORAGE_HEALED_COUNT_PER_REQUEST_FLAG =
"--Xsnapsync-synchronizer-flat-slot-healed-count-per-request";
private static final String SNAP_FLAT_DB_HEALING_ENABLED_FLAG =
"--Xsnapsync-synchronizer-flat-db-healing-enabled";
private static final String CHECKPOINT_POST_MERGE_FLAG = "--Xcheckpoint-post-merge-enabled";
/**
@ -258,7 +267,7 @@ public class SynchronizerOptions implements CLIOptions<SynchronizerConfiguration
hidden = true,
defaultValue = "384",
paramLabel = "<INTEGER>",
description = "Snap sync sync storage queried per request (default: ${DEFAULT-VALUE})")
description = "Snap sync storage queried per request (default: ${DEFAULT-VALUE})")
private int snapsyncStorageCountPerRequest =
SnapSyncConfiguration.DEFAULT_STORAGE_COUNT_PER_REQUEST;
@ -267,7 +276,7 @@ public class SynchronizerOptions implements CLIOptions<SynchronizerConfiguration
hidden = true,
defaultValue = "84",
paramLabel = "<INTEGER>",
description = "Snap sync sync bytecode queried per request (default: ${DEFAULT-VALUE})")
description = "Snap sync bytecode queried per request (default: ${DEFAULT-VALUE})")
private int snapsyncBytecodeCountPerRequest =
SnapSyncConfiguration.DEFAULT_BYTECODE_COUNT_PER_REQUEST;
@ -276,10 +285,39 @@ public class SynchronizerOptions implements CLIOptions<SynchronizerConfiguration
hidden = true,
defaultValue = "384",
paramLabel = "<INTEGER>",
description = "Snap sync sync trie node queried per request (default: ${DEFAULT-VALUE})")
description = "Snap sync trie node queried per request (default: ${DEFAULT-VALUE})")
private int snapsyncTrieNodeCountPerRequest =
SnapSyncConfiguration.DEFAULT_TRIENODE_COUNT_PER_REQUEST;
@CommandLine.Option(
names = SNAP_FLAT_ACCOUNT_HEALED_COUNT_PER_REQUEST_FLAG,
hidden = true,
defaultValue = "128",
paramLabel = "<INTEGER>",
description =
"Snap sync flat accounts verified and healed per request (default: ${DEFAULT-VALUE})")
private int snapsyncFlatAccountHealedCountPerRequest =
SnapSyncConfiguration.DEFAULT_LOCAL_FLAT_ACCOUNT_COUNT_TO_HEAL_PER_REQUEST;
@CommandLine.Option(
names = SNAP_FLAT_STORAGE_HEALED_COUNT_PER_REQUEST_FLAG,
hidden = true,
defaultValue = "1024",
paramLabel = "<INTEGER>",
description =
"Snap sync flat slots verified and healed per request (default: ${DEFAULT-VALUE})")
private int snapsyncFlatStorageHealedCountPerRequest =
SnapSyncConfiguration.DEFAULT_LOCAL_FLAT_STORAGE_COUNT_TO_HEAL_PER_REQUEST;
@CommandLine.Option(
names = SNAP_FLAT_DB_HEALING_ENABLED_FLAG,
hidden = true,
defaultValue = "false",
paramLabel = "<Boolean>",
description = "Snap sync flat db healing enabled (default: ${DEFAULT-VALUE})")
private Boolean snapsyncFlatDbHealingEnabled =
SnapSyncConfiguration.DEFAULT_IS_FLAT_DB_HEALING_ENABLED;
@CommandLine.Option(
names = {CHECKPOINT_POST_MERGE_FLAG},
hidden = true,
@ -298,6 +336,15 @@ public class SynchronizerOptions implements CLIOptions<SynchronizerConfiguration
return new SynchronizerOptions();
}
/**
* Flag to know whether the flat db healing feature is enabled or disabled.
*
* @return true if the flat db healing is enabled
*/
public boolean isSnapsyncFlatDbHealingEnabled() {
return snapsyncFlatDbHealingEnabled;
}
/**
* Create synchronizer options from Synchronizer Configuration.
*
@ -334,6 +381,12 @@ public class SynchronizerOptions implements CLIOptions<SynchronizerConfiguration
config.getSnapSyncConfiguration().getBytecodeCountPerRequest();
options.snapsyncTrieNodeCountPerRequest =
config.getSnapSyncConfiguration().getTrienodeCountPerRequest();
options.snapsyncFlatAccountHealedCountPerRequest =
config.getSnapSyncConfiguration().getLocalFlatAccountCountToHealPerRequest();
options.snapsyncFlatStorageHealedCountPerRequest =
config.getSnapSyncConfiguration().getLocalFlatStorageCountToHealPerRequest();
options.snapsyncFlatDbHealingEnabled =
config.getSnapSyncConfiguration().isFlatDbHealingEnabled();
options.checkpointPostMergeSyncEnabled = config.isCheckpointPostMergeEnabled();
return options;
}
@ -364,6 +417,9 @@ public class SynchronizerOptions implements CLIOptions<SynchronizerConfiguration
.storageCountPerRequest(snapsyncStorageCountPerRequest)
.bytecodeCountPerRequest(snapsyncBytecodeCountPerRequest)
.trienodeCountPerRequest(snapsyncTrieNodeCountPerRequest)
.localFlatAccountCountToHealPerRequest(snapsyncFlatAccountHealedCountPerRequest)
.localFlatStorageCountToHealPerRequest(snapsyncFlatStorageHealedCountPerRequest)
.isFlatDbHealingEnabled(snapsyncFlatDbHealingEnabled)
.build());
builder.checkpointPostMergeEnabled(checkpointPostMergeSyncEnabled);
@ -372,7 +428,8 @@ public class SynchronizerOptions implements CLIOptions<SynchronizerConfiguration
@Override
public List<String> getCLIOptions() {
return Arrays.asList(
List<String> value =
new ArrayList<>(
Arrays.asList(
BLOCK_PROPAGATION_RANGE_FLAG,
OptionParser.format(blockPropagationRange),
DOWNLOADER_CHANGE_TARGET_THRESHOLD_BY_HEIGHT_FLAG,
@ -415,5 +472,14 @@ public class SynchronizerOptions implements CLIOptions<SynchronizerConfiguration
OptionParser.format(snapsyncBytecodeCountPerRequest),
SNAP_TRIENODE_COUNT_PER_REQUEST_FLAG,
OptionParser.format(snapsyncTrieNodeCountPerRequest)));
if (isSnapsyncFlatDbHealingEnabled()) {
value.addAll(
Arrays.asList(
SNAP_FLAT_ACCOUNT_HEALED_COUNT_PER_REQUEST_FLAG,
OptionParser.format(snapsyncFlatAccountHealedCountPerRequest),
SNAP_FLAT_STORAGE_HEALED_COUNT_PER_REQUEST_FLAG,
OptionParser.format(snapsyncFlatStorageHealedCountPerRequest)));
}
return value;
}
}

@ -5725,4 +5725,30 @@ public class BesuCommandTest extends CommandTestAbstract {
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void snapsyncHealingOptionShouldBeDisabledByDefault() {
final TestBesuCommand besuCommand = parseCommand();
assertThat(besuCommand.unstableSynchronizerOptions.isSnapsyncFlatDbHealingEnabled()).isFalse();
}
@Test
public void snapsyncHealingOptionShouldWork() {
final TestBesuCommand besuCommand =
parseCommand("--Xsnapsync-synchronizer-flat-db-healing-enabled", "true");
assertThat(besuCommand.unstableSynchronizerOptions.isSnapsyncFlatDbHealingEnabled()).isTrue();
}
@Test
public void snapsyncForHealingFeaturesShouldFailWhenHealingIsNotEnabled() {
parseCommand("--Xsnapsync-synchronizer-flat-account-healed-count-per-request", "100");
assertThat(commandErrorOutput.toString(UTF_8))
.contains(
"--Xsnapsync-synchronizer-flat option can only be used when -Xsnapsync-synchronizer-flat-db-healing-enabled is true");
parseCommand("--Xsnapsync-synchronizer-flat-slot-healed-count-per-request", "100");
assertThat(commandErrorOutput.toString(UTF_8))
.contains(
"--Xsnapsync-synchronizer-flat option can only be used when -Xsnapsync-synchronizer-flat-db-healing-enabled is true");
}
}

@ -291,6 +291,12 @@ public class BonsaiWorldStateProvider implements WorldStateArchive {
return persistedState;
}
/**
* Prepares the state healing process for a given address and location: it retrieves data from
* storage, identifies invalid slots or nodes, removes the account and its slots from the state
* trie, and commits the changes. Finally, it downgrades the world state storage to partial flat
* database mode.
*/
public void prepareStateHealing(final Address address, final Bytes location) {
final Set<Bytes> keysToDelete = new HashSet<>();
final BonsaiWorldStateKeyValueStorage.BonsaiUpdater updater = worldStateStorage.updater();
@ -340,6 +346,8 @@ public class BonsaiWorldStateProvider implements WorldStateArchive {
}
keysToDelete.forEach(bytes -> updater.removeAccountStateTrieNode(bytes, null));
updater.commit();
worldStateStorage.downgradeToPartialFlatDbMode();
}
public TrieLogManager getTrieLogManager() {
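The sketch below shows, for context, how a caller might trigger this preparation once a flat-database entry is found to disagree with the trie. The wrapper method is hypothetical; `prepareStateHealing` is the method added above, and in this PR it is the trie-node healing requests that are expected to detect such inconsistencies.

import org.apache.tuweni.bytes.Bytes;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.ethereum.bonsai.BonsaiWorldStateProvider;

class StateHealingSketch {
  // Hypothetical reaction to an inconsistent flat-database entry: clear the stale account and
  // slot data around `location` and fall back to PARTIAL flat-db mode so reads go through the
  // trie until the range has been healed again.
  static void onInvalidFlatEntry(
      final BonsaiWorldStateProvider archive, final Address address, final Bytes location) {
    archive.prepareStateHealing(address, location);
  }
}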

@ -49,6 +49,8 @@ public class BonsaiSnapshotWorldStateKeyValueStorage extends BonsaiWorldStateKey
final KeyValueStorage trieLogStorage,
final ObservableMetricsSystem metricsSystem) {
super(
parentWorldStateStorage.flatDbMode,
parentWorldStateStorage.flatDbReaderStrategy,
accountStorage,
codeStorage,
storageStorage,

@ -16,33 +16,34 @@ package org.hyperledger.besu.ethereum.bonsai.storage;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.datatypes.StorageSlotKey;
import org.hyperledger.besu.ethereum.bonsai.storage.flat.FlatDbReaderStrategy;
import org.hyperledger.besu.ethereum.bonsai.storage.flat.FullFlatDbReaderStrategy;
import org.hyperledger.besu.ethereum.bonsai.storage.flat.PartialFlatDbReaderStrategy;
import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier;
import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.trie.patricia.StoredMerklePatriciaTrie;
import org.hyperledger.besu.ethereum.trie.patricia.StoredNodeFactory;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.metrics.ObservableMetricsSystem;
import org.hyperledger.besu.plugin.services.metrics.Counter;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
import org.hyperledger.besu.util.Subscribers;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.apache.tuweni.rlp.RLP;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@SuppressWarnings("unused")
public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(BonsaiWorldStateKeyValueStorage.class);
@ -53,22 +54,18 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
public static final byte[] WORLD_BLOCK_HASH_KEY =
"worldBlockHash".getBytes(StandardCharsets.UTF_8);
// 0x666C61744462537461747573
public static final byte[] FLAT_DB_MODE = "flatDbStatus".getBytes(StandardCharsets.UTF_8);
protected FlatDbMode flatDbMode;
protected FlatDbReaderStrategy flatDbReaderStrategy;
protected final KeyValueStorage accountStorage;
protected final KeyValueStorage codeStorage;
protected final KeyValueStorage storageStorage;
protected final KeyValueStorage trieBranchStorage;
protected final KeyValueStorage trieLogStorage;
private final Counter getAccountCounter;
private final Counter getAccountFlatDatabaseCounter;
private final Counter getAccountMerkleTrieCounter;
private final Counter getAccountMissingMerkleTrieCounter;
private final Counter getStorageValueCounter;
private final Counter getStorageValueFlatDatabaseCounter;
private final Counter getStorageValueMerkleTrieCounter;
private final Counter getStorageValueMissingMerkleTrieCounter;
protected final ObservableMetricsSystem metricsSystem;
private final AtomicBoolean shouldClose = new AtomicBoolean(false);
@ -79,117 +76,87 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
public BonsaiWorldStateKeyValueStorage(
final StorageProvider provider, final ObservableMetricsSystem metricsSystem) {
this(
provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE),
provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.CODE_STORAGE),
provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE),
provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE),
provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.TRIE_LOG_STORAGE),
metricsSystem);
this.accountStorage =
provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE);
this.codeStorage =
provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.CODE_STORAGE);
this.storageStorage =
provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE);
this.trieBranchStorage =
provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE);
this.trieLogStorage =
provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.TRIE_LOG_STORAGE);
this.metricsSystem = metricsSystem;
initFlatDbStrategy();
}
public BonsaiWorldStateKeyValueStorage(
final FlatDbMode flatDbMode,
final FlatDbReaderStrategy flatDbReaderStrategy,
final KeyValueStorage accountStorage,
final KeyValueStorage codeStorage,
final KeyValueStorage storageStorage,
final KeyValueStorage trieBranchStorage,
final KeyValueStorage trieLogStorage,
final ObservableMetricsSystem metricsSystem) {
this.flatDbMode = flatDbMode;
this.flatDbReaderStrategy = flatDbReaderStrategy;
this.accountStorage = accountStorage;
this.codeStorage = codeStorage;
this.storageStorage = storageStorage;
this.trieBranchStorage = trieBranchStorage;
this.trieLogStorage = trieLogStorage;
this.metricsSystem = metricsSystem;
getAccountCounter =
metricsSystem.createCounter(
BesuMetricCategory.BLOCKCHAIN,
"get_account_total",
"Total number of calls to getAccount");
getAccountFlatDatabaseCounter =
metricsSystem.createCounter(
BesuMetricCategory.BLOCKCHAIN,
"get_account_flat_database",
"Number of accounts found in the flat database");
getAccountMerkleTrieCounter =
metricsSystem.createCounter(
BesuMetricCategory.BLOCKCHAIN,
"get_account_merkle_trie",
"Number of accounts not found in the flat database, but found in the merkle trie");
getAccountMissingMerkleTrieCounter =
metricsSystem.createCounter(
BesuMetricCategory.BLOCKCHAIN,
"get_account_missing_merkle_trie",
"Number of accounts not found (either in the flat database or the merkle trie)");
getStorageValueCounter =
metricsSystem.createCounter(
BesuMetricCategory.BLOCKCHAIN,
"get_storagevalue_total",
"Total number of calls to getStorageValueBySlotHash");
getStorageValueFlatDatabaseCounter =
metricsSystem.createCounter(
BesuMetricCategory.BLOCKCHAIN,
"get_storagevalue_flat_database",
"Number of storage slots found in the flat database");
getStorageValueMerkleTrieCounter =
metricsSystem.createCounter(
BesuMetricCategory.BLOCKCHAIN,
"get_storagevalue_merkle_trie",
"Number of storage slots not found in the flat database, but found in the merkle trie");
getStorageValueMissingMerkleTrieCounter =
metricsSystem.createCounter(
BesuMetricCategory.BLOCKCHAIN,
"get_storagevalue_missing_merkle_trie",
"Number of storage slots not found (either in the flat database or in the merkle trie)");
}
@Override
public Optional<Bytes> getCode(final Bytes32 codeHash, final Hash accountHash) {
if (codeHash.equals(Hash.EMPTY)) {
return Optional.of(Bytes.EMPTY);
} else {
return codeStorage
.get(accountHash.toArrayUnsafe())
public void initFlatDbStrategy() {
this.flatDbMode =
FlatDbMode.fromVersion(
trieBranchStorage
.get(FLAT_DB_MODE)
.map(Bytes::wrap)
.filter(b -> Hash.hash(b).equals(codeHash));
.orElse(
FlatDbMode.PARTIAL
.getVersion())); // for backward compatibility we use partial as
// default
LOG.info("Bonsai flat db mode found {}", flatDbMode);
if (flatDbMode == FlatDbMode.FULL) {
this.flatDbReaderStrategy = new FullFlatDbReaderStrategy(metricsSystem);
} else {
this.flatDbReaderStrategy = new PartialFlatDbReaderStrategy(metricsSystem);
}
}
public Optional<Bytes> getAccount(final Hash accountHash) {
getAccountCounter.inc();
Optional<Bytes> response = accountStorage.get(accountHash.toArrayUnsafe()).map(Bytes::wrap);
if (response.isEmpty()) {
// after a snapsync/fastsync we only have the trie branches.
final Optional<Bytes> worldStateRootHash = getWorldStateRootHash();
if (worldStateRootHash.isPresent()) {
response =
new StoredMerklePatriciaTrie<>(
new StoredNodeFactory<>(
this::getAccountStateTrieNode, Function.identity(), Function.identity()),
Bytes32.wrap(worldStateRootHash.get()))
.get(accountHash);
if (response.isEmpty()) getAccountMissingMerkleTrieCounter.inc();
else getAccountMerkleTrieCounter.inc();
@Override
public DataStorageFormat getDataStorageFormat() {
return DataStorageFormat.BONSAI;
}
} else {
getAccountFlatDatabaseCounter.inc();
@Override
public FlatDbMode getFlatDbMode() {
return flatDbMode;
}
return response;
public FlatDbReaderStrategy getFlatDbReaderStrategy() {
return flatDbReaderStrategy;
}
@Override
public Optional<Bytes> getAccountTrieNodeData(final Bytes location, final Bytes32 hash) {
// for Bonsai trie fast sync this method should return an empty
return Optional.empty();
public Optional<Bytes> getCode(final Bytes32 codeHash, final Hash accountHash) {
if (codeHash.equals(Hash.EMPTY)) {
return Optional.of(Bytes.EMPTY);
} else {
return getFlatDbReaderStrategy().getCode(codeHash, accountHash, codeStorage);
}
}
public Optional<Bytes> getAccount(final Hash accountHash) {
return getFlatDbReaderStrategy()
.getAccount(
this::getWorldStateRootHash,
this::getAccountStateTrieNode,
accountHash,
accountStorage);
}
@Override
@ -204,19 +171,34 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
}
}
@Override
/**
* Retrieves the storage trie node associated with the specified account and location, if
* available.
*
* @param accountHash The hash of the account.
* @param location The location within the storage trie.
* @param maybeNodeHash The optional hash of the storage trie node to validate the retrieved data
* against.
* @return The optional bytes of the storage trie node.
*/
public Optional<Bytes> getAccountStorageTrieNode(
final Hash accountHash, final Bytes location, final Bytes32 nodeHash) {
if (nodeHash.equals(MerkleTrie.EMPTY_TRIE_NODE_HASH)) {
final Hash accountHash, final Bytes location, final Optional<Bytes32> maybeNodeHash) {
if (maybeNodeHash.filter(hash -> hash.equals(MerkleTrie.EMPTY_TRIE_NODE_HASH)).isPresent()) {
return Optional.of(MerkleTrie.EMPTY_TRIE_NODE);
} else {
return trieBranchStorage
.get(Bytes.concatenate(accountHash, location).toArrayUnsafe())
.map(Bytes::wrap)
.filter(b -> Hash.hash(b).equals(nodeHash));
.filter(data -> maybeNodeHash.map(hash -> Hash.hash(data).equals(hash)).orElse(true));
}
}
@Override
public Optional<Bytes> getAccountStorageTrieNode(
final Hash accountHash, final Bytes location, final Bytes32 nodeHash) {
return getAccountStorageTrieNode(accountHash, location, Optional.ofNullable(nodeHash));
}
public Optional<byte[]> getTrieLog(final Hash blockHash) {
return trieLogStorage.get(blockHash.toArrayUnsafe());
}
@ -251,31 +233,28 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
final Supplier<Optional<Hash>> storageRootSupplier,
final Hash accountHash,
final StorageSlotKey storageSlotKey) {
getStorageValueCounter.inc();
Optional<Bytes> response =
storageStorage
.get(Bytes.concatenate(accountHash, storageSlotKey.getSlotHash()).toArrayUnsafe())
.map(Bytes::wrap);
if (response.isEmpty()) {
final Optional<Hash> storageRoot = storageRootSupplier.get();
final Optional<Bytes> worldStateRootHash = getWorldStateRootHash();
if (storageRoot.isPresent() && worldStateRootHash.isPresent()) {
response =
new StoredMerklePatriciaTrie<>(
new StoredNodeFactory<>(
return getFlatDbReaderStrategy()
.getStorageValueByStorageSlotKey(
this::getWorldStateRootHash,
storageRootSupplier,
(location, hash) -> getAccountStorageTrieNode(accountHash, location, hash),
Function.identity(),
Function.identity()),
storageRoot.get())
.get(storageSlotKey.getSlotHash())
.map(bytes -> Bytes32.leftPad(RLP.decodeValue(bytes)));
if (response.isEmpty()) getStorageValueMissingMerkleTrieCounter.inc();
else getStorageValueMerkleTrieCounter.inc();
accountHash,
storageSlotKey,
storageStorage);
}
} else {
getStorageValueFlatDatabaseCounter.inc();
@Override
public Map<Bytes32, Bytes> streamFlatAccounts(
final Bytes startKeyHash, final Bytes32 endKeyHash, final long max) {
return getFlatDbReaderStrategy()
.streamAccountFlatDatabase(accountStorage, startKeyHash, endKeyHash, max);
}
return response;
@Override
public Map<Bytes32, Bytes> streamFlatStorages(
final Hash accountHash, final Bytes startKeyHash, final Bytes32 endKeyHash, final long max) {
return getFlatDbReaderStrategy()
.streamStorageFlatDatabase(storageStorage, accountHash, startKeyHash, endKeyHash, max);
}
@Override
@ -292,14 +271,27 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
.orElse(false);
}
public void upgradeToFullFlatDbMode() {
final KeyValueStorageTransaction transaction = trieBranchStorage.startTransaction();
transaction.put(FLAT_DB_MODE, FlatDbMode.FULL.getVersion().toArrayUnsafe());
transaction.commit();
initFlatDbStrategy(); // force reload of flat db reader strategy
}
public void downgradeToPartialFlatDbMode() {
final KeyValueStorageTransaction transaction = trieBranchStorage.startTransaction();
transaction.put(FLAT_DB_MODE, FlatDbMode.PARTIAL.getVersion().toArrayUnsafe());
transaction.commit();
initFlatDbStrategy(); // force reload of flat db reader strategy
}
@Override
public void clear() {
subscribers.forEach(BonsaiStorageSubscriber::onClearStorage);
accountStorage.clear();
codeStorage.clear();
storageStorage.clear();
getFlatDbReaderStrategy().clearAll(accountStorage, storageStorage, codeStorage);
trieBranchStorage.clear();
trieLogStorage.clear();
flatDbReaderStrategy = null; // force reload of flat db reader strategy
}
@Override
@ -311,8 +303,7 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
@Override
public void clearFlatDatabase() {
subscribers.forEach(BonsaiStorageSubscriber::onClearFlatDatabaseStorage);
accountStorage.clear();
storageStorage.clear();
getFlatDbReaderStrategy().resetOnResync(accountStorage, storageStorage);
}
@Override

@ -16,6 +16,7 @@
package org.hyperledger.besu.ethereum.bonsai.storage;
import org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage.BonsaiStorageSubscriber;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.metrics.ObservableMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SnappedKeyValueStorage;
@ -53,6 +54,11 @@ public class BonsaiWorldStateLayerStorage extends BonsaiSnapshotWorldStateKeyVal
metricsSystem);
}
@Override
public FlatDbMode getFlatDbMode() {
return parentWorldStateStorage.getFlatDbMode();
}
@Override
public BonsaiWorldStateLayerStorage clone() {
return new BonsaiWorldStateLayerStorage(

@ -0,0 +1,174 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*
*/
package org.hyperledger.besu.ethereum.bonsai.storage.flat;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.datatypes.StorageSlotKey;
import org.hyperledger.besu.ethereum.trie.NodeLoader;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.metrics.Counter;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import java.util.Map;
import java.util.Optional;
import java.util.TreeMap;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import kotlin.Pair;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.apache.tuweni.rlp.RLP;
/**
* This class represents a FlatDbReaderStrategy, which is responsible for reading data from flat
* databases. It implements various methods for retrieving account data, code data, and storage data
* from the corresponding KeyValueStorage.
*/
public abstract class FlatDbReaderStrategy {
protected final MetricsSystem metricsSystem;
protected final Counter getAccountCounter;
protected final Counter getAccountFoundInFlatDatabaseCounter;
protected final Counter getStorageValueCounter;
protected final Counter getStorageValueFlatDatabaseCounter;
public FlatDbReaderStrategy(final MetricsSystem metricsSystem) {
this.metricsSystem = metricsSystem;
getAccountCounter =
metricsSystem.createCounter(
BesuMetricCategory.BLOCKCHAIN,
"get_account_total",
"Total number of calls to getAccount");
getAccountFoundInFlatDatabaseCounter =
metricsSystem.createCounter(
BesuMetricCategory.BLOCKCHAIN,
"get_account_flat_database",
"Number of accounts found in the flat database");
getStorageValueCounter =
metricsSystem.createCounter(
BesuMetricCategory.BLOCKCHAIN,
"get_storagevalue_total",
"Total number of calls to getStorageValueBySlotHash");
getStorageValueFlatDatabaseCounter =
metricsSystem.createCounter(
BesuMetricCategory.BLOCKCHAIN,
"get_storagevalue_flat_database",
"Number of storage slots found in the flat database");
}
/*
* Retrieves the account data for the given account hash, using the world state root hash supplier and node loader.
*/
public abstract Optional<Bytes> getAccount(
Supplier<Optional<Bytes>> worldStateRootHashSupplier,
NodeLoader nodeLoader,
Hash accountHash,
KeyValueStorage accountStorage);
/*
* Retrieves the storage value for the given account hash and storage slot key, using the world state root hash supplier, storage root supplier, and node loader.
*/
public abstract Optional<Bytes> getStorageValueByStorageSlotKey(
Supplier<Optional<Bytes>> worldStateRootHashSupplier,
Supplier<Optional<Hash>> storageRootSupplier,
NodeLoader nodeLoader,
Hash accountHash,
StorageSlotKey storageSlotKey,
KeyValueStorage storageStorage);
/*
* Retrieves the code data for the given code hash and account hash.
*/
public Optional<Bytes> getCode(
final Bytes32 codeHash, final Hash accountHash, final KeyValueStorage codeStorage) {
if (codeHash.equals(Hash.EMPTY)) {
return Optional.of(Bytes.EMPTY);
} else {
return codeStorage
.get(accountHash.toArrayUnsafe())
.map(Bytes::wrap)
.filter(b -> Hash.hash(b).equals(codeHash));
}
}
public void clearAll(
final KeyValueStorage accountStorage,
final KeyValueStorage storageStorage,
final KeyValueStorage codeStorage) {
accountStorage.clear();
storageStorage.clear();
codeStorage.clear();
}
public void resetOnResync(
final KeyValueStorage accountStorage, final KeyValueStorage storageStorage) {
accountStorage.clear();
storageStorage.clear();
}
public Map<Bytes32, Bytes> streamAccountFlatDatabase(
final KeyValueStorage accountStorage,
final Bytes startKeyHash,
final Bytes32 endKeyHash,
final long max) {
final Stream<Pair<Bytes32, Bytes>> pairStream =
accountStorage
.streamFromKey(startKeyHash.toArrayUnsafe())
.limit(max)
.map(pair -> new Pair<>(Bytes32.wrap(pair.getKey()), Bytes.wrap(pair.getValue())))
.takeWhile(pair -> pair.getFirst().compareTo(endKeyHash) <= 0);
final TreeMap<Bytes32, Bytes> collected =
pairStream.collect(
Collectors.toMap(Pair::getFirst, Pair::getSecond, (v1, v2) -> v1, TreeMap::new));
pairStream.close();
return collected;
}
public Map<Bytes32, Bytes> streamStorageFlatDatabase(
final KeyValueStorage storageStorage,
final Hash accountHash,
final Bytes startKeyHash,
final Bytes32 endKeyHash,
final long max) {
final Stream<Pair<Bytes32, Bytes>> pairStream =
storageStorage
.streamFromKey(Bytes.concatenate(accountHash, startKeyHash).toArrayUnsafe())
.takeWhile(pair -> Bytes.wrap(pair.getKey()).slice(0, Hash.SIZE).equals(accountHash))
.limit(max)
.map(
pair ->
new Pair<>(
Bytes32.wrap(Bytes.wrap(pair.getKey()).slice(Hash.SIZE)),
RLP.encodeValue(Bytes.wrap(pair.getValue()).trimLeadingZeros())))
.takeWhile(pair -> pair.getFirst().compareTo(endKeyHash) <= 0);
final TreeMap<Bytes32, Bytes> collected =
pairStream.collect(
Collectors.toMap(Pair::getFirst, Pair::getSecond, (v1, v2) -> v1, TreeMap::new));
pairStream.close();
return collected;
}
}

@ -0,0 +1,98 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*
*/
package org.hyperledger.besu.ethereum.bonsai.storage.flat;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.datatypes.StorageSlotKey;
import org.hyperledger.besu.ethereum.trie.NodeLoader;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.metrics.Counter;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import java.util.Optional;
import java.util.function.Supplier;
import org.apache.tuweni.bytes.Bytes;
public class FullFlatDbReaderStrategy extends FlatDbReaderStrategy {
protected final Counter getAccountNotFoundInFlatDatabaseCounter;
protected final Counter getStorageValueNotFoundInFlatDatabaseCounter;
public FullFlatDbReaderStrategy(final MetricsSystem metricsSystem) {
super(metricsSystem);
getAccountNotFoundInFlatDatabaseCounter =
metricsSystem.createCounter(
BesuMetricCategory.BLOCKCHAIN,
"get_account_missing_flat_database",
"Number of accounts not found in the flat database");
getStorageValueNotFoundInFlatDatabaseCounter =
metricsSystem.createCounter(
BesuMetricCategory.BLOCKCHAIN,
"get_storagevalue_missing_flat_database",
"Number of storage slots not found in the flat database");
}
@Override
public Optional<Bytes> getAccount(
final Supplier<Optional<Bytes>> worldStateRootHashSupplier,
final NodeLoader nodeLoader,
final Hash accountHash,
final KeyValueStorage accountStorage) {
getAccountCounter.inc();
final Optional<Bytes> accountFound =
accountStorage.get(accountHash.toArrayUnsafe()).map(Bytes::wrap);
if (accountFound.isPresent()) {
getAccountFoundInFlatDatabaseCounter.inc();
} else {
getAccountNotFoundInFlatDatabaseCounter.inc();
}
return accountFound;
}
@Override
public Optional<Bytes> getStorageValueByStorageSlotKey(
final Supplier<Optional<Bytes>> worldStateRootHashSupplier,
final Supplier<Optional<Hash>> storageRootSupplier,
final NodeLoader nodeLoader,
final Hash accountHash,
final StorageSlotKey storageSlotKey,
final KeyValueStorage storageStorage) {
getStorageValueCounter.inc();
final Optional<Bytes> storageFound =
storageStorage
.get(Bytes.concatenate(accountHash, storageSlotKey.getSlotHash()).toArrayUnsafe())
.map(Bytes::wrap);
if (storageFound.isPresent()) {
getStorageValueFlatDatabaseCounter.inc();
} else {
getStorageValueNotFoundInFlatDatabaseCounter.inc();
}
return storageFound;
}
@Override
public void resetOnResync(
final KeyValueStorage accountStorage, final KeyValueStorage storageStorage) {
// NOOP
// no need to reset anything in full mode
}
}

@ -0,0 +1,140 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*
*/
package org.hyperledger.besu.ethereum.bonsai.storage.flat;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.datatypes.StorageSlotKey;
import org.hyperledger.besu.ethereum.trie.NodeLoader;
import org.hyperledger.besu.ethereum.trie.patricia.StoredMerklePatriciaTrie;
import org.hyperledger.besu.ethereum.trie.patricia.StoredNodeFactory;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.metrics.Counter;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import java.util.Optional;
import java.util.function.Function;
import java.util.function.Supplier;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.apache.tuweni.rlp.RLP;
/**
* This class represents a strategy for reading data from a partial flat database. It extends the
* FlatDbReaderStrategy and provides additional functionality for reading data from a merkle trie.
* If data is missing in the flat database, this strategy falls back to the merkle trie to retrieve
* the data. It adds a fallback mechanism to the `getAccount` and `getStorageValueByStorageSlotKey`
* methods, which first check whether the data is present in the flat database and, if not, query
* the merkle trie.
*/
public class PartialFlatDbReaderStrategy extends FlatDbReaderStrategy {
protected final Counter getAccountMerkleTrieCounter;
protected final Counter getAccountMissingMerkleTrieCounter;
protected final Counter getStorageValueMerkleTrieCounter;
protected final Counter getStorageValueMissingMerkleTrieCounter;
public PartialFlatDbReaderStrategy(final MetricsSystem metricsSystem) {
super(metricsSystem);
getAccountMerkleTrieCounter =
metricsSystem.createCounter(
BesuMetricCategory.BLOCKCHAIN,
"get_account_merkle_trie",
"Number of accounts not found in the flat database, but found in the merkle trie");
getAccountMissingMerkleTrieCounter =
metricsSystem.createCounter(
BesuMetricCategory.BLOCKCHAIN,
"get_account_missing_merkle_trie",
"Number of accounts not found (either in the flat database or the merkle trie)");
getStorageValueMerkleTrieCounter =
metricsSystem.createCounter(
BesuMetricCategory.BLOCKCHAIN,
"get_storagevalue_merkle_trie",
"Number of storage slots not found in the flat database, but found in the merkle trie");
getStorageValueMissingMerkleTrieCounter =
metricsSystem.createCounter(
BesuMetricCategory.BLOCKCHAIN,
"get_storagevalue_missing_merkle_trie",
"Number of storage slots not found (either in the flat database or in the merkle trie)");
}
@Override
public Optional<Bytes> getAccount(
final Supplier<Optional<Bytes>> worldStateRootHashSupplier,
final NodeLoader nodeLoader,
final Hash accountHash,
final KeyValueStorage accountStorage) {
getAccountCounter.inc();
Optional<Bytes> response = accountStorage.get(accountHash.toArrayUnsafe()).map(Bytes::wrap);
if (response.isEmpty()) {
// after a snapsync/fastsync we only have the trie branches.
final Optional<Bytes> worldStateRootHash = worldStateRootHashSupplier.get();
if (worldStateRootHash.isPresent()) {
response =
new StoredMerklePatriciaTrie<>(
new StoredNodeFactory<>(nodeLoader, Function.identity(), Function.identity()),
Bytes32.wrap(worldStateRootHash.get()))
.get(accountHash);
if (response.isEmpty()) {
getAccountMissingMerkleTrieCounter.inc();
} else {
getAccountMerkleTrieCounter.inc();
}
}
} else {
getAccountFoundInFlatDatabaseCounter.inc();
}
return response;
}
@Override
public Optional<Bytes> getStorageValueByStorageSlotKey(
final Supplier<Optional<Bytes>> worldStateRootHashSupplier,
final Supplier<Optional<Hash>> storageRootSupplier,
final NodeLoader nodeLoader,
final Hash accountHash,
final StorageSlotKey storageSlotKey,
final KeyValueStorage storageStorage) {
getStorageValueCounter.inc();
Optional<Bytes> response =
storageStorage
.get(Bytes.concatenate(accountHash, storageSlotKey.getSlotHash()).toArrayUnsafe())
.map(Bytes::wrap);
if (response.isEmpty()) {
final Optional<Hash> storageRoot = storageRootSupplier.get();
final Optional<Bytes> worldStateRootHash = worldStateRootHashSupplier.get();
if (storageRoot.isPresent() && worldStateRootHash.isPresent()) {
response =
new StoredMerklePatriciaTrie<>(
new StoredNodeFactory<>(nodeLoader, Function.identity(), Function.identity()),
storageRoot.get())
.get(storageSlotKey.getSlotHash())
.map(bytes -> Bytes32.leftPad(RLP.decodeValue(bytes)));
if (response.isEmpty()) getStorageValueMissingMerkleTrieCounter.inc();
else getStorageValueMerkleTrieCounter.inc();
}
} else {
getStorageValueFlatDatabaseCounter.inc();
}
return response;
}
}

@ -42,6 +42,10 @@ import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.apache.tuweni.units.bigints.UInt256;
/**
* The WorldStateProofProvider class is responsible for providing proofs for world state entries. It
* interacts with the underlying storage and trie data structures to generate proofs.
*/
public class WorldStateProofProvider {
private final WorldStateStorage worldStateStorage;
@ -87,13 +91,35 @@ public class WorldStateProofProvider {
return storageProofs;
}
/**
* Retrieves the proof-related nodes for an account in the specified world state.
*
* @param worldStateRoot The root hash of the world state.
* @param accountHash The hash of the account.
* @return A list of proof-related nodes for the account.
*/
public List<Bytes> getAccountProofRelatedNodes(
final Hash worldStateRoot, final Bytes accountHash) {
final Hash worldStateRoot, final Bytes32 accountHash) {
final Proof<Bytes> accountProof =
newAccountStateTrie(worldStateRoot).getValueWithProof(accountHash);
return accountProof.getProofRelatedNodes();
}
/**
* Retrieves the proof-related nodes for a storage slot in the specified account storage trie.
*
* @param storageRoot The root hash of the account storage trie.
* @param accountHash The hash of the account.
* @param slotHash The hash of the storage slot.
* @return A list of proof-related nodes for the storage slot.
*/
public List<Bytes> getStorageProofRelatedNodes(
final Bytes32 storageRoot, final Bytes32 accountHash, final Bytes32 slotHash) {
final Proof<Bytes> storageProof =
newAccountStorageTrie(Hash.wrap(accountHash), storageRoot).getValueWithProof(slotHash);
return storageProof.getProofRelatedNodes();
}
private MerkleTrie<Bytes, Bytes> newAccountStateTrie(final Bytes32 rootHash) {
return new StoredMerklePatriciaTrie<>(
worldStateStorage::getAccountStateTrieNode, rootHash, b -> b, b -> b);
@ -109,6 +135,16 @@ public class WorldStateProofProvider {
b -> b);
}
/**
* Checks if a range proof is valid for a given range of keys.
*
* @param startKeyHash The hash of the starting key in the range.
* @param endKeyHash The hash of the ending key in the range.
* @param rootHash The root hash of the Merkle Trie.
* @param proofs The list of proofs for the keys in the range.
* @param keys The TreeMap of key-value pairs representing the range.
* @return {@code true} if the range proof is valid, {@code false} otherwise.
*/
public boolean isValidRangeProof(
final Bytes32 startKeyHash,
final Bytes32 endKeyHash,

@ -16,6 +16,8 @@ package org.hyperledger.besu.ethereum.storage.keyvalue;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
@ -43,6 +45,11 @@ public class WorldStateKeyValueStorage implements WorldStateStorage {
this.keyValueStorage = keyValueStorage;
}
@Override
public DataStorageFormat getDataStorageFormat() {
return DataStorageFormat.FOREST;
}
@Override
public Optional<Bytes> getCode(final Bytes32 codeHash, final Hash accountHash) {
if (codeHash.equals(Hash.EMPTY)) {
@ -52,11 +59,6 @@ public class WorldStateKeyValueStorage implements WorldStateStorage {
}
}
@Override
public Optional<Bytes> getAccountTrieNodeData(final Bytes location, final Bytes32 hash) {
return getAccountStateTrieNode(null, hash);
}
@Override
public Optional<Bytes> getAccountStateTrieNode(final Bytes location, final Bytes32 nodeHash) {
return getTrieNode(nodeHash);
@ -76,6 +78,11 @@ public class WorldStateKeyValueStorage implements WorldStateStorage {
}
}
@Override
public FlatDbMode getFlatDbMode() {
return FlatDbMode.NO_FLATTENED;
}
@Override
public Optional<Bytes> getNodeData(final Bytes location, final Bytes32 hash) {
if (hash.equals(MerkleTrie.EMPTY_TRIE_NODE_HASH)) {

@ -0,0 +1,57 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*
*/
package org.hyperledger.besu.ethereum.worldstate;
import java.util.stream.Stream;
import org.apache.tuweni.bytes.Bytes;
/**
* The FlatDbMode enum represents the different modes of the flat database. It has two modes:
* PARTIAL and FULL.
*
* <p>- PARTIAL: Not all the leaves are present inside the flat database. The trie serves as a
* fallback to retrieve missing data. The PARTIAL mode is primarily used for backward compatibility
* purposes, where the flat database may not have all the required data, and the trie is utilized to
* fill in the gaps.
*
* <p>- FULL: The flat database contains the complete representation of the world state, and there
* is no need for a fallback mechanism. The FULL mode represents a fully synchronized state where
* the flat database encompasses all the necessary data.
*/
public enum FlatDbMode {
NO_FLATTENED(Bytes.EMPTY),
PARTIAL(Bytes.of(0x00)),
FULL(Bytes.of(0x01));
final Bytes version;
FlatDbMode(final Bytes version) {
this.version = version;
}
public Bytes getVersion() {
return version;
}
public static FlatDbMode fromVersion(final Bytes version) {
return Stream.of(FlatDbMode.values())
.filter(mode -> mode.getVersion().equals(version))
.findFirst()
.orElseThrow(
() -> new IllegalArgumentException("Unknown flat DB mode version: " + version));
}
}
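A minimal sketch of how this mode round-trips through storage, mirroring `upgradeToFullFlatDbMode()` and `initFlatDbStrategy()` from the BonsaiWorldStateKeyValueStorage changes above; the helper names here are hypothetical, and FLAT_DB_MODE is the "flatDbStatus" key stored in the trie-branch column.

import java.util.Optional;

import org.apache.tuweni.bytes.Bytes;
import org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;

class FlatDbModeSketch {
  // Persist FULL mode under the flatDbStatus key of the trie-branch storage.
  static void markFullyFlattened(final KeyValueStorage trieBranchStorage) {
    final KeyValueStorageTransaction tx = trieBranchStorage.startTransaction();
    tx.put(
        BonsaiWorldStateKeyValueStorage.FLAT_DB_MODE,
        FlatDbMode.FULL.getVersion().toArrayUnsafe());
    tx.commit();
  }

  // Read the stored mode back; a missing value defaults to PARTIAL for backward compatibility.
  static FlatDbMode readMode(final KeyValueStorage trieBranchStorage) {
    final Optional<byte[]> stored =
        trieBranchStorage.get(BonsaiWorldStateKeyValueStorage.FLAT_DB_MODE);
    return FlatDbMode.fromVersion(stored.map(Bytes::wrap).orElse(FlatDbMode.PARTIAL.getVersion()));
  }
}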

@ -17,6 +17,8 @@ package org.hyperledger.besu.ethereum.worldstate;
import org.hyperledger.besu.datatypes.Hash;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.Optional;
import java.util.function.Predicate;
@ -27,14 +29,14 @@ public interface WorldStateStorage {
Optional<Bytes> getCode(Bytes32 codeHash, Hash accountHash);
Optional<Bytes> getAccountTrieNodeData(Bytes location, Bytes32 hash);
Optional<Bytes> getAccountStateTrieNode(Bytes location, Bytes32 nodeHash);
Optional<Bytes> getAccountStorageTrieNode(Hash accountHash, Bytes location, Bytes32 nodeHash);
Optional<Bytes> getNodeData(Bytes location, Bytes32 hash);
FlatDbMode getFlatDbMode();
boolean isWorldStateAvailable(Bytes32 rootHash, Hash blockHash);
default boolean contains(final Bytes32 hash) {
@ -42,6 +44,35 @@ public interface WorldStateStorage {
return getNodeData(null, hash).isPresent();
}
/**
* Streams flat accounts within a specified range.
*
* @param startKeyHash The start key hash of the range.
* @param endKeyHash The end key hash of the range.
* @param max The maximum number of entries to stream.
* @return A map of flat accounts. (Empty map in this default implementation)
*/
default Map<Bytes32, Bytes> streamFlatAccounts(
final Bytes startKeyHash, final Bytes32 endKeyHash, final long max) {
return Collections.emptyMap();
}
/**
* Streams flat storages within a specified range.
*
* @param accountHash The account hash.
* @param startKeyHash The start key hash of the range.
* @param endKeyHash The end key hash of the range.
* @param max The maximum number of entries to stream.
* @return A map of flat storages. (Empty map in this default implementation)
*/
default Map<Bytes32, Bytes> streamFlatStorages(
final Hash accountHash, final Bytes startKeyHash, final Bytes32 endKeyHash, final long max) {
return Collections.emptyMap();
}
DataStorageFormat getDataStorageFormat();
void clear();
void clearTrieLog();
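For completeness, a hedged sketch of the storage-slot counterpart of the account-range check shown earlier; the helper is hypothetical, `streamFlatStorages`, `getStorageProofRelatedNodes` and `isValidRangeProof` are the methods introduced by this PR, and 1024 is the default slot-heal count per request.

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.proof.WorldStateProofProvider;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;

class FlatDbStorageHealingSketch {
  // Hypothetical helper: verify one flat-database slot range of an account against its storage trie.
  void healStorageRange(
      final WorldStateStorage worldStateStorage,
      final WorldStateProofProvider proofProvider,
      final Hash accountHash,
      final Bytes32 storageRoot,
      final Bytes32 startKeyHash,
      final Bytes32 endKeyHash) {
    // Stream a bounded slice of the account's flat storage (default: 1024 slots per request).
    final TreeMap<Bytes32, Bytes> slots =
        new TreeMap<>(
            worldStateStorage.streamFlatStorages(accountHash, startKeyHash, endKeyHash, 1024));
    if (slots.isEmpty()) {
      return;
    }
    // Build the local range proof against the account's storage root.
    final List<Bytes> proofs =
        new ArrayList<>(
            proofProvider.getStorageProofRelatedNodes(storageRoot, accountHash, slots.firstKey()));
    proofs.addAll(
        proofProvider.getStorageProofRelatedNodes(storageRoot, accountHash, slots.lastKey()));
    // If the slice does not match, the storage trie is traversed and the flat slots are rewritten.
    if (!proofProvider.isValidRangeProof(startKeyHash, endKeyHash, storageRoot, proofs, slots)) {
      // queue a repair of [startKeyHash, endKeyHash] from the storage trie
    }
  }
}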

@ -32,35 +32,61 @@ import org.hyperledger.besu.ethereum.rlp.RLP;
import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.trie.StorageEntriesCollector;
import org.hyperledger.besu.ethereum.trie.patricia.StoredMerklePatriciaTrie;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import java.util.Arrays;
import java.util.Collection;
import java.util.Optional;
import java.util.TreeMap;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.apache.tuweni.units.bigints.UInt256;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.mockito.Mockito;
@RunWith(Parameterized.class)
public class BonsaiWorldStateKeyValueStorageTest {
private final FlatDbMode flatDbMode;
public BonsaiWorldStateKeyValueStorageTest(final FlatDbMode flatDbMode) {
this.flatDbMode = flatDbMode;
}
@Parameterized.Parameters
public static Collection<Object[]> data() {
return Arrays.asList(new Object[][] {{FlatDbMode.FULL}, {FlatDbMode.PARTIAL}});
}
final BonsaiWorldStateKeyValueStorage storage = emptyStorage();
@Before
public void setUp() {
if (flatDbMode.equals(FlatDbMode.FULL)) {
storage.upgradeToFullFlatDbMode();
}
}
@Test
public void getCode_returnsEmpty() {
final BonsaiWorldStateKeyValueStorage storage = emptyStorage();
assertThat(storage.getCode(Hash.EMPTY, Hash.EMPTY)).contains(Bytes.EMPTY);
}
@Test
public void getAccountStateTrieNode_returnsEmptyNode() {
final BonsaiWorldStateKeyValueStorage storage = emptyStorage();
assertThat(storage.getAccountStateTrieNode(Bytes.EMPTY, MerkleTrie.EMPTY_TRIE_NODE_HASH))
.contains(MerkleTrie.EMPTY_TRIE_NODE);
}
@Test
public void getAccountStorageTrieNode_returnsEmptyNode() {
final BonsaiWorldStateKeyValueStorage storage = emptyStorage();
assertThat(
storage.getAccountStorageTrieNode(
Hash.EMPTY, Bytes.EMPTY, MerkleTrie.EMPTY_TRIE_NODE_HASH))
@ -69,19 +95,16 @@ public class BonsaiWorldStateKeyValueStorageTest {
@Test
public void getNodeData_returnsEmptyValue() {
final BonsaiWorldStateKeyValueStorage storage = emptyStorage();
assertThat(storage.getNodeData(null, null)).isEmpty();
}
@Test
public void getNodeData_returnsEmptyNode() {
final BonsaiWorldStateKeyValueStorage storage = emptyStorage();
assertThat(storage.getNodeData(Bytes.EMPTY, MerkleTrie.EMPTY_TRIE_NODE_HASH)).isEmpty();
}
@Test
public void getCode_saveAndGetSpecialValues() {
final BonsaiWorldStateKeyValueStorage storage = emptyStorage();
storage
.updater()
.putCode(Hash.EMPTY, MerkleTrie.EMPTY_TRIE_NODE)
@ -95,7 +118,6 @@ public class BonsaiWorldStateKeyValueStorageTest {
@Test
public void getCode_saveAndGetRegularValue() {
final Bytes bytes = Bytes.fromHexString("0x123456");
final BonsaiWorldStateKeyValueStorage storage = emptyStorage();
storage.updater().putCode(Hash.EMPTY, bytes).commit();
assertThat(storage.getCode(Hash.hash(bytes), Hash.EMPTY)).contains(bytes);
@ -103,7 +125,6 @@ public class BonsaiWorldStateKeyValueStorageTest {
@Test
public void getAccountStateTrieNode_saveAndGetSpecialValues() {
final BonsaiWorldStateKeyValueStorage storage = emptyStorage();
storage
.updater()
.putAccountStateTrieNode(
@ -120,7 +141,7 @@ public class BonsaiWorldStateKeyValueStorageTest {
public void getAccountStateTrieNode_saveAndGetRegularValue() {
final Bytes location = Bytes.fromHexString("0x01");
final Bytes bytes = Bytes.fromHexString("0x123456");
final BonsaiWorldStateKeyValueStorage storage = emptyStorage();
storage.updater().putAccountStateTrieNode(location, Hash.hash(bytes), bytes).commit();
assertThat(storage.getAccountStateTrieNode(location, Hash.hash(bytes))).contains(bytes);
@ -128,7 +149,7 @@ public class BonsaiWorldStateKeyValueStorageTest {
@Test
public void getAccountStorageTrieNode_saveAndGetSpecialValues() {
final BonsaiWorldStateKeyValueStorage storage = emptyStorage();
storage
.updater()
.putAccountStorageTrieNode(
@ -152,7 +173,7 @@ public class BonsaiWorldStateKeyValueStorageTest {
final Hash accountHash = Hash.hash(Address.fromHexString("0x1"));
final Bytes location = Bytes.fromHexString("0x01");
final Bytes bytes = Bytes.fromHexString("0x123456");
final BonsaiWorldStateKeyValueStorage storage = emptyStorage();
storage
.updater()
.putAccountStorageTrieNode(accountHash, location, Hash.hash(bytes), bytes)
@ -163,9 +184,40 @@ public class BonsaiWorldStateKeyValueStorageTest {
}
@Test
public void getAccount_loadFromTrieWhenEmpty() {
public void getAccount_notLoadFromTrieWhenEmptyAndFlatDbFullMode() {
Assume.assumeTrue(flatDbMode == FlatDbMode.FULL);
final BonsaiWorldStateKeyValueStorage storage = spy(emptyStorage());
final MerkleTrie<Bytes, Bytes> trie = TrieGenerator.generateTrie(storage, 1);
final TreeMap<Bytes32, Bytes> accounts =
(TreeMap<Bytes32, Bytes>)
trie.entriesFrom(root -> StorageEntriesCollector.collectEntries(root, Hash.ZERO, 1));
// save world state root hash
final BonsaiWorldStateKeyValueStorage.BonsaiUpdater updater = storage.updater();
updater
.getTrieBranchStorageTransaction()
.put(WORLD_ROOT_HASH_KEY, trie.getRootHash().toArrayUnsafe());
updater.commit();
// remove flat database
storage.clearFlatDatabase();
storage.upgradeToFullFlatDbMode();
Mockito.reset(storage);
assertThat(storage.getAccount(Hash.wrap(accounts.firstKey()))).isEmpty();
verify(storage, times(0)).getAccountStateTrieNode(any(), eq(trie.getRootHash()));
}
@Test
public void getAccount_loadFromTrieWhenEmptyAndFlatDbPartialMode() {
Assume.assumeTrue(flatDbMode == FlatDbMode.PARTIAL);
final BonsaiWorldStateKeyValueStorage storage = spy(emptyStorage());
MerkleTrie<Bytes, Bytes> trie = TrieGenerator.generateTrie(storage, 1);
final MerkleTrie<Bytes, Bytes> trie = TrieGenerator.generateTrie(storage, 1);
final TreeMap<Bytes32, Bytes> accounts =
(TreeMap<Bytes32, Bytes>)
trie.entriesFrom(root -> StorageEntriesCollector.collectEntries(root, Hash.ZERO, 1));
@ -180,14 +232,47 @@ public class BonsaiWorldStateKeyValueStorageTest {
// remove flat database
storage.clearFlatDatabase();
Mockito.reset(storage);
assertThat(storage.getAccount(Hash.wrap(accounts.firstKey())))
.contains(accounts.firstEntry().getValue());
verify(storage, times(2)).getAccountStateTrieNode(any(), eq(trie.getRootHash()));
verify(storage, times(1)).getAccountStateTrieNode(any(), eq(trie.getRootHash()));
}
@Test
public void shouldUsePartialDBStrategyAfterDowngradingMode() {
Assume.assumeTrue(flatDbMode == FlatDbMode.PARTIAL);
final BonsaiWorldStateKeyValueStorage storage = spy(emptyStorage());
final MerkleTrie<Bytes, Bytes> trie = TrieGenerator.generateTrie(storage, 1);
final TreeMap<Bytes32, Bytes> accounts =
(TreeMap<Bytes32, Bytes>)
trie.entriesFrom(root -> StorageEntriesCollector.collectEntries(root, Hash.ZERO, 1));
// save world state root hash
final BonsaiWorldStateKeyValueStorage.BonsaiUpdater updater = storage.updater();
updater
.getTrieBranchStorageTransaction()
.put(WORLD_ROOT_HASH_KEY, trie.getRootHash().toArrayUnsafe());
updater.commit();
Mockito.reset(storage);
// remove flat database
storage.clearFlatDatabase();
storage.upgradeToFullFlatDbMode();
assertThat(storage.getAccount(Hash.wrap(accounts.firstKey()))).isEmpty();
storage.downgradeToPartialFlatDbMode();
assertThat(storage.getAccount(Hash.wrap(accounts.firstKey())))
.contains(accounts.firstEntry().getValue());
}
@Test
public void getStorage_loadFromTrieWhenEmpty() {
public void getStorage_loadFromTrieWhenEmptyWithPartialMode() {
Assume.assumeTrue(flatDbMode == FlatDbMode.PARTIAL);
final BonsaiWorldStateKeyValueStorage storage = spy(emptyStorage());
final MerkleTrie<Bytes, Bytes> trie = TrieGenerator.generateTrie(storage, 1);
final TreeMap<Bytes32, Bytes> accounts =
@ -232,6 +317,24 @@ public class BonsaiWorldStateKeyValueStorageTest {
eq(Hash.wrap(accounts.firstKey())), any(), eq(storageTrie.getRootHash()));
}
@Test
public void getStorage_loadFromTrieWhenEmptyWithFullMode() {
Assume.assumeTrue(flatDbMode == FlatDbMode.FULL);
final BonsaiWorldStateKeyValueStorage storage = spy(emptyStorage());
storage.upgradeToFullFlatDbMode();
final MerkleTrie<Bytes, Bytes> trie = TrieGenerator.generateTrie(storage, 1);
// save world state root hash
final BonsaiWorldStateKeyValueStorage.BonsaiUpdater updater = storage.updater();
updater
.getTrieBranchStorageTransaction()
.put(WORLD_ROOT_HASH_KEY, trie.getRootHash().toArrayUnsafe());
updater.commit();
// remove flat database
storage.clearFlatDatabase();
}
@Test
public void reconcilesNonConflictingUpdaters() {
final Hash accountHashA = Hash.hash(Address.fromHexString("0x1"));
@ -241,7 +344,6 @@ public class BonsaiWorldStateKeyValueStorageTest {
final Bytes bytesB = Bytes.fromHexString("0x1234");
final Bytes bytesC = Bytes.fromHexString("0x123456");
final BonsaiWorldStateKeyValueStorage storage = emptyStorage();
final BonsaiWorldStateKeyValueStorage.BonsaiUpdater updaterA = storage.updater();
final BonsaiWorldStateKeyValueStorage.BonsaiUpdater updaterB = storage.updater();
@ -265,7 +367,6 @@ public class BonsaiWorldStateKeyValueStorageTest {
@Test
public void isWorldStateAvailable_StateAvailableByRootHash() {
final BonsaiWorldStateKeyValueStorage storage = emptyStorage();
final BonsaiWorldStateKeyValueStorage.BonsaiUpdater updater = storage.updater();
final Bytes rootHashKey = Bytes32.fromHexString("0x01");
updater.getTrieBranchStorageTransaction().put(WORLD_ROOT_HASH_KEY, rootHashKey.toArrayUnsafe());
@ -277,7 +378,6 @@ public class BonsaiWorldStateKeyValueStorageTest {
@Test
public void isWorldStateAvailable_afterCallingSaveWorldstate() {
final BonsaiWorldStateKeyValueStorage storage = emptyStorage();
final BonsaiWorldStateKeyValueStorage.BonsaiUpdater updater = storage.updater();
final Bytes blockHash = Bytes32.fromHexString("0x01");

@ -57,12 +57,15 @@ import org.mockito.junit.MockitoJUnitRunner;
public class LogRollingTests {
private BonsaiWorldStateProvider archive;
private InMemoryKeyValueStorageProvider provider;
private InMemoryKeyValueStorage accountStorage;
private InMemoryKeyValueStorage codeStorage;
private InMemoryKeyValueStorage storageStorage;
private InMemoryKeyValueStorage trieBranchStorage;
private InMemoryKeyValueStorage trieLogStorage;
private InMemoryKeyValueStorageProvider secondProvider;
private BonsaiWorldStateProvider secondArchive;
private InMemoryKeyValueStorage secondAccountStorage;
private InMemoryKeyValueStorage secondCodeStorage;
@ -121,7 +124,7 @@ public class LogRollingTests {
@Before
public void createStorage() {
final InMemoryKeyValueStorageProvider provider = new InMemoryKeyValueStorageProvider();
provider = new InMemoryKeyValueStorageProvider();
final CachedMerkleTrieLoader cachedMerkleTrieLoader =
new CachedMerkleTrieLoader(new NoOpMetricsSystem());
archive =
@ -144,7 +147,7 @@ public class LogRollingTests {
(InMemoryKeyValueStorage)
provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.TRIE_LOG_STORAGE);
final InMemoryKeyValueStorageProvider secondProvider = new InMemoryKeyValueStorageProvider();
secondProvider = new InMemoryKeyValueStorageProvider();
final CachedMerkleTrieLoader secondOptimizedMerkleTrieLoader =
new CachedMerkleTrieLoader(new NoOpMetricsSystem());
secondArchive =
@ -180,14 +183,7 @@ public class LogRollingTests {
final BonsaiWorldState worldState =
new BonsaiWorldState(
archive,
new BonsaiWorldStateKeyValueStorage(
accountStorage,
codeStorage,
storageStorage,
trieBranchStorage,
trieLogStorage,
new NoOpMetricsSystem()));
archive, new BonsaiWorldStateKeyValueStorage(provider, new NoOpMetricsSystem()));
final WorldUpdater updater = worldState.updater();
final MutableAccount mutableAccount =
@ -200,13 +196,7 @@ public class LogRollingTests {
final BonsaiWorldState secondWorldState =
new BonsaiWorldState(
secondArchive,
new BonsaiWorldStateKeyValueStorage(
secondAccountStorage,
secondCodeStorage,
secondStorageStorage,
secondTrieBranchStorage,
secondTrieLogStorage,
new NoOpMetricsSystem()));
new BonsaiWorldStateKeyValueStorage(secondProvider, new NoOpMetricsSystem()));
final BonsaiWorldStateUpdateAccumulator secondUpdater =
(BonsaiWorldStateUpdateAccumulator) secondWorldState.updater();
@ -235,14 +225,7 @@ public class LogRollingTests {
public void rollForwardTwice() {
final BonsaiWorldState worldState =
new BonsaiWorldState(
archive,
new BonsaiWorldStateKeyValueStorage(
accountStorage,
codeStorage,
storageStorage,
trieBranchStorage,
trieLogStorage,
new NoOpMetricsSystem()));
archive, new BonsaiWorldStateKeyValueStorage(provider, new NoOpMetricsSystem()));
final WorldUpdater updater = worldState.updater();
final MutableAccount mutableAccount =
@ -263,13 +246,7 @@ public class LogRollingTests {
final BonsaiWorldState secondWorldState =
new BonsaiWorldState(
secondArchive,
new BonsaiWorldStateKeyValueStorage(
secondAccountStorage,
secondCodeStorage,
secondStorageStorage,
secondTrieBranchStorage,
secondTrieLogStorage,
new NoOpMetricsSystem()));
new BonsaiWorldStateKeyValueStorage(secondProvider, new NoOpMetricsSystem()));
final BonsaiWorldStateUpdateAccumulator secondUpdater =
(BonsaiWorldStateUpdateAccumulator) secondWorldState.updater();
@ -299,14 +276,7 @@ public class LogRollingTests {
public void rollBackOnce() {
final BonsaiWorldState worldState =
new BonsaiWorldState(
archive,
new BonsaiWorldStateKeyValueStorage(
accountStorage,
codeStorage,
storageStorage,
trieBranchStorage,
trieLogStorage,
new NoOpMetricsSystem()));
archive, new BonsaiWorldStateKeyValueStorage(provider, new NoOpMetricsSystem()));
final WorldUpdater updater = worldState.updater();
final MutableAccount mutableAccount =
@ -334,13 +304,7 @@ public class LogRollingTests {
final BonsaiWorldState secondWorldState =
new BonsaiWorldState(
secondArchive,
new BonsaiWorldStateKeyValueStorage(
secondAccountStorage,
secondCodeStorage,
secondStorageStorage,
secondTrieBranchStorage,
secondTrieLogStorage,
new NoOpMetricsSystem()));
new BonsaiWorldStateKeyValueStorage(secondProvider, new NoOpMetricsSystem()));
final WorldUpdater secondUpdater = secondWorldState.updater();
final MutableAccount secondMutableAccount =

@ -50,6 +50,9 @@ public class RollingImport {
final BonsaiWorldStateProvider archive =
new BonsaiWorldStateProvider(
provider, null, cachedMerkleTrieLoader, new NoOpMetricsSystem(), null);
final BonsaiWorldState bonsaiState =
new BonsaiWorldState(
archive, new BonsaiWorldStateKeyValueStorage(provider, new NoOpMetricsSystem()));
final InMemoryKeyValueStorage accountStorage =
(InMemoryKeyValueStorage)
provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE);
@ -66,16 +69,6 @@ public class RollingImport {
final InMemoryKeyValueStorage trieLogStorage =
(InMemoryKeyValueStorage)
provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.TRIE_LOG_STORAGE);
final BonsaiWorldState bonsaiState =
new BonsaiWorldState(
archive,
new BonsaiWorldStateKeyValueStorage(
accountStorage,
codeStorage,
storageStorage,
trieBranchStorage,
trieLogStorage,
new NoOpMetricsSystem()));
int count = 0;
while (!reader.isDone()) {

@ -30,7 +30,7 @@ import org.hyperledger.besu.ethereum.eth.sync.fastsync.worldstate.FastDownloader
import org.hyperledger.besu.ethereum.eth.sync.fullsync.FullSyncDownloader;
import org.hyperledger.besu.ethereum.eth.sync.fullsync.SyncTerminationCondition;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapDownloaderFactory;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapPersistedContext;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.context.SnapSyncStatePersistenceManager;
import org.hyperledger.besu.ethereum.eth.sync.state.PendingBlocksManager;
import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
@ -146,7 +146,7 @@ public class DefaultSynchronizer implements Synchronizer, UnverifiedForkchoiceLi
this.fastSyncFactory =
() ->
CheckpointDownloaderFactory.createCheckpointDownloader(
new SnapPersistedContext(storageProvider),
new SnapSyncStatePersistenceManager(storageProvider),
pivotBlockSelector,
syncConfig,
dataDirectory,
@ -161,7 +161,7 @@ public class DefaultSynchronizer implements Synchronizer, UnverifiedForkchoiceLi
this.fastSyncFactory =
() ->
SnapDownloaderFactory.createSnapDownloader(
new SnapPersistedContext(storageProvider),
new SnapSyncStatePersistenceManager(storageProvider),
pivotBlockSelector,
syncConfig,
dataDirectory,

@ -26,15 +26,16 @@ import org.hyperledger.besu.ethereum.eth.sync.fastsync.FastSyncDownloader;
import org.hyperledger.besu.ethereum.eth.sync.fastsync.FastSyncState;
import org.hyperledger.besu.ethereum.eth.sync.fastsync.FastSyncStateStorage;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapDownloaderFactory;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapPersistedContext;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncDownloader;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncProcessState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapWorldStateDownloader;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.context.SnapSyncStatePersistenceManager;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
import org.hyperledger.besu.ethereum.eth.sync.worldstate.WorldStateDownloader;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.mainnet.ScheduleBasedBlockHeaderFunctions;
import org.hyperledger.besu.ethereum.trie.CompactEncoding;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.services.tasks.InMemoryTasksPriorityQueues;
@ -50,8 +51,9 @@ public class CheckpointDownloaderFactory extends SnapDownloaderFactory {
private static final Logger LOG = LoggerFactory.getLogger(CheckpointDownloaderFactory.class);
@SuppressWarnings("UnusedVariable")
public static Optional<FastSyncDownloader<?>> createCheckpointDownloader(
final SnapPersistedContext snapContext,
final SnapSyncStatePersistenceManager snapContext,
final PivotBlockSelector pivotBlockSelector,
final SynchronizerConfiguration syncConfig,
final Path dataDirectory,
@ -85,7 +87,10 @@ public class CheckpointDownloaderFactory extends SnapDownloaderFactory {
snapContext.clear();
syncState
.getAccountToRepair()
.ifPresent(address -> snapContext.addInconsistentAccount(Hash.hash(address)));
.ifPresent(
address ->
snapContext.addAccountsToBeRepaired(
CompactEncoding.bytesToPath(Hash.hash(address))));
} else if (fastSyncState.getPivotBlockHeader().isEmpty()
&& protocolContext.getBlockchain().getChainHeadBlockNumber()
!= BlockHeader.GENESIS_BLOCK_NUMBER) {
@ -126,8 +131,8 @@ public class CheckpointDownloaderFactory extends SnapDownloaderFactory {
metricsSystem);
}
final SnapSyncState snapSyncState =
new SnapSyncState(
final SnapSyncProcessState snapSyncState =
new SnapSyncProcessState(
fastSyncStateStorage.loadState(
ScheduleBasedBlockHeaderFunctions.create(protocolSchedule)));

@ -16,12 +16,12 @@ package org.hyperledger.besu.ethereum.eth.sync.fastsync;
import static org.hyperledger.besu.util.FutureUtils.exceptionallyCompose;
import org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.eth.manager.exceptions.MaxRetriesReachedException;
import org.hyperledger.besu.ethereum.eth.sync.ChainDownloader;
import org.hyperledger.besu.ethereum.eth.sync.TrailingPeerRequirements;
import org.hyperledger.besu.ethereum.eth.sync.worldstate.StalledDownloadException;
import org.hyperledger.besu.ethereum.eth.sync.worldstate.WorldStateDownloader;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.services.tasks.TaskCollection;
import org.hyperledger.besu.util.ExceptionUtils;
@ -82,7 +82,7 @@ public class FastSyncDownloader<REQUEST> {
}
protected CompletableFuture<FastSyncState> start(final FastSyncState fastSyncState) {
if (worldStateStorage instanceof BonsaiWorldStateKeyValueStorage) {
if (worldStateStorage.getDataStorageFormat().equals(DataStorageFormat.BONSAI)) {
LOG.info("Clearing bonsai flat account db");
worldStateStorage.clearFlatDatabase();
worldStateStorage.clearTrieLog();

@ -20,6 +20,7 @@ import org.hyperledger.besu.ethereum.rlp.RLP;
import org.hyperledger.besu.ethereum.rlp.RLPOutput;
import org.hyperledger.besu.ethereum.trie.CompactEncoding;
import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage.Updater;
@ -65,7 +66,6 @@ class AccountTrieNodeDataRequest extends TrieNodeDataRequest {
final Bytes value) {
final Stream.Builder<NodeDataRequest> builder = Stream.builder();
final StateTrieAccountValue accountValue = StateTrieAccountValue.readFrom(RLP.input(value));
// Add code, if appropriate
final Optional<Hash> accountHash =
Optional.of(
@ -73,12 +73,13 @@ class AccountTrieNodeDataRequest extends TrieNodeDataRequest {
Bytes32.wrap(
CompactEncoding.pathToBytes(
Bytes.concatenate(getLocation().orElse(Bytes.EMPTY), path)))));
if (worldStateStorage instanceof BonsaiWorldStateKeyValueStorage) {
if (!worldStateStorage.getFlatDbMode().equals(FlatDbMode.NO_FLATTENED)) {
((BonsaiWorldStateKeyValueStorage.Updater) worldStateStorage.updater())
.putAccountInfoState(accountHash.get(), value)
.commit();
}
// Add code, if appropriate
if (!accountValue.getCodeHash().equals(Hash.EMPTY)) {
builder.add(createCodeRequest(accountValue.getCodeHash(), accountHash));
}

@ -18,6 +18,7 @@ import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.rlp.RLPOutput;
import org.hyperledger.besu.ethereum.trie.CompactEncoding;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage.Updater;
@ -69,7 +70,7 @@ class StorageTrieNodeDataRequest extends TrieNodeDataRequest {
final Optional<Bytes> location,
final Bytes path,
final Bytes value) {
if (worldStateStorage instanceof BonsaiWorldStateKeyValueStorage) {
if (!worldStateStorage.getFlatDbMode().equals(FlatDbMode.NO_FLATTENED)) {
((BonsaiWorldStateKeyValueStorage.Updater) worldStateStorage.updater())
.putStorageValueBySlotHash(
accountHash.get(),

@ -15,18 +15,19 @@
package org.hyperledger.besu.ethereum.eth.sync.snapsync;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.TrieNodeDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal.TrieNodeHealingRequest;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.metrics.Counter;
import org.hyperledger.besu.services.tasks.Task;
public class CompleteTaskStep {
private final SnapSyncState snapSyncState;
private final SnapSyncProcessState snapSyncState;
private final Counter completedRequestsCounter;
private final Counter retriedRequestsCounter;
public CompleteTaskStep(final SnapSyncState snapSyncState, final MetricsSystem metricsSystem) {
public CompleteTaskStep(
final SnapSyncProcessState snapSyncState, final MetricsSystem metricsSystem) {
this.snapSyncState = snapSyncState;
completedRequestsCounter =
metricsSystem.createCounter(
@ -43,7 +44,7 @@ public class CompleteTaskStep {
public synchronized void markAsCompleteOrFailed(
final SnapWorldDownloadState downloadState, final Task<SnapDataRequest> task) {
if (task.getData().isResponseReceived()
|| (task.getData() instanceof TrieNodeDataRequest
|| (task.getData() instanceof TrieNodeHealingRequest
&& task.getData().isExpired(snapSyncState))) {
completedRequestsCounter.inc();
task.markCompleted();

@ -31,28 +31,33 @@ import java.util.function.BiConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class DynamicPivotBlockManager {
/**
 * DynamicPivotBlockSelector is responsible for dynamically selecting the pivot block for SnapSync.
 * It picks the most suitable pivot block based on the current state of the chain, using the
 * configured pivot block window validity and caching distance.
 */
public class DynamicPivotBlockSelector {
private static final Duration DEFAULT_CHECK_INTERVAL = Duration.ofSeconds(60);
public static final BiConsumer<BlockHeader, Boolean> doNothingOnPivotChange = (___, __) -> {};
private static final Logger LOG = LoggerFactory.getLogger(DynamicPivotBlockManager.class);
private static final Logger LOG = LoggerFactory.getLogger(DynamicPivotBlockSelector.class);
private final AtomicBoolean isTimeToCheckAgain = new AtomicBoolean(true);
private final EthContext ethContext;
private final FastSyncActions syncActions;
private final SnapSyncState syncState;
private final SnapSyncProcessState syncState;
private final int pivotBlockWindowValidity;
private final int pivotBlockDistanceBeforeCaching;
private Optional<BlockHeader> lastPivotBlockFound;
public DynamicPivotBlockManager(
public DynamicPivotBlockSelector(
final EthContext ethContext,
final FastSyncActions fastSyncActions,
final SnapSyncState fastSyncState,
final SnapSyncProcessState fastSyncState,
final int pivotBlockWindowValidity,
final int pivotBlockDistanceBeforeCaching) {
this.ethContext = ethContext;

@ -15,7 +15,7 @@
package org.hyperledger.besu.ethereum.eth.sync.snapsync;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.TrieNodeDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal.TrieNodeHealingRequest;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.plugin.services.MetricsSystem;
@ -32,16 +32,20 @@ public class LoadLocalDataStep {
private final WorldStateStorage worldStateStorage;
private final SnapWorldDownloadState downloadState;
private final SnapSyncState snapSyncState;
private final SnapSyncProcessState snapSyncState;
private final SnapSyncConfiguration snapSyncConfiguration;
private final Counter existingNodeCounter;
public LoadLocalDataStep(
final WorldStateStorage worldStateStorage,
final SnapWorldDownloadState downloadState,
final SnapSyncConfiguration snapSyncConfiguration,
final MetricsSystem metricsSystem,
final SnapSyncState snapSyncState) {
final SnapSyncProcessState snapSyncState) {
this.worldStateStorage = worldStateStorage;
this.downloadState = downloadState;
this.snapSyncConfiguration = snapSyncConfiguration;
existingNodeCounter =
metricsSystem.createCounter(
BesuMetricCategory.SYNCHRONIZER,
@ -52,16 +56,17 @@ public class LoadLocalDataStep {
public Stream<Task<SnapDataRequest>> loadLocalDataTrieNode(
final Task<SnapDataRequest> task, final Pipe<Task<SnapDataRequest>> completedTasks) {
final TrieNodeDataRequest request = (TrieNodeDataRequest) task.getData();
final TrieNodeHealingRequest request = (TrieNodeHealingRequest) task.getData();
// check if node is already stored in the worldstate
if (snapSyncState.hasPivotBlockHeader()) {
Optional<Bytes> existingData = request.getExistingData(worldStateStorage);
Optional<Bytes> existingData = request.getExistingData(downloadState, worldStateStorage);
if (existingData.isPresent()) {
existingNodeCounter.inc();
request.setData(existingData.get());
request.setRequiresPersisting(false);
final WorldStateStorage.Updater updater = worldStateStorage.updater();
request.persist(worldStateStorage, updater, downloadState, snapSyncState);
request.persist(
worldStateStorage, updater, downloadState, snapSyncState, snapSyncConfiguration);
updater.commit();
downloadState.enqueueRequests(request.getRootStorageRequests(worldStateStorage));
completedTasks.put(task);

@ -14,8 +14,9 @@
*/
package org.hyperledger.besu.ethereum.eth.sync.snapsync;
import org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.TrieNodeDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal.TrieNodeHealingRequest;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.services.tasks.Task;
@ -24,17 +25,21 @@ import java.util.stream.Stream;
public class PersistDataStep {
private final SnapSyncState snapSyncState;
private final SnapSyncProcessState snapSyncState;
private final WorldStateStorage worldStateStorage;
private final SnapWorldDownloadState downloadState;
private final SnapSyncConfiguration snapSyncConfiguration;
public PersistDataStep(
final SnapSyncState snapSyncState,
final SnapSyncProcessState snapSyncState,
final WorldStateStorage worldStateStorage,
final SnapWorldDownloadState downloadState) {
final SnapWorldDownloadState downloadState,
final SnapSyncConfiguration snapSyncConfiguration) {
this.snapSyncState = snapSyncState;
this.worldStateStorage = worldStateStorage;
this.downloadState = downloadState;
this.snapSyncConfiguration = snapSyncConfiguration;
}
public List<Task<SnapDataRequest>> persist(final List<Task<SnapDataRequest>> tasks) {
@ -44,7 +49,7 @@ public class PersistDataStep {
// enqueue child requests
final Stream<SnapDataRequest> childRequests =
task.getData().getChildRequests(downloadState, worldStateStorage, snapSyncState);
if (!(task.getData() instanceof TrieNodeDataRequest)) {
if (!(task.getData() instanceof TrieNodeHealingRequest)) {
enqueueChildren(childRequests);
} else {
if (!task.getData().isExpired(snapSyncState)) {
@ -56,10 +61,16 @@ public class PersistDataStep {
// persist nodes
final int persistedNodes =
task.getData().persist(worldStateStorage, updater, downloadState, snapSyncState);
task.getData()
.persist(
worldStateStorage,
updater,
downloadState,
snapSyncState,
snapSyncConfiguration);
if (persistedNodes > 0) {
if (task.getData() instanceof TrieNodeDataRequest) {
downloadState.getMetricsManager().notifyNodesHealed(persistedNodes);
if (task.getData() instanceof TrieNodeHealingRequest) {
downloadState.getMetricsManager().notifyTrieNodesHealed(persistedNodes);
} else {
downloadState.getMetricsManager().notifyNodesGenerated(persistedNodes);
}
@ -70,10 +81,36 @@ public class PersistDataStep {
return tasks;
}
/**
 * Heals the local flat database ranges if necessary and persists the result.
 *
 * @param tasks ranges to heal and/or persist
 * @return completed tasks
 */
public List<Task<SnapDataRequest>> healFlatDatabase(final List<Task<SnapDataRequest>> tasks) {
final BonsaiWorldStateKeyValueStorage.Updater updater =
(BonsaiWorldStateKeyValueStorage.Updater) worldStateStorage.updater();
for (Task<SnapDataRequest> task : tasks) {
// heal and/or persist
task.getData()
.persist(worldStateStorage, updater, downloadState, snapSyncState, snapSyncConfiguration);
// enqueue child requests; these cover the remaining (right-hand) part of each range when the
// whole range could not be healed in this pass
enqueueChildren(
task.getData().getChildRequests(downloadState, worldStateStorage, snapSyncState));
}
updater.commit();
return tasks;
}
public Task<SnapDataRequest> persist(final Task<SnapDataRequest> task) {
return persist(List.of(task)).get(0);
}
public Task<SnapDataRequest> healFlatDatabase(final Task<SnapDataRequest> task) {
return healFlatDatabase(List.of(task)).get(0);
}
private void enqueueChildren(final Stream<SnapDataRequest> childRequests) {
downloadState.enqueueRequests(childRequests);
}
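A minimal usage sketch (the wrapper is hypothetical): the new healFlatDatabase entry point is fed batches of flat-database healing range requests, and any un-healed remainder of a range comes back as enqueued child requests.

import java.util.List;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.PersistDataStep;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.services.tasks.Task;

class FlatHealingBatchExample {
  // Heals and/or persists one batch of flat-database range requests.
  static List<Task<SnapDataRequest>> healBatch(
      final PersistDataStep step, final List<Task<SnapDataRequest>> batch) {
    return step.healFlatDatabase(batch);
  }
}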

@ -42,6 +42,17 @@ public class RangeManager {
private RangeManager() {}
public static int getRangeCount(
final Bytes32 min, final Bytes32 max, final TreeMap<Bytes32, Bytes> items) {
if (min.equals(MIN_RANGE) && max.equals(MAX_RANGE)) {
return MAX_RANGE
.toUnsignedBigInteger()
.divide(items.lastKey().toUnsignedBigInteger().subtract(min.toUnsignedBigInteger()))
.intValue();
}
return 1;
}
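To make the estimate concrete, a standalone sketch (plain BigInteger arithmetic, MIN_RANGE being zero) of what getRangeCount computes when the requested range spanned the full key space:

import java.math.BigInteger;

class RangeCountSketch {
  public static void main(final String[] args) {
    // MAX_RANGE is 0xff...ff over 32 bytes. Suppose a full-range request came back with
    // entries whose last key hash sits at roughly 1/16 of the key space: getRangeCount
    // then estimates that 16 ranges of that size cover the whole space.
    final BigInteger maxRange = BigInteger.TWO.pow(256).subtract(BigInteger.ONE);
    final BigInteger lastKey = maxRange.divide(BigInteger.valueOf(16));
    System.out.println(maxRange.divide(lastKey).intValue()); // prints 16
  }
}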
public static Map<Bytes32, Bytes32> generateAllRanges(final int sizeRange) {
if (sizeRange == 1) {
return Map.ofEntries(Map.entry(MIN_RANGE, MAX_RANGE));
@ -75,6 +86,9 @@ public class RangeManager {
final BigInteger min, final BigInteger max, final int nbRange) {
final BigInteger rangeSize = max.subtract(min).divide(BigInteger.valueOf(nbRange));
final TreeMap<Bytes32, Bytes32> ranges = new TreeMap<>();
if (min.compareTo(max) > 0) {
return ranges;
}
if (min.equals(max) || nbRange == 1) {
ranges.put(format(min), format(max));
return ranges;

@ -27,16 +27,19 @@ import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.AccountRangeDataR
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.BytecodeRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.StorageRangeDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.TrieNodeDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.worldstate.WorldDownloadState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal.AccountFlatDatabaseHealingRangeRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal.StorageFlatDatabaseHealingRangeRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal.TrieNodeHealingRequest;
import org.hyperledger.besu.ethereum.proof.WorldStateProofProvider;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.services.tasks.Task;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
@ -47,8 +50,10 @@ import org.apache.tuweni.bytes.Bytes32;
public class RequestDataStep {
private final SnapSyncState fastSyncState;
private final WorldDownloadState<SnapDataRequest> downloadState;
private final WorldStateStorage worldStateStorage;
private final SnapSyncProcessState fastSyncState;
private final SnapWorldDownloadState downloadState;
private final SnapSyncConfiguration snapSyncConfiguration;
private final MetricsSystem metricsSystem;
private final EthContext ethContext;
private final WorldStateProofProvider worldStateProofProvider;
@ -56,11 +61,14 @@ public class RequestDataStep {
public RequestDataStep(
final EthContext ethContext,
final WorldStateStorage worldStateStorage,
final SnapSyncState fastSyncState,
final WorldDownloadState<SnapDataRequest> downloadState,
final SnapSyncProcessState fastSyncState,
final SnapWorldDownloadState downloadState,
final SnapSyncConfiguration snapSyncConfiguration,
final MetricsSystem metricsSystem) {
this.worldStateStorage = worldStateStorage;
this.fastSyncState = fastSyncState;
this.downloadState = downloadState;
this.snapSyncConfiguration = snapSyncConfiguration;
this.metricsSystem = metricsSystem;
this.ethContext = ethContext;
this.worldStateProofProvider = new WorldStateProofProvider(worldStateStorage);
@ -175,8 +183,8 @@ public class RequestDataStep {
final Map<Bytes, List<Bytes>> message = new HashMap<>();
requestTasks.stream()
.map(Task::getData)
.map(TrieNodeDataRequest.class::cast)
.map(TrieNodeDataRequest::getTrieNodePath)
.map(TrieNodeHealingRequest.class::cast)
.map(TrieNodeHealingRequest::getTrieNodePath)
.forEach(
path -> {
final List<Bytes> bytes =
@ -196,7 +204,7 @@ public class RequestDataStep {
if (response != null) {
downloadState.removeOutstandingTask(getTrieNodeFromPeerTask);
for (final Task<SnapDataRequest> task : requestTasks) {
final TrieNodeDataRequest request = (TrieNodeDataRequest) task.getData();
final TrieNodeHealingRequest request = (TrieNodeHealingRequest) task.getData();
final Bytes matchingData = response.get(request.getPathId());
if (matchingData != null) {
request.setData(matchingData);
@ -206,4 +214,85 @@ public class RequestDataStep {
return requestTasks;
});
}
/**
* Retrieves local accounts from the flat database, generates the corresponding range proof,
* updates the data request with the retrieved information, and returns the modified task.
*
* @param requestTask request data to fill
* @return data request with local accounts
*/
public CompletableFuture<Task<SnapDataRequest>> requestLocalFlatAccounts(
final Task<SnapDataRequest> requestTask) {
final AccountFlatDatabaseHealingRangeRequest accountDataRequest =
(AccountFlatDatabaseHealingRangeRequest) requestTask.getData();
final BlockHeader blockHeader = fastSyncState.getPivotBlockHeader().get();
// retrieve accounts from flat database
final TreeMap<Bytes32, Bytes> accounts =
(TreeMap<Bytes32, Bytes>)
worldStateStorage.streamFlatAccounts(
accountDataRequest.getStartKeyHash(),
accountDataRequest.getEndKeyHash(),
snapSyncConfiguration.getLocalFlatAccountCountToHealPerRequest());
final List<Bytes> proofs = new ArrayList<>();
if (!accounts.isEmpty()) {
// generate range proof if accounts are present
proofs.addAll(
worldStateProofProvider.getAccountProofRelatedNodes(
blockHeader.getStateRoot(), accounts.firstKey()));
proofs.addAll(
worldStateProofProvider.getAccountProofRelatedNodes(
blockHeader.getStateRoot(), accounts.lastKey()));
}
accountDataRequest.setRootHash(blockHeader.getStateRoot());
accountDataRequest.addLocalData(worldStateProofProvider, accounts, new ArrayDeque<>(proofs));
return CompletableFuture.completedFuture(requestTask);
}
/**
* Retrieves local storage slots from the flat database, generates the corresponding range proof,
* updates the data request with the retrieved information, and returns the modified task.
*
* @param requestTask request data to fill
* @return data request with local slots
*/
public CompletableFuture<Task<SnapDataRequest>> requestLocalFlatStorages(
final Task<SnapDataRequest> requestTask) {
final StorageFlatDatabaseHealingRangeRequest storageDataRequest =
(StorageFlatDatabaseHealingRangeRequest) requestTask.getData();
final BlockHeader blockHeader = fastSyncState.getPivotBlockHeader().get();
storageDataRequest.setRootHash(blockHeader.getStateRoot());
// retrieve slots from flat database
final TreeMap<Bytes32, Bytes> slots =
(TreeMap<Bytes32, Bytes>)
worldStateStorage.streamFlatStorages(
storageDataRequest.getAccountHash(),
storageDataRequest.getStartKeyHash(),
storageDataRequest.getEndKeyHash(),
snapSyncConfiguration.getLocalFlatStorageCountToHealPerRequest());
final List<Bytes> proofs = new ArrayList<>();
if (!slots.isEmpty()) {
// generate range proof if slots are present
proofs.addAll(
worldStateProofProvider.getStorageProofRelatedNodes(
storageDataRequest.getStorageRoot(),
storageDataRequest.getAccountHash(),
slots.firstKey()));
proofs.addAll(
worldStateProofProvider.getStorageProofRelatedNodes(
storageDataRequest.getStorageRoot(),
storageDataRequest.getAccountHash(),
slots.lastKey()));
}
storageDataRequest.addLocalData(worldStateProofProvider, slots, new ArrayDeque<>(proofs));
return CompletableFuture.completedFuture(requestTask);
}
}

@ -26,11 +26,13 @@ import org.hyperledger.besu.ethereum.eth.sync.fastsync.FastSyncDownloader;
import org.hyperledger.besu.ethereum.eth.sync.fastsync.FastSyncState;
import org.hyperledger.besu.ethereum.eth.sync.fastsync.FastSyncStateStorage;
import org.hyperledger.besu.ethereum.eth.sync.fastsync.worldstate.FastDownloaderFactory;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.context.SnapSyncStatePersistenceManager;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
import org.hyperledger.besu.ethereum.eth.sync.worldstate.WorldStateDownloader;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.mainnet.ScheduleBasedBlockHeaderFunctions;
import org.hyperledger.besu.ethereum.trie.CompactEncoding;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.services.tasks.InMemoryTasksPriorityQueues;
@ -47,7 +49,7 @@ public class SnapDownloaderFactory extends FastDownloaderFactory {
private static final Logger LOG = LoggerFactory.getLogger(SnapDownloaderFactory.class);
public static Optional<FastSyncDownloader<?>> createSnapDownloader(
final SnapPersistedContext snapContext,
final SnapSyncStatePersistenceManager snapContext,
final PivotBlockSelector pivotBlockSelector,
final SynchronizerConfiguration syncConfig,
final Path dataDirectory,
@ -76,12 +78,14 @@ public class SnapDownloaderFactory extends FastDownloaderFactory {
final FastSyncState fastSyncState =
fastSyncStateStorage.loadState(ScheduleBasedBlockHeaderFunctions.create(protocolSchedule));
if (syncState.isResyncNeeded()) {
snapContext.clear();
syncState
.getAccountToRepair()
.ifPresent(address -> snapContext.addInconsistentAccount(Hash.hash(address)));
.ifPresent(
address ->
snapContext.addAccountsToBeRepaired(
CompactEncoding.bytesToPath(Hash.hash(address))));
} else if (fastSyncState.getPivotBlockHeader().isEmpty()
&& protocolContext.getBlockchain().getChainHeadBlockNumber()
!= BlockHeader.GENESIS_BLOCK_NUMBER) {
@ -90,8 +94,8 @@ public class SnapDownloaderFactory extends FastDownloaderFactory {
return Optional.empty();
}
final SnapSyncState snapSyncState =
new SnapSyncState(
final SnapSyncProcessState snapSyncState =
new SnapSyncProcessState(
fastSyncStateStorage.loadState(
ScheduleBasedBlockHeaderFunctions.create(protocolSchedule)));

@ -22,9 +22,21 @@ public class SnapSyncConfiguration {
// we use 126 and not the max value (128) to avoid sending requests that will be refused
public static final int DEFAULT_PIVOT_BLOCK_WINDOW_VALIDITY = 126;
public static final int DEFAULT_PIVOT_BLOCK_DISTANCE_BEFORE_CACHING = 60;
public static final int DEFAULT_STORAGE_COUNT_PER_REQUEST = 384;
public static final int DEFAULT_BYTECODE_COUNT_PER_REQUEST = 84;
public static final int DEFAULT_TRIENODE_COUNT_PER_REQUEST = 384;
public static final int DEFAULT_STORAGE_COUNT_PER_REQUEST =
384; // The default number of storage entries to download from peers per request.
public static final int DEFAULT_BYTECODE_COUNT_PER_REQUEST =
84; // The default number of code entries to download from peers per request.
public static final int DEFAULT_TRIENODE_COUNT_PER_REQUEST =
384; // The default number of trienode entries to download from peers per request.
public static final int DEFAULT_LOCAL_FLAT_ACCOUNT_COUNT_TO_HEAL_PER_REQUEST =
128; // The default number of flat account entries to verify and heal per request.
public static final int DEFAULT_LOCAL_FLAT_STORAGE_COUNT_TO_HEAL_PER_REQUEST =
1024; // The default number of flat storage slot entries to verify and heal per request.
public static final Boolean DEFAULT_IS_FLAT_DB_HEALING_ENABLED = Boolean.FALSE;
public static SnapSyncConfiguration getDefault() {
return ImmutableSnapSyncConfiguration.builder().build();
@ -54,4 +66,19 @@ public class SnapSyncConfiguration {
public int getTrienodeCountPerRequest() {
return DEFAULT_TRIENODE_COUNT_PER_REQUEST;
}
@Value.Default
public int getLocalFlatAccountCountToHealPerRequest() {
return DEFAULT_LOCAL_FLAT_ACCOUNT_COUNT_TO_HEAL_PER_REQUEST;
}
@Value.Default
public int getLocalFlatStorageCountToHealPerRequest() {
return DEFAULT_LOCAL_FLAT_STORAGE_COUNT_TO_HEAL_PER_REQUEST;
}
@Value.Default
public Boolean isFlatDbHealingEnabled() {
return DEFAULT_IS_FLAT_DB_HEALING_ENABLED;
}
}
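If the new healing behaviour needs to be switched on explicitly, the configuration could be built roughly as follows (a sketch; the builder setter names are assumed from the usual Immutables conventions and do not appear in this diff):

import org.hyperledger.besu.ethereum.eth.sync.snapsync.ImmutableSnapSyncConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncConfiguration;

class SnapSyncConfigSketch {
  static SnapSyncConfiguration withFlatDbHealing() {
    // Setter names below are assumed from the Immutables-generated builder.
    return ImmutableSnapSyncConfiguration.builder()
        .isFlatDbHealingEnabled(true)
        .localFlatAccountCountToHealPerRequest(128)
        .localFlatStorageCountToHealPerRequest(1024)
        .build();
  }
}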

@ -61,6 +61,6 @@ public class SnapSyncDownloader extends FastSyncDownloader<SnapDataRequest> {
protected FastSyncState storeState(final FastSyncState fastSyncState) {
initialFastSyncState = fastSyncState;
fastSyncStateStorage.storeState(fastSyncState);
return new SnapSyncState(fastSyncState);
return new SnapSyncProcessState(fastSyncState);
}
}

@ -21,25 +21,38 @@ import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SnapSyncState extends FastSyncState {
private static final Logger LOG = LoggerFactory.getLogger(SnapSyncState.class);
/**
 * Represents the state of the SnapSync process, including whether the trie heal or the flat
 * database heal is in progress and whether the download is waiting for the blockchain to catch up.
 */
public class SnapSyncProcessState extends FastSyncState {
private static final Logger LOG = LoggerFactory.getLogger(SnapSyncProcessState.class);
private boolean isHealInProgress;
private boolean isHealTrieInProgress;
private boolean isHealFlatDatabaseInProgress;
private boolean isWaitingBlockchain;
public SnapSyncState(final FastSyncState fastSyncState) {
public SnapSyncProcessState(final FastSyncState fastSyncState) {
super(
fastSyncState.getPivotBlockNumber(),
fastSyncState.getPivotBlockHash(),
fastSyncState.getPivotBlockHeader());
}
public boolean isHealInProgress() {
return isHealInProgress;
public boolean isHealTrieInProgress() {
return isHealTrieInProgress;
}
public void setHealTrieStatus(final boolean healTrieStatus) {
isHealTrieInProgress = healTrieStatus;
}
public boolean isHealFlatDatabaseInProgress() {
return isHealFlatDatabaseInProgress;
}
public void setHealStatus(final boolean healStatus) {
isHealInProgress = healStatus;
public void setHealFlatDatabaseInProgress(final boolean healFlatDatabaseInProgress) {
isHealFlatDatabaseInProgress = healFlatDatabaseInProgress;
}
public boolean isWaitingBlockchain() {

@ -14,16 +14,22 @@
*/
package org.hyperledger.besu.ethereum.eth.sync.snapsync;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest.createAccountFlatHealingRangeRequest;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest.createAccountTrieNodeDataRequest;
import org.hyperledger.besu.ethereum.chain.BlockAddedObserver;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.context.SnapSyncStatePersistenceManager;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.AccountRangeDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.BytecodeRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.StorageRangeDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal.AccountFlatDatabaseHealingRangeRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal.StorageFlatDatabaseHealingRangeRequest;
import org.hyperledger.besu.ethereum.eth.sync.worldstate.WorldDownloadState;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.services.tasks.InMemoryTaskQueue;
@ -35,12 +41,14 @@ import java.time.Clock;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.OptionalLong;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.stream.Stream;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -52,18 +60,23 @@ public class SnapWorldDownloadState extends WorldDownloadState<SnapDataRequest>
new InMemoryTaskQueue<>();
protected final InMemoryTaskQueue<SnapDataRequest> pendingStorageRequests =
new InMemoryTaskQueue<>();
protected final InMemoryTaskQueue<SnapDataRequest> pendingBigStorageRequests =
protected final InMemoryTaskQueue<SnapDataRequest> pendingLargeStorageRequests =
new InMemoryTaskQueue<>();
protected final InMemoryTaskQueue<SnapDataRequest> pendingCodeRequests =
new InMemoryTaskQueue<>();
protected final InMemoryTasksPriorityQueues<SnapDataRequest> pendingTrieNodeRequests =
new InMemoryTasksPriorityQueues<>();
public HashSet<Bytes> inconsistentAccounts = new HashSet<>();
private DynamicPivotBlockManager dynamicPivotBlockManager;
protected final InMemoryTasksPriorityQueues<SnapDataRequest>
pendingAccountFlatDatabaseHealingRequests = new InMemoryTasksPriorityQueues<>();
private final SnapPersistedContext snapContext;
private final SnapSyncState snapSyncState;
protected final InMemoryTasksPriorityQueues<SnapDataRequest>
pendingStorageFlatDatabaseHealingRequests = new InMemoryTasksPriorityQueues<>();
private HashSet<Bytes> accountsToBeRepaired = new HashSet<>();
private DynamicPivotBlockSelector pivotBlockSelector;
private final SnapSyncStatePersistenceManager snapContext;
private final SnapSyncProcessState snapSyncState;
// blockchain
private final Blockchain blockchain;
@ -74,9 +87,9 @@ public class SnapWorldDownloadState extends WorldDownloadState<SnapDataRequest>
public SnapWorldDownloadState(
final WorldStateStorage worldStateStorage,
final SnapPersistedContext snapContext,
final SnapSyncStatePersistenceManager snapContext,
final Blockchain blockchain,
final SnapSyncState snapSyncState,
final SnapSyncProcessState snapSyncState,
final InMemoryTasksPriorityQueues<SnapDataRequest> pendingRequests,
final int maxRequestsWithoutProgress,
final long minMillisBeforeStalling,
@ -113,7 +126,7 @@ public class SnapWorldDownloadState extends WorldDownloadState<SnapDataRequest>
BesuMetricCategory.SYNCHRONIZER,
"snap_world_state_pending_big_storage_requests_current",
"Number of storage pending requests for snap sync world state download",
pendingBigStorageRequests::size);
pendingLargeStorageRequests::size);
metricsManager
.getMetricsSystem()
.createLongGauge(
@ -147,15 +160,21 @@ public class SnapWorldDownloadState extends WorldDownloadState<SnapDataRequest>
&& pendingAccountRequests.allTasksCompleted()
&& pendingCodeRequests.allTasksCompleted()
&& pendingStorageRequests.allTasksCompleted()
&& pendingBigStorageRequests.allTasksCompleted()
&& pendingTrieNodeRequests.allTasksCompleted()) {
if (!snapSyncState.isHealInProgress()) {
startHeal();
} else if (dynamicPivotBlockManager.isBlockchainBehind()) {
&& pendingLargeStorageRequests.allTasksCompleted()
&& pendingTrieNodeRequests.allTasksCompleted()
&& pendingAccountFlatDatabaseHealingRequests.allTasksCompleted()
&& pendingStorageFlatDatabaseHealingRequests.allTasksCompleted()) {
if (!snapSyncState.isHealTrieInProgress()) {
startTrieHeal();
} else if (pivotBlockSelector.isBlockchainBehind()) {
LOG.info("Pausing world state download while waiting for sync to complete");
if (blockObserverId.isEmpty())
blockObserverId = OptionalLong.of(blockchain.observeBlockAdded(getBlockAddedListener()));
snapSyncState.setWaitingBlockchain(true);
} else if (!snapSyncState.isHealFlatDatabaseInProgress()
&& worldStateStorage.getFlatDbMode().equals(FlatDbMode.FULL)) {
// the flat db heal is only performed for Bonsai, i.e. when the flat database is in FULL mode
startFlatDatabaseHeal(header);
} else {
final WorldStateStorage.Updater updater = worldStateStorage.updater();
updater.saveWorldState(header.getHash(), header.getStateRoot(), rootNodeData);
@ -163,6 +182,7 @@ public class SnapWorldDownloadState extends WorldDownloadState<SnapDataRequest>
metricsManager.notifySnapSyncCompleted();
snapContext.clear();
internalFuture.complete(null);
return true;
}
}
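For readability, a hypothetical, simplified mirror of the ordering applied by the completion check above (checkCompletion): the trie heal runs first, then (only when the flat database is in FULL mode) the flat database heal, and only afterwards is the world state persisted and the download completed.

class SnapSyncCompletionSketch {
  enum NextStep { START_TRIE_HEAL, WAIT_FOR_BLOCKCHAIN, START_FLAT_DB_HEAL, COMPLETE }

  static NextStep nextStep(
      final boolean trieHealStarted,
      final boolean blockchainBehind,
      final boolean flatDbHealStarted,
      final boolean fullFlatDbMode) {
    if (!trieHealStarted) {
      return NextStep.START_TRIE_HEAL;
    }
    if (blockchainBehind) {
      return NextStep.WAIT_FOR_BLOCKCHAIN;
    }
    if (!flatDbHealStarted && fullFlatDbMode) {
      return NextStep.START_FLAT_DB_HEAL;
    }
    return NextStep.COMPLETE;
  }
}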
@ -175,16 +195,16 @@ public class SnapWorldDownloadState extends WorldDownloadState<SnapDataRequest>
super.cleanupQueues();
pendingAccountRequests.clear();
pendingStorageRequests.clear();
pendingBigStorageRequests.clear();
pendingLargeStorageRequests.clear();
pendingCodeRequests.clear();
pendingTrieNodeRequests.clear();
}
public synchronized void startHeal() {
public synchronized void startTrieHeal() {
snapContext.clearAccountRangeTasks();
snapSyncState.setHealStatus(true);
snapSyncState.setHealTrieStatus(true);
// try to find a new pivot block before healing
dynamicPivotBlockManager.switchToNewPivotBlock(
pivotBlockSelector.switchToNewPivotBlock(
(blockHeader, newPivotBlockFound) -> {
snapContext.clearAccountRangeTasks();
LOG.info(
@ -192,19 +212,33 @@ public class SnapWorldDownloadState extends WorldDownloadState<SnapDataRequest>
blockHeader.getNumber());
enqueueRequest(
createAccountTrieNodeDataRequest(
blockHeader.getStateRoot(), Bytes.EMPTY, inconsistentAccounts));
blockHeader.getStateRoot(), Bytes.EMPTY, accountsToBeRepaired));
});
}
public synchronized void reloadHeal() {
public synchronized void reloadTrieHeal() {
worldStateStorage.clearFlatDatabase();
worldStateStorage.clearTrieLog();
pendingTrieNodeRequests.clear();
pendingCodeRequests.clear();
snapSyncState.setHealStatus(false);
snapSyncState.setHealTrieStatus(false);
checkCompletion(snapSyncState.getPivotBlockHeader().orElseThrow());
}
public synchronized void startFlatDatabaseHeal(final BlockHeader header) {
LOG.info("Running flat database heal process");
snapSyncState.setHealFlatDatabaseInProgress(true);
final Map<Bytes32, Bytes32> ranges = RangeManager.generateAllRanges(16);
ranges.forEach(
(key, value) ->
enqueueRequest(
createAccountFlatHealingRangeRequest(header.getStateRoot(), key, value)));
}
public boolean isBonsaiStorageFormat() {
return worldStateStorage.getDataStorageFormat().equals(DataStorageFormat.BONSAI);
}
@Override
public synchronized void enqueueRequest(final SnapDataRequest request) {
if (!internalFuture.isDone()) {
@ -212,12 +246,16 @@ public class SnapWorldDownloadState extends WorldDownloadState<SnapDataRequest>
pendingCodeRequests.add(request);
} else if (request instanceof StorageRangeDataRequest) {
if (!((StorageRangeDataRequest) request).getStartKeyHash().equals(RangeManager.MIN_RANGE)) {
pendingBigStorageRequests.add(request);
pendingLargeStorageRequests.add(request);
} else {
pendingStorageRequests.add(request);
}
} else if (request instanceof AccountRangeDataRequest) {
pendingAccountRequests.add(request);
} else if (request instanceof AccountFlatDatabaseHealingRangeRequest) {
pendingAccountFlatDatabaseHealingRequests.add(request);
} else if (request instanceof StorageFlatDatabaseHealingRangeRequest) {
pendingStorageFlatDatabaseHealingRequests.add(request);
} else {
pendingTrieNodeRequests.add(request);
}
@ -225,17 +263,28 @@ public class SnapWorldDownloadState extends WorldDownloadState<SnapDataRequest>
}
}
public synchronized void setInconsistentAccounts(final HashSet<Bytes> inconsistentAccounts) {
this.inconsistentAccounts = inconsistentAccounts;
public synchronized void setAccountsToBeRepaired(final HashSet<Bytes> accountsToBeRepaired) {
this.accountsToBeRepaired = accountsToBeRepaired;
}
public synchronized void addInconsistentAccount(final Bytes account) {
if (!inconsistentAccounts.contains(account)) {
snapContext.addInconsistentAccount(account);
inconsistentAccounts.add(account);
/**
* Adds an account to the list of accounts to be repaired during the healing process. If the
* account is not already in the list, it is added to both the snap context and the internal set
* of accounts to be repaired.
*
* @param account The account to be added for repair.
*/
public synchronized void addAccountsToBeRepaired(final Bytes account) {
if (!accountsToBeRepaired.contains(account)) {
snapContext.addAccountsToBeRepaired(account);
accountsToBeRepaired.add(account);
}
}
public HashSet<Bytes> getAccountsToBeRepaired() {
return accountsToBeRepaired;
}
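For example (the wrapper below is illustrative only), registering an account flagged as inconsistent uses the same path encoding applied by SnapDownloaderFactory and CheckpointDownloaderFactory earlier in this change:

import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapWorldDownloadState;
import org.hyperledger.besu.ethereum.trie.CompactEncoding;

class RepairAccountExample {
  // Keys the account by the trie path of its hash, matching the trie-heal requests.
  static void markForRepair(final SnapWorldDownloadState downloadState, final Address address) {
    downloadState.addAccountsToBeRepaired(CompactEncoding.bytesToPath(Hash.hash(address)));
  }
}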
@Override
public synchronized void enqueueRequests(final Stream<SnapDataRequest> requests) {
if (!internalFuture.isDone()) {
@ -281,13 +330,13 @@ public class SnapWorldDownloadState extends WorldDownloadState<SnapDataRequest>
public synchronized Task<SnapDataRequest> dequeueAccountRequestBlocking() {
return dequeueRequestBlocking(
List.of(pendingStorageRequests, pendingBigStorageRequests, pendingCodeRequests),
List.of(pendingStorageRequests, pendingLargeStorageRequests, pendingCodeRequests),
pendingAccountRequests,
unused -> snapContext.updatePersistedTasks(pendingAccountRequests.asList()));
}
public synchronized Task<SnapDataRequest> dequeueBigStorageRequestBlocking() {
return dequeueRequestBlocking(Collections.emptyList(), pendingBigStorageRequests, __ -> {});
public synchronized Task<SnapDataRequest> dequeueLargeStorageRequestBlocking() {
return dequeueRequestBlocking(Collections.emptyList(), pendingLargeStorageRequests, __ -> {});
}
public synchronized Task<SnapDataRequest> dequeueStorageRequestBlocking() {
@ -300,37 +349,60 @@ public class SnapWorldDownloadState extends WorldDownloadState<SnapDataRequest>
public synchronized Task<SnapDataRequest> dequeueTrieNodeRequestBlocking() {
return dequeueRequestBlocking(
List.of(pendingAccountRequests, pendingStorageRequests, pendingBigStorageRequests),
List.of(pendingAccountRequests, pendingStorageRequests, pendingLargeStorageRequests),
pendingTrieNodeRequests,
__ -> {});
}
public synchronized Task<SnapDataRequest> dequeueAccountFlatDatabaseHealingRequestBlocking() {
return dequeueRequestBlocking(
List.of(
pendingAccountRequests,
pendingStorageRequests,
pendingLargeStorageRequests,
pendingTrieNodeRequests,
pendingStorageFlatDatabaseHealingRequests),
pendingAccountFlatDatabaseHealingRequests,
__ -> {});
}
public synchronized Task<SnapDataRequest> dequeueStorageFlatDatabaseHealingRequestBlocking() {
return dequeueRequestBlocking(
List.of(
pendingAccountRequests,
pendingStorageRequests,
pendingLargeStorageRequests,
pendingTrieNodeRequests),
pendingStorageFlatDatabaseHealingRequests,
__ -> {});
}
public SnapsyncMetricsManager getMetricsManager() {
return metricsManager;
}
public void setDynamicPivotBlockManager(final DynamicPivotBlockManager dynamicPivotBlockManager) {
this.dynamicPivotBlockManager = dynamicPivotBlockManager;
public void setPivotBlockSelector(final DynamicPivotBlockSelector pivotBlockSelector) {
this.pivotBlockSelector = pivotBlockSelector;
}
public BlockAddedObserver getBlockAddedListener() {
return addedBlockContext -> {
if (snapSyncState.isWaitingBlockchain()) {
// if we receive a new pivot block we can restart the heal
dynamicPivotBlockManager.check(
pivotBlockSelector.check(
(____, isNewPivotBlock) -> {
if (isNewPivotBlock) {
snapSyncState.setWaitingBlockchain(false);
}
});
// if we are close to the head we can also restart the heal and finish snapsync
if (!dynamicPivotBlockManager.isBlockchainBehind()) {
if (!pivotBlockSelector.isBlockchainBehind()) {
snapSyncState.setWaitingBlockchain(false);
}
if (!snapSyncState.isWaitingBlockchain()) {
blockObserverId.ifPresent(blockchain::removeObserver);
blockObserverId = OptionalLong.empty();
reloadHeal();
reloadTrieHeal();
}
}
};
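For orientation, a minimal sketch of how the repair tracking added above is used: an account whose data cannot be proven is flagged by its trie path, and the new flat-account healing queue is drained by the pipeline. Only addAccountsToBeRepaired, dequeueAccountFlatDatabaseHealingRequestBlocking and CompactEncoding.bytesToPath come from this change; the wrapper class, variable names and call sites are illustrative assumptions, and the import paths follow the ones used elsewhere in the change.
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapWorldDownloadState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.trie.CompactEncoding;
import org.hyperledger.besu.services.tasks.Task;
final class RepairTrackingSketch {
  // Flag an account whose flat data could not be proven; it is stored as a trie path, the form
  // the healing requests compare against, and persisted through the snap context as shown above.
  static void flagForRepair(final SnapWorldDownloadState downloadState, final Hash accountHash) {
    downloadState.addAccountsToBeRepaired(CompactEncoding.bytesToPath(accountHash));
  }
  // Block until the next flat-account healing request is available, as the new healing pipeline
  // does through its TaskQueueIterator.
  static Task<SnapDataRequest> nextFlatAccountHealTask(final SnapWorldDownloadState downloadState) {
    return downloadState.dequeueAccountFlatDatabaseHealingRequestBlocking();
  }
}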

@ -15,7 +15,7 @@
package org.hyperledger.besu.ethereum.eth.sync.snapsync;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.DynamicPivotBlockManager.doNothingOnPivotChange;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.DynamicPivotBlockSelector.doNothingOnPivotChange;
import static org.hyperledger.besu.services.pipeline.PipelineBuilder.createPipelineFrom;
import org.hyperledger.besu.ethereum.eth.manager.EthScheduler;
@ -47,24 +47,33 @@ public class SnapWorldStateDownloadProcess implements WorldStateDownloadProcess
private final Pipeline<Task<SnapDataRequest>> completionPipeline;
private final Pipeline<Task<SnapDataRequest>> fetchAccountPipeline;
private final Pipeline<Task<SnapDataRequest>> fetchStorageDataPipeline;
private final Pipeline<Task<SnapDataRequest>> fetchBigStorageDataPipeline;
private final Pipeline<Task<SnapDataRequest>> fetchLargeStorageDataPipeline;
private final Pipeline<Task<SnapDataRequest>> fetchCodePipeline;
private final Pipeline<Task<SnapDataRequest>> fetchHealPipeline;
private final Pipeline<Task<SnapDataRequest>> trieHealingPipeline;
private final Pipeline<Task<SnapDataRequest>> flatAccountHealingPipeline;
private final Pipeline<Task<SnapDataRequest>> flatStorageHealingPipeline;
private final WritePipe<Task<SnapDataRequest>> requestsToComplete;
private SnapWorldStateDownloadProcess(
final Pipeline<Task<SnapDataRequest>> fetchAccountPipeline,
final Pipeline<Task<SnapDataRequest>> fetchStorageDataPipeline,
final Pipeline<Task<SnapDataRequest>> fetchBigStorageDataPipeline,
final Pipeline<Task<SnapDataRequest>> fetchLargeStorageDataPipeline,
final Pipeline<Task<SnapDataRequest>> fetchCodePipeline,
final Pipeline<Task<SnapDataRequest>> fetchHealPipeline,
final Pipeline<Task<SnapDataRequest>> trieHealingPipeline,
final Pipeline<Task<SnapDataRequest>> flatAccountHealingPipeline,
final Pipeline<Task<SnapDataRequest>> flatStorageHealingPipeline,
final Pipeline<Task<SnapDataRequest>> completionPipeline,
final WritePipe<Task<SnapDataRequest>> requestsToComplete) {
this.fetchStorageDataPipeline = fetchStorageDataPipeline;
this.fetchAccountPipeline = fetchAccountPipeline;
this.fetchBigStorageDataPipeline = fetchBigStorageDataPipeline;
this.fetchLargeStorageDataPipeline = fetchLargeStorageDataPipeline;
this.fetchCodePipeline = fetchCodePipeline;
this.fetchHealPipeline = fetchHealPipeline;
this.trieHealingPipeline = trieHealingPipeline;
this.flatAccountHealingPipeline = flatAccountHealingPipeline;
this.flatStorageHealingPipeline = flatStorageHealingPipeline;
this.completionPipeline = completionPipeline;
this.requestsToComplete = requestsToComplete;
}
@ -79,17 +88,24 @@ public class SnapWorldStateDownloadProcess implements WorldStateDownloadProcess
ethScheduler.startPipeline(fetchAccountPipeline);
final CompletableFuture<Void> fetchStorageFuture =
ethScheduler.startPipeline(fetchStorageDataPipeline);
final CompletableFuture<Void> fetchBigStorageFuture =
ethScheduler.startPipeline(fetchBigStorageDataPipeline);
final CompletableFuture<Void> fetchLargeStorageFuture =
ethScheduler.startPipeline(fetchLargeStorageDataPipeline);
final CompletableFuture<Void> fetchCodeFuture = ethScheduler.startPipeline(fetchCodePipeline);
final CompletableFuture<Void> fetchHealFuture = ethScheduler.startPipeline(fetchHealPipeline);
final CompletableFuture<Void> trieHealingFuture =
ethScheduler.startPipeline(trieHealingPipeline);
final CompletableFuture<Void> flatAccountHealingFuture =
ethScheduler.startPipeline(flatAccountHealingPipeline);
final CompletableFuture<Void> flatStorageHealingFuture =
ethScheduler.startPipeline(flatStorageHealingPipeline);
final CompletableFuture<Void> completionFuture = ethScheduler.startPipeline(completionPipeline);
fetchAccountFuture
.thenCombine(fetchStorageFuture, (unused, unused2) -> null)
.thenCombine(fetchBigStorageFuture, (unused, unused2) -> null)
.thenCombine(fetchLargeStorageFuture, (unused, unused2) -> null)
.thenCombine(fetchCodeFuture, (unused, unused2) -> null)
.thenCombine(fetchHealFuture, (unused, unused2) -> null)
.thenCombine(trieHealingFuture, (unused, unused2) -> null)
.thenCombine(flatAccountHealingFuture, (unused, unused2) -> null)
.thenCombine(flatStorageHealingFuture, (unused, unused2) -> null)
.whenComplete(
(result, error) -> {
if (error != null) {
@ -110,9 +126,11 @@ public class SnapWorldStateDownloadProcess implements WorldStateDownloadProcess
}
fetchAccountPipeline.abort();
fetchStorageDataPipeline.abort();
fetchBigStorageDataPipeline.abort();
fetchLargeStorageDataPipeline.abort();
fetchCodePipeline.abort();
fetchHealPipeline.abort();
trieHealingPipeline.abort();
flatAccountHealingPipeline.abort();
flatStorageHealingPipeline.abort();
return null;
});
return completionFuture;
@ -122,9 +140,11 @@ public class SnapWorldStateDownloadProcess implements WorldStateDownloadProcess
public void abort() {
fetchAccountPipeline.abort();
fetchStorageDataPipeline.abort();
fetchBigStorageDataPipeline.abort();
fetchLargeStorageDataPipeline.abort();
fetchCodePipeline.abort();
fetchHealPipeline.abort();
trieHealingPipeline.abort();
flatAccountHealingPipeline.abort();
flatStorageHealingPipeline.abort();
completionPipeline.abort();
}
@ -136,18 +156,19 @@ public class SnapWorldStateDownloadProcess implements WorldStateDownloadProcess
private MetricsSystem metricsSystem;
private LoadLocalDataStep loadLocalDataStep;
private RequestDataStep requestDataStep;
private SnapSyncState snapSyncState;
private SnapSyncProcessState snapSyncState;
private PersistDataStep persistDataStep;
private CompleteTaskStep completeTaskStep;
private DynamicPivotBlockManager pivotBlockManager;
private DynamicPivotBlockSelector pivotBlockManager;
public Builder configuration(final SnapSyncConfiguration snapSyncConfiguration) {
this.snapSyncConfiguration = snapSyncConfiguration;
return this;
}
public Builder pivotBlockManager(final DynamicPivotBlockManager pivotBlockManager) {
this.pivotBlockManager = pivotBlockManager;
public Builder dynamicPivotBlockSelector(
final DynamicPivotBlockSelector dynamicPivotBlockSelector) {
this.pivotBlockManager = dynamicPivotBlockSelector;
return this;
}
@ -181,7 +202,7 @@ public class SnapWorldStateDownloadProcess implements WorldStateDownloadProcess
return this;
}
public Builder fastSyncState(final SnapSyncState fastSyncState) {
public Builder fastSyncState(final SnapSyncProcessState fastSyncState) {
this.snapSyncState = fastSyncState;
return this;
}
@ -268,33 +289,34 @@ public class SnapWorldStateDownloadProcess implements WorldStateDownloadProcess
tasks.forEach(requestsToComplete::put);
});
final Pipeline<Task<SnapDataRequest>> fetchBigStorageDataPipeline =
final Pipeline<Task<SnapDataRequest>> fetchLargeStorageDataPipeline =
createPipelineFrom(
"dequeueBigStorageRequestBlocking",
"dequeueLargeStorageRequestBlocking",
new TaskQueueIterator<>(
downloadState, () -> downloadState.dequeueBigStorageRequestBlocking()),
downloadState, () -> downloadState.dequeueLargeStorageRequestBlocking()),
bufferCapacity,
outputCounter,
true,
"world_state_download")
.thenProcess(
"checkNewPivotBlock-BigStorage",
"checkNewPivotBlock-LargeStorage",
tasks -> {
pivotBlockManager.check(doNothingOnPivotChange);
return tasks;
})
.thenProcessAsyncOrdered(
"batchDownloadBigStorageData",
"batchDownloadLargeStorageData",
requestTask -> requestDataStep.requestStorage(List.of(requestTask)),
maxOutstandingRequests)
.thenProcess(
"batchPersistBigStorageData",
"batchPersistLargeStorageData",
task -> {
persistDataStep.persist(task);
return task;
})
.andFinishWith(
"batchBigStorageDataDownloaded", tasks -> tasks.forEach(requestsToComplete::put));
"batchLargeStorageDataDownloaded",
tasks -> tasks.forEach(requestsToComplete::put));
final Pipeline<Task<SnapDataRequest>> fetchCodePipeline =
createPipelineFrom(
@ -337,7 +359,7 @@ public class SnapWorldStateDownloadProcess implements WorldStateDownloadProcess
.andFinishWith(
"batchCodeDataDownloaded", tasks -> tasks.forEach(requestsToComplete::put));
final Pipeline<Task<SnapDataRequest>> fetchHealDataPipeline =
final Pipeline<Task<SnapDataRequest>> trieHealingPipeline =
createPipelineFrom(
"requestTrieNodeDequeued",
new TaskQueueIterator<>(
@ -345,7 +367,7 @@ public class SnapWorldStateDownloadProcess implements WorldStateDownloadProcess
bufferCapacity,
outputCounter,
true,
"world_state_download")
"world_state_heal")
.thenFlatMapInParallel(
"requestLoadLocalTrieNodeData",
task -> loadLocalDataStep.loadLocalDataTrieNode(task, requestsToComplete),
@ -373,23 +395,63 @@ public class SnapWorldStateDownloadProcess implements WorldStateDownloadProcess
.andFinishWith(
"batchTrieNodeDataDownloaded", tasks -> tasks.forEach(requestsToComplete::put));
final Pipeline<Task<SnapDataRequest>> accountFlatDatabaseHealingPipeline =
createPipelineFrom(
"dequeueFlatAccountRequestBlocking",
new TaskQueueIterator<>(
downloadState,
() -> downloadState.dequeueAccountFlatDatabaseHealingRequestBlocking()),
bufferCapacity,
outputCounter,
true,
"world_state_heal")
.thenProcessAsync(
"batchDownloadFlatAccountData",
requestTask -> requestDataStep.requestLocalFlatAccounts(requestTask),
maxOutstandingRequests)
.thenProcess(
"batchHealAndPersistFlatAccountData",
task -> persistDataStep.healFlatDatabase(task))
.andFinishWith("batchFlatAccountDataDownloaded", requestsToComplete::put);
final Pipeline<Task<SnapDataRequest>> storageFlatDatabaseHealingPipeline =
createPipelineFrom(
"dequeueFlatStorageRequestBlocking",
new TaskQueueIterator<>(
downloadState,
() -> downloadState.dequeueStorageFlatDatabaseHealingRequestBlocking()),
bufferCapacity,
outputCounter,
true,
"world_state_heal")
.thenProcessAsyncOrdered(
"batchDownloadFlatStorageData",
requestTask -> requestDataStep.requestLocalFlatStorages(requestTask),
maxOutstandingRequests)
.thenProcess(
"batchHealAndPersistFlatStorageData",
task -> persistDataStep.healFlatDatabase(task))
.andFinishWith("batchFlatStorageDataDownloaded", requestsToComplete::put);
return new SnapWorldStateDownloadProcess(
fetchAccountDataPipeline,
fetchStorageDataPipeline,
fetchBigStorageDataPipeline,
fetchLargeStorageDataPipeline,
fetchCodePipeline,
fetchHealDataPipeline,
trieHealingPipeline,
accountFlatDatabaseHealingPipeline,
storageFlatDatabaseHealingPipeline,
completionPipeline,
requestsToComplete);
}
}
private static void reloadHealWhenNeeded(
final SnapSyncState snapSyncState,
final SnapSyncProcessState snapSyncState,
final SnapWorldDownloadState downloadState,
final boolean newBlockFound) {
if (snapSyncState.isHealInProgress() && newBlockFound) {
downloadState.reloadHeal();
if (snapSyncState.isHealTrieInProgress() && newBlockFound) {
downloadState.reloadTrieHeal();
}
}
}

@ -14,17 +14,21 @@
*/
package org.hyperledger.besu.ethereum.eth.sync.snapsync;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapsyncMetricsManager.Step.DOWNLOAD;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest.createAccountRangeDataRequest;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.eth.manager.EthContext;
import org.hyperledger.besu.ethereum.eth.sync.fastsync.FastSyncActions;
import org.hyperledger.besu.ethereum.eth.sync.fastsync.FastSyncState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.context.SnapSyncStatePersistenceManager;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.AccountRangeDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.worldstate.WorldStateDownloader;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.plugin.services.MetricsSystem;
@ -53,7 +57,7 @@ public class SnapWorldStateDownloader implements WorldStateDownloader {
private final MetricsSystem metricsSystem;
private final EthContext ethContext;
private final SnapPersistedContext snapContext;
private final SnapSyncStatePersistenceManager snapContext;
private final InMemoryTasksPriorityQueues<SnapDataRequest> snapTaskCollection;
private final SnapSyncConfiguration snapSyncConfiguration;
private final int maxOutstandingRequests;
@ -65,7 +69,7 @@ public class SnapWorldStateDownloader implements WorldStateDownloader {
public SnapWorldStateDownloader(
final EthContext ethContext,
final SnapPersistedContext snapContext,
final SnapSyncStatePersistenceManager snapContext,
final ProtocolContext protocolContext,
final WorldStateStorage worldStateStorage,
final InMemoryTasksPriorityQueues<SnapDataRequest> snapTaskCollection,
@ -120,7 +124,7 @@ public class SnapWorldStateDownloader implements WorldStateDownloader {
return failed;
}
final SnapSyncState snapSyncState = (SnapSyncState) fastSyncState;
final SnapSyncProcessState snapSyncState = (SnapSyncProcessState) fastSyncState;
final BlockHeader header = fastSyncState.getPivotBlockHeader().get();
final Hash stateRoot = header.getStateRoot();
LOG.info(
@ -149,28 +153,34 @@ public class SnapWorldStateDownloader implements WorldStateDownloader {
final List<AccountRangeDataRequest> currentAccountRange =
snapContext.getCurrentAccountRange();
final HashSet<Bytes> inconsistentAccounts = snapContext.getInconsistentAccounts();
final HashSet<Bytes> inconsistentAccounts = snapContext.getAccountsToBeRepaired();
if (!currentAccountRange.isEmpty()) { // continue to download worldstate ranges
newDownloadState.setInconsistentAccounts(inconsistentAccounts);
newDownloadState.setAccountsToBeRepaired(inconsistentAccounts);
snapContext
.getCurrentAccountRange()
.forEach(
snapDataRequest -> {
snapsyncMetricsManager.notifyStateDownloaded(
snapDataRequest.getStartKeyHash(), snapDataRequest.getEndKeyHash());
snapsyncMetricsManager.notifyRangeProgress(
DOWNLOAD, snapDataRequest.getStartKeyHash(), snapDataRequest.getEndKeyHash());
newDownloadState.enqueueRequest(snapDataRequest);
});
} else if (!snapContext.getInconsistentAccounts().isEmpty()) { // restart only the heal step
snapSyncState.setHealStatus(true);
} else if (!snapContext.getAccountsToBeRepaired().isEmpty()) { // restart only the heal step
snapSyncState.setHealTrieStatus(true);
worldStateStorage.clearFlatDatabase();
worldStateStorage.clearTrieLog();
newDownloadState.setInconsistentAccounts(inconsistentAccounts);
newDownloadState.setAccountsToBeRepaired(inconsistentAccounts);
newDownloadState.enqueueRequest(
SnapDataRequest.createAccountTrieNodeDataRequest(
stateRoot, Bytes.EMPTY, snapContext.getInconsistentAccounts()));
} else { // start from scratch
stateRoot, Bytes.EMPTY, snapContext.getAccountsToBeRepaired()));
} else {
// start from scratch
worldStateStorage.clear();
// we have to upgrade to full flat db mode if we are in bonsai mode
if (worldStateStorage.getDataStorageFormat().equals(DataStorageFormat.BONSAI)
&& snapSyncConfiguration.isFlatDbHealingEnabled()) {
((BonsaiWorldStateKeyValueStorage) worldStateStorage).upgradeToFullFlatDbMode();
}
ranges.forEach(
(key, value) ->
newDownloadState.enqueueRequest(
@ -180,8 +190,8 @@ public class SnapWorldStateDownloader implements WorldStateDownloader {
Optional<CompleteTaskStep> maybeCompleteTask =
Optional.of(new CompleteTaskStep(snapSyncState, metricsSystem));
final DynamicPivotBlockManager dynamicPivotBlockManager =
new DynamicPivotBlockManager(
final DynamicPivotBlockSelector dynamicPivotBlockManager =
new DynamicPivotBlockSelector(
ethContext,
fastSyncActions,
snapSyncState,
@ -192,26 +202,32 @@ public class SnapWorldStateDownloader implements WorldStateDownloader {
SnapWorldStateDownloadProcess.builder()
.configuration(snapSyncConfiguration)
.maxOutstandingRequests(maxOutstandingRequests)
.pivotBlockManager(dynamicPivotBlockManager)
.dynamicPivotBlockSelector(dynamicPivotBlockManager)
.loadLocalDataStep(
new LoadLocalDataStep(
worldStateStorage, newDownloadState, metricsSystem, snapSyncState))
worldStateStorage,
newDownloadState,
snapSyncConfiguration,
metricsSystem,
snapSyncState))
.requestDataStep(
new RequestDataStep(
ethContext,
worldStateStorage,
snapSyncState,
newDownloadState,
snapSyncConfiguration,
metricsSystem))
.persistDataStep(
new PersistDataStep(snapSyncState, worldStateStorage, newDownloadState))
new PersistDataStep(
snapSyncState, worldStateStorage, newDownloadState, snapSyncConfiguration))
.completeTaskStep(maybeCompleteTask.get())
.downloadState(newDownloadState)
.fastSyncState(snapSyncState)
.metricsSystem(metricsSystem)
.build();
newDownloadState.setDynamicPivotBlockManager(dynamicPivotBlockManager);
newDownloadState.setPivotBlockSelector(dynamicPivotBlockManager);
return newDownloadState.startDownload(downloadProcess, ethContext.getScheduler());
}

@ -15,6 +15,7 @@
package org.hyperledger.besu.ethereum.eth.sync.snapsync;
import static io.netty.util.internal.ObjectUtil.checkNonEmpty;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapsyncMetricsManager.Step.HEAL_TRIE;
import org.hyperledger.besu.ethereum.eth.manager.EthContext;
import org.hyperledger.besu.metrics.BesuMetricCategory;
@ -35,6 +36,7 @@ import org.apache.tuweni.bytes.Bytes32;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Manages the metrics related to the SnapSync process. */
public class SnapsyncMetricsManager {
private static final Logger LOG = LoggerFactory.getLogger(SnapsyncMetricsManager.class);
@ -43,12 +45,34 @@ public class SnapsyncMetricsManager {
private final MetricsSystem metricsSystem;
private final EthContext ethContext;
private final AtomicReference<BigDecimal> percentageDownloaded;
private final AtomicLong nbAccounts;
private final AtomicLong nbSlots;
/** Represents the progress status of the snapsync process. */
private final AtomicReference<BigDecimal> percentageProgress;
/**
* Represents the number of accounts downloaded during the initial step of the snapsync process.
*/
private final AtomicLong nbAccountsDownloaded;
/** Represents the number of slots downloaded during the initial step of the snapsync process. */
private final AtomicLong nbSlotsDownloaded;
/** Represents the number of code entries downloaded. */
private final AtomicLong nbCodes;
private final AtomicLong nbNodesGenerated;
private final AtomicLong nbNodesHealed;
/**
* Represents the number of trie nodes generated during the initial step of the snapsync process.
*/
private final AtomicLong nbTrieNodesGenerated;
/** Represents the number of flat accounts healed during the healing process. */
private final AtomicLong nbFlatAccountsHealed;
/** Represents the number of flat slots healed during the healing process. */
private final AtomicLong nbFlatSlotsHealed;
/** Represents the number of trie nodes healed during the healing process. */
private final AtomicLong nbTrieNodesHealed;
private long startSyncTime;
private final Map<Bytes32, BigInteger> lastRangeIndex = new HashMap<>();
@ -58,33 +82,44 @@ public class SnapsyncMetricsManager {
public SnapsyncMetricsManager(final MetricsSystem metricsSystem, final EthContext ethContext) {
this.metricsSystem = metricsSystem;
this.ethContext = ethContext;
percentageDownloaded = new AtomicReference<>(new BigDecimal(0));
nbAccounts = new AtomicLong(0);
nbSlots = new AtomicLong(0);
percentageProgress = new AtomicReference<>(new BigDecimal(0));
nbAccountsDownloaded = new AtomicLong(0);
nbSlotsDownloaded = new AtomicLong(0);
nbCodes = new AtomicLong(0);
nbNodesGenerated = new AtomicLong(0);
nbNodesHealed = new AtomicLong(0);
nbTrieNodesGenerated = new AtomicLong(0);
nbFlatAccountsHealed = new AtomicLong(0);
nbFlatSlotsHealed = new AtomicLong(0);
nbTrieNodesHealed = new AtomicLong(0);
metricsSystem.createLongGauge(
BesuMetricCategory.SYNCHRONIZER,
"snap_world_state_generated_nodes_total",
"Total number of data nodes generated as part of snap sync world state download",
nbNodesGenerated::get);
nbTrieNodesGenerated::get);
metricsSystem.createLongGauge(
BesuMetricCategory.SYNCHRONIZER,
"snap_world_state_healed_nodes_total",
"Total number of data nodes healed as part of snap sync world state heal process",
nbNodesHealed::get);
nbTrieNodesHealed::get);
metricsSystem.createLongGauge(
BesuMetricCategory.SYNCHRONIZER,
"snap_world_state_accounts_total",
"Total number of accounts downloaded as part of snap sync world state",
nbAccounts::get);
nbAccountsDownloaded::get);
metricsSystem.createLongGauge(
BesuMetricCategory.SYNCHRONIZER,
"snap_world_state_slots_total",
"Total number of slots downloaded as part of snap sync world state",
nbSlots::get);
nbSlotsDownloaded::get);
metricsSystem.createLongGauge(
BesuMetricCategory.SYNCHRONIZER,
"snap_world_state_flat_accounts_healed_total",
"Total number of accounts healed in the flat database as part of snap sync world state",
nbFlatAccountsHealed::get);
metricsSystem.createLongGauge(
BesuMetricCategory.SYNCHRONIZER,
"snap_world_state_flat_slots_healed_total",
"Total number of slots healed in the flat database as part of snap sync world state",
nbFlatSlotsHealed::get);
metricsSystem.createLongGauge(
BesuMetricCategory.SYNCHRONIZER,
"snap_world_state_codes_total",
@ -94,18 +129,19 @@ public class SnapsyncMetricsManager {
public void initRange(final Map<Bytes32, Bytes32> ranges) {
for (Map.Entry<Bytes32, Bytes32> entry : ranges.entrySet()) {
lastRangeIndex.put(entry.getValue(), entry.getKey().toUnsignedBigInteger());
this.lastRangeIndex.put(entry.getValue(), entry.getKey().toUnsignedBigInteger());
}
startSyncTime = System.currentTimeMillis();
lastNotifyTimestamp = startSyncTime;
this.startSyncTime = System.currentTimeMillis();
this.lastNotifyTimestamp = startSyncTime;
}
public void notifyStateDownloaded(final Bytes32 startKeyHash, final Bytes32 endKeyHash) {
public void notifyRangeProgress(
final Step step, final Bytes32 startKeyHash, final Bytes32 endKeyHash) {
checkNonEmpty(lastRangeIndex, "snapsync range collection");
if (lastRangeIndex.containsKey(endKeyHash)) {
final BigInteger lastPos = lastRangeIndex.get(endKeyHash);
final BigInteger newPos = startKeyHash.toUnsignedBigInteger();
percentageDownloaded.getAndAccumulate(
percentageProgress.getAndAccumulate(
BigDecimal.valueOf(100)
.multiply(new BigDecimal(newPos.subtract(lastPos)))
.divide(
@ -113,16 +149,16 @@ public class SnapsyncMetricsManager {
MathContext.DECIMAL32),
BigDecimal::add);
lastRangeIndex.put(endKeyHash, newPos);
print(false);
print(step);
}
}
public void notifyAccountsDownloaded(final long nbAccounts) {
this.nbAccounts.getAndAdd(nbAccounts);
this.nbAccountsDownloaded.getAndAdd(nbAccounts);
}
public void notifySlotsDownloaded(final long nbSlots) {
this.nbSlots.getAndAdd(nbSlots);
this.nbSlotsDownloaded.getAndAdd(nbSlots);
}
public void notifyCodeDownloaded() {
@ -130,34 +166,51 @@ public class SnapsyncMetricsManager {
}
public void notifyNodesGenerated(final long nbNodes) {
this.nbNodesGenerated.getAndAdd(nbNodes);
this.nbTrieNodesGenerated.getAndAdd(nbNodes);
}
public void notifyNodesHealed(final long nbNodes) {
this.nbNodesHealed.getAndAdd(nbNodes);
print(true);
public void notifyTrieNodesHealed(final long nbNodes) {
this.nbTrieNodesHealed.getAndAdd(nbNodes);
print(HEAL_TRIE);
}
private void print(final boolean isHeal) {
private void print(final Step step) {
final long now = System.currentTimeMillis();
if (now - lastNotifyTimestamp >= PRINT_DELAY) {
lastNotifyTimestamp = now;
if (!isHeal) {
int peerCount = -1; // ethContext is not available in tests
if (ethContext != null && ethContext.getEthPeers().peerCount() >= 0) {
peerCount = ethContext.getEthPeers().peerCount();
}
switch (step) {
case DOWNLOAD -> {
LOG.debug(
"Worldstate download in progress accounts={}, slots={}, codes={}, nodes={}",
nbAccounts,
nbSlots,
"Worldstate {} in progress accounts={}, slots={}, codes={}, nodes={}",
step.message,
nbAccountsDownloaded,
nbSlotsDownloaded,
nbCodes,
nbNodesGenerated);
nbTrieNodesGenerated);
LOG.info(
"Worldstate {} progress: {}%, Peer count: {}",
step.message, percentageProgress.get().setScale(2, RoundingMode.HALF_UP), peerCount);
}
case HEAL_FLAT -> {
LOG.debug(
"Worldstate {} in progress accounts={}, slots={}",
step.message,
nbFlatAccountsHealed,
nbFlatSlotsHealed);
LOG.info(
"Worldstate {} progress: {}%, Peer count: {}",
step.message, percentageProgress.get().setScale(2, RoundingMode.HALF_UP), peerCount);
}
case HEAL_TRIE -> {
LOG.info(
"Worldstate download progress: {}%, Peer count: {}",
percentageDownloaded.get().setScale(2, RoundingMode.HALF_UP), peerCount);
} else {
LOG.info("Healed {} world state nodes", nbNodesHealed.get());
"Healed {} world state trie nodes, Peer count: {}",
nbTrieNodesHealed.get(),
peerCount);
}
}
}
}
@ -166,8 +219,8 @@ public class SnapsyncMetricsManager {
final Duration duration = Duration.ofMillis(System.currentTimeMillis() - startSyncTime);
LOG.info(
"Finished worldstate snapsync with nodes {} (healed={}) duration {}{}:{},{}.",
nbNodesGenerated.addAndGet(nbNodesHealed.get()),
nbNodesHealed,
nbTrieNodesGenerated.addAndGet(nbTrieNodesHealed.get()),
nbTrieNodesHealed,
duration.toHoursPart() > 0 ? (duration.toHoursPart() + ":") : "",
duration.toMinutesPart(),
duration.toSecondsPart(),
@ -177,4 +230,16 @@ public class SnapsyncMetricsManager {
public MetricsSystem getMetricsSystem() {
return metricsSystem;
}
public enum Step {
DOWNLOAD("download"),
HEAL_TRIE("trie node healing"),
HEAL_FLAT("flat database healing");
final String message;
Step(final String message) {
this.message = message;
}
}
}
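A short sketch of how the reworked metrics API is driven, assuming initRange(...) has already been called; the variable names and the wrapper class are illustrative, while the Step values and the notifyRangeProgress / notifyTrieNodesHealed signatures are the ones introduced above.
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapsyncMetricsManager.Step.DOWNLOAD;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapsyncMetricsManager.Step.HEAL_FLAT;
import org.apache.tuweni.bytes.Bytes32;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapsyncMetricsManager;
final class MetricsSketch {
  // Report progress for one completed account range during download and one healed flat range;
  // each call updates the shared percentage and logs at most once per PRINT_DELAY interval.
  static void report(
      final SnapsyncMetricsManager metrics,
      final Bytes32 startKeyHash,
      final Bytes32 endKeyHash) {
    metrics.notifyRangeProgress(DOWNLOAD, startKeyHash, endKeyHash);
    metrics.notifyRangeProgress(HEAL_FLAT, startKeyHash, endKeyHash);
    // Trie-node healing progress is reported separately and logged under the HEAL_TRIE step.
    metrics.notifyTrieNodesHealed(1);
  }
}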

@ -38,6 +38,13 @@ import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.immutables.value.Value;
/**
* StackTrie represents a stack-based Merkle Patricia Trie used in the context of snapsync
* synchronization. It allows adding elements, retrieving elements, and committing the trie changes
* to a node updater and flat database updater. The trie operates on a stack of segments and commits
* the changes once the number of segments reaches a threshold. It utilizes proofs and keys to build
* and update the trie structure.
*/
public class StackTrie {
private final Bytes32 rootHash;
@ -80,6 +87,10 @@ public class StackTrie {
}
public void commit(final NodeUpdater nodeUpdater) {
commit((key, value) -> {}, nodeUpdater);
}
public void commit(final FlatDatabaseUpdater flatDatabaseUpdater, final NodeUpdater nodeUpdater) {
if (nbSegments.decrementAndGet() <= 0 && !elements.isEmpty()) {
@ -112,9 +123,12 @@ public class StackTrie {
new StoredMerklePatriciaTrie<>(
snapStoredNodeFactory, proofs.isEmpty() ? MerkleTrie.EMPTY_TRIE_NODE_HASH : rootHash);
for (Map.Entry<Bytes32, Bytes> account : keys.entrySet()) {
trie.put(account.getKey(), new SnapPutVisitor<>(snapStoredNodeFactory, account.getValue()));
for (Map.Entry<Bytes32, Bytes> entry : keys.entrySet()) {
trie.put(entry.getKey(), new SnapPutVisitor<>(snapStoredNodeFactory, entry.getValue()));
}
keys.forEach(flatDatabaseUpdater::update);
trie.commit(
nodeUpdater,
(new CommitVisitor<>(nodeUpdater) {
@ -137,6 +151,15 @@ public class StackTrie {
}
}
public interface FlatDatabaseUpdater {
static FlatDatabaseUpdater noop() {
return (key, value) -> {};
}
void update(final Bytes32 key, final Bytes value);
}
@Value.Immutable
public abstract static class TaskElement {

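A minimal sketch of the new two-updater commit, assuming a Bonsai updater is in scope (as in AccountRangeDataRequest further down) and that putAccountStateTrieNode is the existing trie-node write on WorldStateStorage.Updater; the FlatDatabaseUpdater and NodeUpdater shapes are the ones shown above, while the wrapper method and parameter names are illustrative.
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.StackTrie;
import org.hyperledger.besu.ethereum.trie.NodeUpdater;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
final class StackTrieCommitSketch {
  // Commit the stacked range once: trie nodes go to the world-state updater while the same
  // key/value pairs are mirrored into the flat account table through the flat updater.
  static void commitWithFlatDb(
      final StackTrie stackTrie,
      final WorldStateStorage.Updater updater,
      final BonsaiWorldStateKeyValueStorage.BonsaiUpdater bonsaiUpdater) {
    final StackTrie.FlatDatabaseUpdater flatUpdater =
        (key, value) -> bonsaiUpdater.putAccountInfoState(Hash.wrap(key), value);
    final NodeUpdater nodeUpdater =
        (location, hash, value) -> updater.putAccountStateTrieNode(location, hash, value);
    stackTrie.commit(flatUpdater, nodeUpdater);
  }
}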
@ -12,7 +12,7 @@
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.eth.sync.snapsync;
package org.hyperledger.besu.ethereum.eth.sync.snapsync.context;
import org.hyperledger.besu.ethereum.eth.sync.backwardsync.GenericKeyValueStorageFacade;
import org.hyperledger.besu.ethereum.eth.sync.backwardsync.ValueConvertor;
@ -34,16 +34,21 @@ import java.util.stream.IntStream;
import org.apache.tuweni.bytes.Bytes;
public class SnapPersistedContext {
/**
* Manages the persistence of the SnapSync state, allowing it to be saved and retrieved from the
* database. The SnapSync state includes the current progress, downloaded data, and other relevant
* information needed to resume SnapSync from where it left off after a client restart.
*/
public class SnapSyncStatePersistenceManager {
private final byte[] SNAP_INCONSISTENT_ACCOUNT_INDEX =
private final byte[] SNAP_ACCOUNT_TO_BE_REPAIRED_INDEX =
"snapInconsistentAccountsStorageIndex".getBytes(StandardCharsets.UTF_8);
private final GenericKeyValueStorageFacade<BigInteger, AccountRangeDataRequest>
accountRangeToDownload;
private final GenericKeyValueStorageFacade<BigInteger, Bytes> healContext;
public SnapPersistedContext(final StorageProvider storageProvider) {
public SnapSyncStatePersistenceManager(final StorageProvider storageProvider) {
this.accountRangeToDownload =
new GenericKeyValueStorageFacade<>(
BigInteger::toByteArray,
@ -79,6 +84,11 @@ public class SnapPersistedContext {
KeyValueSegmentIdentifier.SNAPSYNC_ACCOUNT_TO_FIX));
}
/**
* Persists the current account range tasks to the database.
*
* @param accountRangeDataRequests The current account range tasks to persist.
*/
public void updatePersistedTasks(final List<? extends SnapDataRequest> accountRangeDataRequests) {
accountRangeToDownload.clear();
accountRangeToDownload.putAll(
@ -93,16 +103,21 @@ public class SnapPersistedContext {
.toArrayUnsafe())));
}
public void addInconsistentAccount(final Bytes inconsistentAccount) {
/**
* Persists an account to be repaired in the database.
*
* @param accountsToBeRepaired The account to persist for repair.
*/
public void addAccountsToBeRepaired(final Bytes accountsToBeRepaired) {
final BigInteger index =
healContext
.get(SNAP_INCONSISTENT_ACCOUNT_INDEX)
.get(SNAP_ACCOUNT_TO_BE_REPAIRED_INDEX)
.map(bytes -> new BigInteger(bytes.toArrayUnsafe()).add(BigInteger.ONE))
.orElse(BigInteger.ZERO);
healContext.putAll(
keyValueStorageTransaction -> {
keyValueStorageTransaction.put(SNAP_INCONSISTENT_ACCOUNT_INDEX, index.toByteArray());
keyValueStorageTransaction.put(index.toByteArray(), inconsistentAccount.toArrayUnsafe());
keyValueStorageTransaction.put(SNAP_ACCOUNT_TO_BE_REPAIRED_INDEX, index.toByteArray());
keyValueStorageTransaction.put(index.toByteArray(), accountsToBeRepaired.toArrayUnsafe());
});
}
@ -112,9 +127,9 @@ public class SnapPersistedContext {
.collect(Collectors.toList());
}
public HashSet<Bytes> getInconsistentAccounts() {
public HashSet<Bytes> getAccountsToBeRepaired() {
return healContext
.streamValuesFromKeysThat(notEqualsTo(SNAP_INCONSISTENT_ACCOUNT_INDEX))
.streamValuesFromKeysThat(notEqualsTo(SNAP_ACCOUNT_TO_BE_REPAIRED_INDEX))
.collect(Collectors.toCollection(HashSet::new));
}
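A short sketch of the persistence round trip, assuming a SnapSyncStatePersistenceManager built from a StorageProvider as above; the indexed key scheme gives each flagged account its own entry, and getAccountsToBeRepaired streams everything back except the index key itself. The wrapper class and variable names are illustrative.
import java.util.HashSet;
import org.apache.tuweni.bytes.Bytes;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.context.SnapSyncStatePersistenceManager;
import org.hyperledger.besu.ethereum.trie.CompactEncoding;
final class HealContextSketch {
  // Persist one account flagged for repair, then read the full set back (e.g. after a restart),
  // which is how SnapWorldStateDownloader decides to resume only the heal step.
  static HashSet<Bytes> roundTrip(
      final SnapSyncStatePersistenceManager snapContext, final Hash accountHash) {
    snapContext.addAccountsToBeRepaired(CompactEncoding.bytesToPath(accountHash));
    return snapContext.getAccountsToBeRepaired();
  }
}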

@ -18,15 +18,20 @@ import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager.MAX_R
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager.MIN_RANGE;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager.findNewBeginElementInRange;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RequestType.ACCOUNT_RANGE;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapsyncMetricsManager.Step.DOWNLOAD;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.StackTrie.FlatDatabaseUpdater.noop;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncState;
import org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncProcessState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapWorldDownloadState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.StackTrie;
import org.hyperledger.besu.ethereum.proof.WorldStateProofProvider;
import org.hyperledger.besu.ethereum.rlp.RLP;
import org.hyperledger.besu.ethereum.rlp.RLPInput;
import org.hyperledger.besu.ethereum.trie.NodeUpdater;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage.Updater;
@ -51,8 +56,8 @@ public class AccountRangeDataRequest extends SnapDataRequest {
private static final Logger LOG = LoggerFactory.getLogger(AccountRangeDataRequest.class);
protected final Bytes32 startKeyHash;
protected final Bytes32 endKeyHash;
private final Bytes32 startKeyHash;
private final Bytes32 endKeyHash;
private final Optional<Bytes32> startStorageRange;
private final Optional<Bytes32> endStorageRange;
@ -102,7 +107,8 @@ public class AccountRangeDataRequest extends SnapDataRequest {
final WorldStateStorage worldStateStorage,
final Updater updater,
final SnapWorldDownloadState downloadState,
final SnapSyncState snapSyncState) {
final SnapSyncProcessState snapSyncState,
final SnapSyncConfiguration snapSyncConfiguration) {
if (startStorageRange.isPresent() && endStorageRange.isPresent()) {
// do not store the new account if we just want to complete the account thanks to another
@ -118,7 +124,15 @@ public class AccountRangeDataRequest extends SnapDataRequest {
nbNodesSaved.getAndIncrement();
};
stackTrie.commit(nodeUpdater);
StackTrie.FlatDatabaseUpdater flatDatabaseUpdater = noop();
if (worldStateStorage.getFlatDbMode().equals(FlatDbMode.FULL)) {
// we have a flat DB only with Bonsai
flatDatabaseUpdater =
(key, value) ->
((BonsaiWorldStateKeyValueStorage.BonsaiUpdater) updater)
.putAccountInfoState(Hash.wrap(key), value);
}
stackTrie.commit(flatDatabaseUpdater, nodeUpdater);
downloadState.getMetricsManager().notifyAccountsDownloaded(stackTrie.getElementsCount().get());
@ -149,7 +163,7 @@ public class AccountRangeDataRequest extends SnapDataRequest {
public Stream<SnapDataRequest> getChildRequests(
final SnapWorldDownloadState downloadState,
final WorldStateStorage worldStateStorage,
final SnapSyncState snapSyncState) {
final SnapSyncProcessState snapSyncState) {
final List<SnapDataRequest> childRequests = new ArrayList<>();
final StackTrie.TaskElement taskElement = stackTrie.getElement(startKeyHash);
@ -159,11 +173,14 @@ public class AccountRangeDataRequest extends SnapDataRequest {
missingRightElement -> {
downloadState
.getMetricsManager()
.notifyStateDownloaded(missingRightElement, endKeyHash);
.notifyRangeProgress(DOWNLOAD, missingRightElement, endKeyHash);
childRequests.add(
createAccountRangeDataRequest(getRootHash(), missingRightElement, endKeyHash));
},
() -> downloadState.getMetricsManager().notifyStateDownloaded(endKeyHash, endKeyHash));
() ->
downloadState
.getMetricsManager()
.notifyRangeProgress(DOWNLOAD, endKeyHash, endKeyHash));
// find missing storages and code
for (Map.Entry<Bytes32, Bytes> account : taskElement.keys().entrySet()) {

@ -18,7 +18,8 @@ import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RequestType.BYTECO
import static org.slf4j.LoggerFactory.getLogger;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncProcessState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapWorldDownloadState;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage.Updater;
@ -53,14 +54,19 @@ public class BytecodeRequest extends SnapDataRequest {
final WorldStateStorage worldStateStorage,
final Updater updater,
final SnapWorldDownloadState downloadState,
final SnapSyncState snapSyncState) {
final SnapSyncProcessState snapSyncState,
final SnapSyncConfiguration snapSyncConfiguration) {
updater.putCode(Hash.wrap(accountHash), code);
downloadState.getMetricsManager().notifyCodeDownloaded();
return possibleParent
.map(
trieNodeDataRequest ->
trieNodeDataRequest.saveParent(
worldStateStorage, updater, downloadState, snapSyncState)
worldStateStorage,
updater,
downloadState,
snapSyncState,
snapSyncConfiguration)
+ 1)
.orElse(1);
}
@ -74,7 +80,7 @@ public class BytecodeRequest extends SnapDataRequest {
public Stream<SnapDataRequest> getChildRequests(
final SnapWorldDownloadState downloadState,
final WorldStateStorage worldStateStorage,
final SnapSyncState snapSyncState) {
final SnapSyncProcessState snapSyncState) {
return Stream.empty();
}

@ -18,8 +18,14 @@ import static org.hyperledger.besu.ethereum.eth.sync.fastsync.worldstate.NodeDat
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.RequestType;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncProcessState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapWorldDownloadState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal.AccountFlatDatabaseHealingRangeRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal.AccountTrieNodeHealingRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal.StorageFlatDatabaseHealingRangeRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal.StorageTrieNodeHealingRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal.TrieNodeHealingRequest;
import org.hyperledger.besu.ethereum.eth.sync.worldstate.WorldStateDownloaderException;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.services.tasks.TasksPriorityProvider;
@ -34,7 +40,7 @@ import org.apache.tuweni.bytes.Bytes32;
public abstract class SnapDataRequest implements TasksPriorityProvider {
protected Optional<TrieNodeDataRequest> possibleParent = Optional.empty();
protected Optional<TrieNodeHealingRequest> possibleParent = Optional.empty();
protected int depth;
protected long priority;
protected final AtomicInteger pendingChildren = new AtomicInteger(0);
@ -52,6 +58,21 @@ public abstract class SnapDataRequest implements TasksPriorityProvider {
return new AccountRangeDataRequest(rootHash, startKeyHash, endKeyHash);
}
public static AccountFlatDatabaseHealingRangeRequest createAccountFlatHealingRangeRequest(
final Hash rootHash, final Bytes32 startKeyHash, final Bytes32 endKeyHash) {
return new AccountFlatDatabaseHealingRangeRequest(rootHash, startKeyHash, endKeyHash);
}
public static StorageFlatDatabaseHealingRangeRequest createStorageFlatHealingRangeRequest(
final Hash rootHash,
final Bytes32 accountHash,
final Bytes32 storageRoot,
final Bytes32 startKeyHash,
final Bytes32 endKeyHash) {
return new StorageFlatDatabaseHealingRangeRequest(
rootHash, accountHash, storageRoot, startKeyHash, endKeyHash);
}
public static AccountRangeDataRequest createAccountDataRequest(
final Hash rootHash,
final Hash accountHash,
@ -70,22 +91,22 @@ public abstract class SnapDataRequest implements TasksPriorityProvider {
rootHash, accountHash, storageRoot, startKeyHash, endKeyHash);
}
public static AccountTrieNodeDataRequest createAccountTrieNodeDataRequest(
public static AccountTrieNodeHealingRequest createAccountTrieNodeDataRequest(
final Hash hash, final Bytes location, final HashSet<Bytes> inconsistentAccounts) {
return new AccountTrieNodeDataRequest(hash, hash, location, inconsistentAccounts);
return new AccountTrieNodeHealingRequest(hash, hash, location, inconsistentAccounts);
}
public static AccountTrieNodeDataRequest createAccountTrieNodeDataRequest(
public static AccountTrieNodeHealingRequest createAccountTrieNodeDataRequest(
final Hash hash,
final Hash rootHash,
final Bytes location,
final HashSet<Bytes> inconsistentAccounts) {
return new AccountTrieNodeDataRequest(hash, rootHash, location, inconsistentAccounts);
return new AccountTrieNodeHealingRequest(hash, rootHash, location, inconsistentAccounts);
}
public static StorageTrieNodeDataRequest createStorageTrieNodeDataRequest(
public static StorageTrieNodeHealingRequest createStorageTrieNodeDataRequest(
final Hash hash, final Hash accountHash, final Hash rootHash, final Bytes location) {
return new StorageTrieNodeDataRequest(hash, accountHash, rootHash, location);
return new StorageTrieNodeHealingRequest(hash, accountHash, rootHash, location);
}
public static BytecodeRequest createBytecodeRequest(
@ -97,28 +118,31 @@ public abstract class SnapDataRequest implements TasksPriorityProvider {
final WorldStateStorage worldStateStorage,
final WorldStateStorage.Updater updater,
final SnapWorldDownloadState downloadState,
final SnapSyncState snapSyncState) {
return doPersist(worldStateStorage, updater, downloadState, snapSyncState);
final SnapSyncProcessState snapSyncState,
final SnapSyncConfiguration snapSyncConfiguration) {
return doPersist(
worldStateStorage, updater, downloadState, snapSyncState, snapSyncConfiguration);
}
protected abstract int doPersist(
final WorldStateStorage worldStateStorage,
final WorldStateStorage.Updater updater,
final SnapWorldDownloadState downloadState,
final SnapSyncState snapSyncState);
final SnapSyncProcessState snapSyncState,
final SnapSyncConfiguration snapSyncConfiguration);
public abstract boolean isResponseReceived();
public boolean isExpired(final SnapSyncState snapSyncState) {
public boolean isExpired(final SnapSyncProcessState snapSyncState) {
return false;
}
public abstract Stream<SnapDataRequest> getChildRequests(
final SnapWorldDownloadState downloadState,
final WorldStateStorage worldStateStorage,
final SnapSyncState snapSyncState);
final SnapSyncProcessState snapSyncState);
protected void registerParent(final TrieNodeDataRequest parent) {
public void registerParent(final TrieNodeHealingRequest parent) {
if (this.possibleParent.isPresent()) {
throw new WorldStateDownloaderException("Cannot set parent twice");
}
@ -135,9 +159,11 @@ public abstract class SnapDataRequest implements TasksPriorityProvider {
final WorldStateStorage worldStateStorage,
final WorldStateStorage.Updater updater,
final SnapWorldDownloadState downloadState,
final SnapSyncState snapSyncState) {
final SnapSyncProcessState snapSyncState,
final SnapSyncConfiguration snapSyncConfiguration) {
if (pendingChildren.decrementAndGet() == 0) {
return persist(worldStateStorage, updater, downloadState, snapSyncState);
return persist(
worldStateStorage, updater, downloadState, snapSyncState, snapSyncConfiguration);
}
return 0;
}
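A sketch of seeding flat-database healing with the new factory method; whether the PR triggers it exactly like this is not visible in this excerpt, so the call site, stateRoot and range bounds are assumptions. MIN_RANGE and MAX_RANGE are the RangeManager constants used elsewhere in the change.
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager.MAX_RANGE;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager.MIN_RANGE;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapWorldDownloadState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
final class FlatHealSeedSketch {
  // Enqueue one flat-account healing request covering the whole key space; the request splits
  // itself into child ranges as it streams and validates the local flat data.
  static void seed(final SnapWorldDownloadState downloadState, final Hash stateRoot) {
    downloadState.enqueueRequest(
        SnapDataRequest.createAccountFlatHealingRangeRequest(stateRoot, MIN_RANGE, MAX_RANGE));
  }
}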

@ -17,17 +17,21 @@ package org.hyperledger.besu.ethereum.eth.sync.snapsync.request;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager.MAX_RANGE;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager.MIN_RANGE;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager.findNewBeginElementInRange;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager.getRangeCount;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RequestType.STORAGE_RANGE;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.StackTrie.FlatDatabaseUpdater.noop;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncProcessState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapWorldDownloadState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.StackTrie;
import org.hyperledger.besu.ethereum.eth.sync.worldstate.WorldDownloadState;
import org.hyperledger.besu.ethereum.proof.WorldStateProofProvider;
import org.hyperledger.besu.ethereum.trie.CompactEncoding;
import org.hyperledger.besu.ethereum.trie.NodeUpdater;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage.Updater;
@ -36,13 +40,13 @@ import java.util.List;
import java.util.Optional;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Stream;
import com.google.common.annotations.VisibleForTesting;
import kotlin.collections.ArrayDeque;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.apache.tuweni.rlp.RLP;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -85,23 +89,27 @@ public class StorageRangeDataRequest extends SnapDataRequest {
final WorldStateStorage worldStateStorage,
final Updater updater,
final SnapWorldDownloadState downloadState,
final SnapSyncState snapSyncState) {
final SnapSyncProcessState snapSyncState,
final SnapSyncConfiguration snapSyncConfiguration) {
// search incomplete nodes in the range
final AtomicInteger nbNodesSaved = new AtomicInteger();
final AtomicReference<Updater> updaterTmp = new AtomicReference<>(worldStateStorage.updater());
final NodeUpdater nodeUpdater =
(location, hash, value) -> {
updaterTmp.get().putAccountStorageTrieNode(accountHash, location, hash, value);
if (nbNodesSaved.getAndIncrement() % 1000 == 0) {
updaterTmp.get().commit();
updaterTmp.set(worldStateStorage.updater());
}
updater.putAccountStorageTrieNode(accountHash, location, hash, value);
};
stackTrie.commit(nodeUpdater);
StackTrie.FlatDatabaseUpdater flatDatabaseUpdater = noop();
if (worldStateStorage.getFlatDbMode().equals(FlatDbMode.FULL)) {
// we have a flat DB only with Bonsai
flatDatabaseUpdater =
(key, value) ->
((BonsaiWorldStateKeyValueStorage.Updater) updater)
.putStorageValueBySlotHash(
accountHash, Hash.wrap(key), Bytes32.leftPad(RLP.decodeValue(value)));
}
updaterTmp.get().commit();
stackTrie.commit(flatDatabaseUpdater, nodeUpdater);
downloadState.getMetricsManager().notifySlotsDownloaded(stackTrie.getElementsCount().get());
@ -109,7 +117,7 @@ public class StorageRangeDataRequest extends SnapDataRequest {
}
public void addResponse(
final WorldDownloadState<SnapDataRequest> downloadState,
final SnapWorldDownloadState downloadState,
final WorldStateProofProvider worldStateProofProvider,
final TreeMap<Bytes32, Bytes> slots,
final ArrayDeque<Bytes> proofs) {
@ -133,7 +141,7 @@ public class StorageRangeDataRequest extends SnapDataRequest {
}
@Override
public boolean isExpired(final SnapSyncState snapSyncState) {
public boolean isExpired(final SnapSyncProcessState snapSyncState) {
return snapSyncState.isExpired(this);
}
@ -141,7 +149,7 @@ public class StorageRangeDataRequest extends SnapDataRequest {
public Stream<SnapDataRequest> getChildRequests(
final SnapWorldDownloadState downloadState,
final WorldStateStorage worldStateStorage,
final SnapSyncState snapSyncState) {
final SnapSyncProcessState snapSyncState) {
final List<SnapDataRequest> childRequests = new ArrayList<>();
if (!isProofValid.orElse(false)) {
@ -153,7 +161,7 @@ public class StorageRangeDataRequest extends SnapDataRequest {
findNewBeginElementInRange(storageRoot, taskElement.proofs(), taskElement.keys(), endKeyHash)
.ifPresent(
missingRightElement -> {
final int nbRanges = findNbRanges(taskElement.keys());
final int nbRanges = getRangeCount(startKeyHash, endKeyHash, taskElement.keys());
RangeManager.generateRanges(missingRightElement, endKeyHash, nbRanges)
.forEach(
(key, value) -> {
@ -163,28 +171,15 @@ public class StorageRangeDataRequest extends SnapDataRequest {
storageRangeDataRequest.addStackTrie(Optional.of(stackTrie));
childRequests.add(storageRangeDataRequest);
});
if (!snapSyncState.isHealInProgress()
&& startKeyHash.equals(MIN_RANGE)
&& endKeyHash.equals(MAX_RANGE)) {
if (startKeyHash.equals(MIN_RANGE) && endKeyHash.equals(MAX_RANGE)) {
// need to heal this account storage
downloadState.addInconsistentAccount(CompactEncoding.bytesToPath(accountHash));
downloadState.addAccountsToBeRepaired(CompactEncoding.bytesToPath(accountHash));
}
});
return childRequests.stream();
}
private int findNbRanges(final TreeMap<Bytes32, Bytes> slots) {
if (startKeyHash.equals(MIN_RANGE) && endKeyHash.equals(MAX_RANGE)) {
return MAX_RANGE
.toUnsignedBigInteger()
.divide(
slots.lastKey().toUnsignedBigInteger().subtract(startKeyHash.toUnsignedBigInteger()))
.intValue();
}
return 1;
}
public Bytes32 getAccountHash() {
return accountHash;
}

@ -0,0 +1,210 @@
/*
* Copyright contributors to Hyperledger Besu
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager.MAX_RANGE;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager.MIN_RANGE;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapsyncMetricsManager.Step.HEAL_FLAT;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.RequestType;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncProcessState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapWorldDownloadState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.proof.WorldStateProofProvider;
import org.hyperledger.besu.ethereum.rlp.RLP;
import org.hyperledger.besu.ethereum.trie.CompactEncoding;
import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.trie.RangeStorageEntriesCollector;
import org.hyperledger.besu.ethereum.trie.TrieIterator;
import org.hyperledger.besu.ethereum.trie.patricia.StoredMerklePatriciaTrie;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.TreeMap;
import java.util.function.Function;
import java.util.stream.Stream;
import kotlin.collections.ArrayDeque;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
/**
* The AccountFlatDatabaseHealingRangeRequest class represents a request to heal a range of accounts
* in the flat database. It encapsulates the necessary information to identify the range and
* initiate the healing process.
*/
public class AccountFlatDatabaseHealingRangeRequest extends SnapDataRequest {
private final Bytes32 startKeyHash;
private final Bytes32 endKeyHash;
private TreeMap<Bytes32, Bytes> existingAccounts;
private TreeMap<Bytes32, Bytes> removedAccounts;
private boolean isProofValid;
public AccountFlatDatabaseHealingRangeRequest(
final Hash originalRootHash, final Bytes32 startKeyHash, final Bytes32 endKeyHash) {
super(RequestType.ACCOUNT_RANGE, originalRootHash);
this.startKeyHash = startKeyHash;
this.endKeyHash = endKeyHash;
this.existingAccounts = new TreeMap<>();
this.removedAccounts = new TreeMap<>();
this.isProofValid = false;
}
@Override
public Stream<SnapDataRequest> getChildRequests(
final SnapWorldDownloadState downloadState,
final WorldStateStorage worldStateStorage,
final SnapSyncProcessState snapSyncState) {
final List<SnapDataRequest> childRequests = new ArrayList<>();
if (!existingAccounts.isEmpty()) {
// a new request is added if the response does not cover the whole requested range
RangeManager.generateRanges(
existingAccounts.lastKey().toUnsignedBigInteger().add(BigInteger.ONE),
endKeyHash.toUnsignedBigInteger(),
1)
.forEach(
(key, value) -> {
downloadState.getMetricsManager().notifyRangeProgress(HEAL_FLAT, key, endKeyHash);
final AccountFlatDatabaseHealingRangeRequest storageRangeDataRequest =
createAccountFlatHealingRangeRequest(getRootHash(), key, value);
childRequests.add(storageRangeDataRequest);
});
} else {
downloadState.getMetricsManager().notifyRangeProgress(HEAL_FLAT, endKeyHash, endKeyHash);
}
Stream.of(existingAccounts.entrySet(), removedAccounts.entrySet())
.flatMap(Collection::stream)
.forEach(
account -> {
if (downloadState
.getAccountsToBeRepaired()
.contains(CompactEncoding.bytesToPath(account.getKey()))) {
final StateTrieAccountValue accountValue =
StateTrieAccountValue.readFrom(RLP.input(account.getValue()));
childRequests.add(
createStorageFlatHealingRangeRequest(
getRootHash(),
account.getKey(),
accountValue.getStorageRoot(),
MIN_RANGE,
MAX_RANGE));
}
});
return childRequests.stream();
}
public Bytes32 getStartKeyHash() {
return startKeyHash;
}
public Bytes32 getEndKeyHash() {
return endKeyHash;
}
@Override
public boolean isResponseReceived() {
return true;
}
public void addLocalData(
final WorldStateProofProvider worldStateProofProvider,
final TreeMap<Bytes32, Bytes> accounts,
final ArrayDeque<Bytes> proofs) {
if (!accounts.isEmpty() && !proofs.isEmpty()) {
// verify the proof in order to check whether the local flat database is valid or not
isProofValid =
worldStateProofProvider.isValidRangeProof(
startKeyHash, endKeyHash, getRootHash(), proofs, accounts);
this.existingAccounts = accounts;
}
}
@Override
protected int doPersist(
final WorldStateStorage worldStateStorage,
final WorldStateStorage.Updater updater,
final SnapWorldDownloadState downloadState,
final SnapSyncProcessState snapSyncState,
final SnapSyncConfiguration syncConfig) {
if (!isProofValid) { // if proof is not valid we need to fix the flat database
final BonsaiWorldStateKeyValueStorage.Updater bonsaiUpdater =
(BonsaiWorldStateKeyValueStorage.Updater) updater;
final MerkleTrie<Bytes, Bytes> accountTrie =
new StoredMerklePatriciaTrie<>(
worldStateStorage::getAccountStateTrieNode,
getRootHash(),
Function.identity(),
Function.identity());
// retrieve the data from the trie in order to know what to fix in the flat database
final RangeStorageEntriesCollector collector =
RangeStorageEntriesCollector.createCollector(
startKeyHash,
existingAccounts.isEmpty() ? endKeyHash : existingAccounts.lastKey(),
existingAccounts.isEmpty()
? syncConfig.getLocalFlatAccountCountToHealPerRequest()
: Integer.MAX_VALUE,
Integer.MAX_VALUE);
// start with every flat account in the removal list, then drop each one that is also present in
// the trie; whatever remains must be removed from the flat database and healed.
removedAccounts = new TreeMap<>(existingAccounts);
final TrieIterator<Bytes> visitor = RangeStorageEntriesCollector.createVisitor(collector);
existingAccounts =
(TreeMap<Bytes32, Bytes>)
accountTrie.entriesFrom(
root ->
RangeStorageEntriesCollector.collectEntries(
collector, visitor, root, startKeyHash));
// doing the fix
existingAccounts.forEach(
(key, value) -> {
if (removedAccounts.containsKey(key)) {
removedAccounts.remove(key);
} else {
final Hash accountHash = Hash.wrap(key);
// if the account was missing in the flat db we need to heal the storage
downloadState.addAccountsToBeRepaired(CompactEncoding.bytesToPath(accountHash));
bonsaiUpdater.putAccountInfoState(accountHash, value);
}
});
removedAccounts.forEach(
(key, value) -> {
final Hash accountHash = Hash.wrap(key);
// if the account was removed we will have to heal the storage
downloadState.addAccountsToBeRepaired(CompactEncoding.bytesToPath(accountHash));
bonsaiUpdater.removeAccountInfoState(accountHash);
});
}
return existingAccounts.size() + removedAccounts.size();
}
}
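To make the flow above concrete, a hypothetical driver mirroring one step of the flat-account healing pipeline: locally streamed flat accounts are fed into the request together with a trie-generated range proof, and persist() repairs the flat database when that proof does not validate. The wrapper class and variable names are assumptions, the updater is assumed to be a Bonsai one (doPersist casts it), and the return value is taken to be the number of flat entries touched, as doPersist above returns.
import java.util.TreeMap;
import kotlin.collections.ArrayDeque;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncProcessState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapWorldDownloadState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal.AccountFlatDatabaseHealingRangeRequest;
import org.hyperledger.besu.ethereum.proof.WorldStateProofProvider;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
final class FlatAccountHealSketch {
  // Run one heal step for a range request: validate the locally streamed flat accounts against
  // the trie-generated proof, then let persist() fix the flat database if the proof is invalid.
  static int healOnce(
      final AccountFlatDatabaseHealingRangeRequest request,
      final WorldStateProofProvider worldStateProofProvider,
      final TreeMap<Bytes32, Bytes> accounts,
      final ArrayDeque<Bytes> proofs,
      final WorldStateStorage worldStateStorage,
      final WorldStateStorage.Updater updater,
      final SnapWorldDownloadState downloadState,
      final SnapSyncProcessState snapSyncState,
      final SnapSyncConfiguration snapSyncConfiguration) {
    request.addLocalData(worldStateProofProvider, accounts, proofs);
    // persist() walks the account trie over the same range when the proof failed, re-inserting
    // missing accounts, removing stale ones, and flagging their storage for repair.
    return request.persist(
        worldStateStorage, updater, downloadState, snapSyncState, snapSyncConfiguration);
  }
}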

@ -12,16 +12,21 @@
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.eth.sync.snapsync.request;
package org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest.createAccountTrieNodeDataRequest;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncProcessState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapWorldDownloadState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.rlp.RLP;
import org.hyperledger.besu.ethereum.trie.CompactEncoding;
import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.trie.patricia.StoredMerklePatriciaTrie;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
@ -35,11 +40,12 @@ import java.util.stream.Stream;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
public class AccountTrieNodeDataRequest extends TrieNodeDataRequest {
/** Represents a healing request for an account trie node. */
public class AccountTrieNodeHealingRequest extends TrieNodeHealingRequest {
private final HashSet<Bytes> inconsistentAccounts;
AccountTrieNodeDataRequest(
public AccountTrieNodeHealingRequest(
final Hash hash,
final Hash originalRootHash,
final Bytes location,
@ -53,7 +59,8 @@ public class AccountTrieNodeDataRequest extends TrieNodeDataRequest {
final WorldStateStorage worldStateStorage,
final WorldStateStorage.Updater updater,
final SnapWorldDownloadState downloadState,
final SnapSyncState snapSyncState) {
final SnapSyncProcessState snapSyncState,
final SnapSyncConfiguration snapSyncConfiguration) {
if (isRoot()) {
downloadState.setRootNodeData(data);
}
@ -62,7 +69,8 @@ public class AccountTrieNodeDataRequest extends TrieNodeDataRequest {
}
@Override
public Optional<Bytes> getExistingData(final WorldStateStorage worldStateStorage) {
public Optional<Bytes> getExistingData(
final SnapWorldDownloadState downloadState, final WorldStateStorage worldStateStorage) {
return worldStateStorage
.getAccountStateTrieNode(getLocation(), getNodeHash())
.filter(data -> !getLocation().isEmpty());
@ -131,7 +139,9 @@ public class AccountTrieNodeDataRequest extends TrieNodeDataRequest {
final Hash accountHash =
Hash.wrap(
Bytes32.wrap(CompactEncoding.pathToBytes(Bytes.concatenate(getLocation(), path))));
if (worldStateStorage instanceof BonsaiWorldStateKeyValueStorage) {
// update the flat db only when a flat database is present (i.e. Bonsai)
if (!worldStateStorage.getFlatDbMode().equals(FlatDbMode.NO_FLATTENED)) {
((BonsaiWorldStateKeyValueStorage.Updater) worldStateStorage.updater())
.putAccountInfoState(accountHash, value)
.commit();

@ -0,0 +1,190 @@
/*
* Copyright contributors to Hyperledger Besu
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager.getRangeCount;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RequestType.STORAGE_RANGE;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncProcessState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapWorldDownloadState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.proof.WorldStateProofProvider;
import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.trie.RangeStorageEntriesCollector;
import org.hyperledger.besu.ethereum.trie.TrieIterator;
import org.hyperledger.besu.ethereum.trie.patricia.StoredMerklePatriciaTrie;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.function.Function;
import java.util.stream.Stream;
import kotlin.collections.ArrayDeque;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.apache.tuweni.rlp.RLP;
/**
* The StorageFlatDatabaseHealingRangeRequest class represents a request to heal a range of storage
* in the flat databases. It encapsulates the necessary information to identify the range and
* initiate the healing process.
*/
public class StorageFlatDatabaseHealingRangeRequest extends SnapDataRequest {
private final Hash accountHash;
private final Bytes32 storageRoot;
private final Bytes32 startKeyHash;
private final Bytes32 endKeyHash;
private TreeMap<Bytes32, Bytes> slots;
private boolean isProofValid;
public StorageFlatDatabaseHealingRangeRequest(
final Hash rootHash,
final Bytes32 accountHash,
final Bytes32 storageRoot,
final Bytes32 startKeyHash,
final Bytes32 endKeyHash) {
super(STORAGE_RANGE, rootHash);
this.accountHash = Hash.wrap(accountHash);
this.storageRoot = storageRoot;
this.startKeyHash = startKeyHash;
this.endKeyHash = endKeyHash;
this.isProofValid = false;
}
@Override
public Stream<SnapDataRequest> getChildRequests(
final SnapWorldDownloadState downloadState,
final WorldStateStorage worldStateStorage,
final SnapSyncProcessState snapSyncState) {
final List<SnapDataRequest> childRequests = new ArrayList<>();
if (!slots.isEmpty()) {
// a new request is added if the response does not cover the whole requested range
final int nbRanges = getRangeCount(startKeyHash, endKeyHash, slots);
RangeManager.generateRanges(
slots.lastKey().toUnsignedBigInteger().add(BigInteger.ONE),
endKeyHash.toUnsignedBigInteger(),
nbRanges)
.forEach(
(key, value) -> {
final StorageFlatDatabaseHealingRangeRequest storageRangeDataRequest =
createStorageFlatHealingRangeRequest(
getRootHash(), accountHash, storageRoot, key, value);
childRequests.add(storageRangeDataRequest);
});
}
return childRequests.stream();
}
public Hash getAccountHash() {
return accountHash;
}
public Bytes32 getStorageRoot() {
return storageRoot;
}
public Bytes32 getStartKeyHash() {
return startKeyHash;
}
public Bytes32 getEndKeyHash() {
return endKeyHash;
}
@Override
public boolean isResponseReceived() {
return true;
}
public void addLocalData(
final WorldStateProofProvider worldStateProofProvider,
final TreeMap<Bytes32, Bytes> slots,
final ArrayDeque<Bytes> proofs) {
if (!slots.isEmpty() && !proofs.isEmpty()) {
// verify the proof in order to check whether the local flat database is valid or not
isProofValid =
worldStateProofProvider.isValidRangeProof(
startKeyHash, endKeyHash, storageRoot, proofs, slots);
}
this.slots = slots;
}
@Override
protected int doPersist(
final WorldStateStorage worldStateStorage,
final WorldStateStorage.Updater updater,
final SnapWorldDownloadState downloadState,
final SnapSyncProcessState snapSyncState,
final SnapSyncConfiguration snapSyncConfiguration) {
if (!isProofValid) {
// If the proof is not valid, it indicates that the flat database needs to be fixed.
final BonsaiWorldStateKeyValueStorage.Updater bonsaiUpdater =
(BonsaiWorldStateKeyValueStorage.Updater) updater;
final MerkleTrie<Bytes, Bytes> storageTrie =
new StoredMerklePatriciaTrie<>(
(location, hash) ->
worldStateStorage.getAccountStorageTrieNode(accountHash, location, hash),
storageRoot,
Function.identity(),
Function.identity());
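// start from every slot found in the flat database; entries still present after the trie walk
// are stale and will be deleted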
Map<Bytes32, Bytes> remainingKeys = new TreeMap<>(slots);
// Retrieve the data from the trie in order to know what needs to be fixed in the flat
// database
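// bound the walk: when the flat db returned nothing for this range, collect at most the
// configured localFlatStorageCountToHealPerRequest slots; otherwise stop at the last flat-db key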
final RangeStorageEntriesCollector collector =
RangeStorageEntriesCollector.createCollector(
startKeyHash,
slots.isEmpty() ? endKeyHash : slots.lastKey(),
slots.isEmpty()
? snapSyncConfiguration.getLocalFlatStorageCountToHealPerRequest()
: Integer.MAX_VALUE,
Integer.MAX_VALUE);
final TrieIterator<Bytes> visitor = RangeStorageEntriesCollector.createVisitor(collector);
slots =
(TreeMap<Bytes32, Bytes>)
storageTrie.entriesFrom(
root ->
RangeStorageEntriesCollector.collectEntries(
collector, visitor, root, startKeyHash));
// Perform the fix by updating the flat database
slots.forEach(
(key, value) -> {
if (remainingKeys.containsKey(key)) {
remainingKeys.remove(key);
} else {
bonsaiUpdater.putStorageValueBySlotHash(
accountHash, Hash.wrap(key), Bytes32.leftPad(RLP.decodeValue(value)));
}
});
remainingKeys.forEach(
(key, value) -> bonsaiUpdater.removeStorageValueBySlotHash(accountHash, Hash.wrap(key)));
}
return slots.size();
}
}
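(A condensed, package-internal sketch of the flow this class enables, mirroring the tests added later in this diff rather than introducing new API: the slots read from the local flat database are handed to the request together with their range proofs, and persisting the request walks the storage trie and patches the flat database whenever the proof does not validate. Variable names are placeholders.)

// Sketch of healing a single storage range; every call below appears elsewhere in this PR.
final StorageFlatDatabaseHealingRangeRequest request =
    new StorageFlatDatabaseHealingRangeRequest(
        stateRoot, accountHash, storageRoot, RangeManager.MIN_RANGE, RangeManager.MAX_RANGE);
request.addLocalData(worldStateProofProvider, flatDbSlots, new ArrayDeque<>(proofs));
request.doPersist(
    worldStateStorage,
    worldStateStorage.updater(),
    downloadState,
    snapSyncState,
    SnapSyncConfiguration.getDefault());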

@ -12,13 +12,17 @@
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.eth.sync.snapsync.request;
package org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncProcessState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapWorldDownloadState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.trie.CompactEncoding;
import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage.Updater;
@ -30,11 +34,12 @@ import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.apache.tuweni.rlp.RLP;
public class StorageTrieNodeDataRequest extends TrieNodeDataRequest {
/** Represents a healing request for a storage trie node. */
public class StorageTrieNodeHealingRequest extends TrieNodeHealingRequest {
final Hash accountHash;
StorageTrieNodeDataRequest(
public StorageTrieNodeHealingRequest(
final Hash nodeHash, final Hash accountHash, final Hash rootHash, final Bytes location) {
super(nodeHash, rootHash, location);
this.accountHash = accountHash;
@ -45,20 +50,42 @@ public class StorageTrieNodeDataRequest extends TrieNodeDataRequest {
final WorldStateStorage worldStateStorage,
final Updater updater,
final SnapWorldDownloadState downloadState,
final SnapSyncState snapSyncState) {
final SnapSyncProcessState snapSyncState,
final SnapSyncConfiguration snapSyncConfiguration) {
updater.putAccountStorageTrieNode(getAccountHash(), getLocation(), getNodeHash(), data);
return 1;
}
@Override
public Optional<Bytes> getExistingData(final WorldStateStorage worldStateStorage) {
return worldStateStorage.getAccountStorageTrieNode(
getAccountHash(), getLocation(), getNodeHash());
public Optional<Bytes> getExistingData(
final SnapWorldDownloadState downloadState, final WorldStateStorage worldStateStorage) {
Optional<Bytes> accountStorageTrieNode =
worldStateStorage.getAccountStorageTrieNode(
getAccountHash(),
getLocation(),
null); // pass null to skip the hash check in the getAccountStorageTrieNode method
if (accountStorageTrieNode.isPresent()) {
return accountStorageTrieNode
.filter(node -> Hash.hash(node).equals(getNodeHash()))
.or(
() -> { // if a storage node exists in the database but is not the expected one, the
// account will need to be repaired later
downloadState.addAccountsToBeRepaired(
CompactEncoding.bytesToPath(getAccountHash()));
return Optional.empty();
});
} else {
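// nothing in the database: an empty trie root can be answered locally, anything else must be downloaded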
if (getNodeHash().equals(MerkleTrie.EMPTY_TRIE_NODE_HASH)) {
return Optional.of(MerkleTrie.EMPTY_TRIE_NODE);
}
return Optional.empty();
}
}
@Override
protected SnapDataRequest createChildNodeDataRequest(final Hash childHash, final Bytes location) {
return createStorageTrieNodeDataRequest(childHash, getAccountHash(), getRootHash(), location);
return SnapDataRequest.createStorageTrieNodeDataRequest(
childHash, getAccountHash(), getRootHash(), location);
}
@Override
@ -67,7 +94,7 @@ public class StorageTrieNodeDataRequest extends TrieNodeDataRequest {
final Bytes location,
final Bytes path,
final Bytes value) {
if (worldStateStorage instanceof BonsaiWorldStateKeyValueStorage) {
if (!worldStateStorage.getFlatDbMode().equals(FlatDbMode.NO_FLATTENED)) {
((BonsaiWorldStateKeyValueStorage.Updater) worldStateStorage.updater())
.putStorageValueBySlotHash(
accountHash, getSlotHash(location, path), Bytes32.leftPad(RLP.decodeValue(value)))

@ -12,14 +12,16 @@
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.eth.sync.snapsync.request;
package org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RequestType.TRIE_NODE;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncProcessState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapWorldDownloadState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.trie.Node;
import org.hyperledger.besu.ethereum.trie.patricia.TrieNodeDecoder;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
@ -34,7 +36,8 @@ import java.util.stream.Stream;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
public abstract class TrieNodeDataRequest extends SnapDataRequest implements TasksPriorityProvider {
public abstract class TrieNodeHealingRequest extends SnapDataRequest
implements TasksPriorityProvider {
private final Bytes32 nodeHash;
private final Bytes location;
@ -42,7 +45,7 @@ public abstract class TrieNodeDataRequest extends SnapDataRequest implements Tas
protected boolean requiresPersisting = true;
protected TrieNodeDataRequest(final Hash nodeHash, final Hash rootHash, final Bytes location) {
protected TrieNodeHealingRequest(final Hash nodeHash, final Hash rootHash, final Bytes location) {
super(TRIE_NODE, rootHash);
this.nodeHash = nodeHash;
this.location = location;
@ -54,7 +57,8 @@ public abstract class TrieNodeDataRequest extends SnapDataRequest implements Tas
final WorldStateStorage worldStateStorage,
final WorldStateStorage.Updater updater,
final SnapWorldDownloadState downloadState,
final SnapSyncState snapSyncState) {
final SnapSyncProcessState snapSyncState,
final SnapSyncConfiguration snapSyncConfiguration) {
if (isExpired(snapSyncState) || pendingChildren.get() > 0) {
// we do nothing. Our last child will eventually persist us.
return 0;
@ -62,12 +66,15 @@ public abstract class TrieNodeDataRequest extends SnapDataRequest implements Tas
int saved = 0;
if (requiresPersisting) {
checkNotNull(data, "Must set data before node can be persisted.");
saved = doPersist(worldStateStorage, updater, downloadState, snapSyncState);
saved =
doPersist(
worldStateStorage, updater, downloadState, snapSyncState, snapSyncConfiguration);
}
if (possibleParent.isPresent()) {
return possibleParent
.get()
.saveParent(worldStateStorage, updater, downloadState, snapSyncState)
.saveParent(
worldStateStorage, updater, downloadState, snapSyncState, snapSyncConfiguration)
+ saved;
}
return saved;
@ -77,7 +84,7 @@ public abstract class TrieNodeDataRequest extends SnapDataRequest implements Tas
public Stream<SnapDataRequest> getChildRequests(
final SnapWorldDownloadState downloadState,
final WorldStateStorage worldStateStorage,
final SnapSyncState snapSyncState) {
final SnapSyncProcessState snapSyncState) {
if (!isResponseReceived()) {
// If this node hasn't been downloaded yet, we can't return any child data
return Stream.empty();
@ -116,7 +123,7 @@ public abstract class TrieNodeDataRequest extends SnapDataRequest implements Tas
}
@Override
public boolean isExpired(final SnapSyncState snapSyncState) {
public boolean isExpired(final SnapSyncProcessState snapSyncState) {
return snapSyncState.isExpired(this);
}
@ -158,7 +165,8 @@ public abstract class TrieNodeDataRequest extends SnapDataRequest implements Tas
return !Objects.equals(node.getHash(), nodeHash) && node.isReferencedByHash();
}
public abstract Optional<Bytes> getExistingData(final WorldStateStorage worldStateStorage);
public abstract Optional<Bytes> getExistingData(
final SnapWorldDownloadState downloadState, final WorldStateStorage worldStateStorage);
public abstract List<Bytes> getTrieNodePath();

@ -61,7 +61,7 @@ public class SyncState {
private volatile boolean isResyncNeeded;
private Optional<Address> maybeAccountToRepair;
private Optional<Address> maybeAccountToRepair = Optional.empty();
public SyncState(final Blockchain blockchain, final EthPeers ethPeers) {
this(blockchain, ethPeers, false, Optional.empty());

@ -34,6 +34,7 @@ import org.hyperledger.besu.ethereum.eth.sync.fastsync.worldstate.FastWorldState
import org.hyperledger.besu.ethereum.eth.sync.fastsync.worldstate.NodeDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.worldstate.StalledDownloadException;
import org.hyperledger.besu.ethereum.eth.sync.worldstate.WorldStateDownloader;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.services.tasks.TaskCollection;
@ -77,6 +78,7 @@ public class FastSyncDownloaderTest {
@Before
public void setup() {
when(worldStateStorage.getDataStorageFormat()).thenReturn(DataStorageFormat.FOREST);
when(worldStateStorage.isWorldStateAvailable(any(), any())).thenReturn(true);
}

@ -41,7 +41,7 @@ public class CompleteTaskStepTest {
private static final Hash HASH = Hash.hash(Bytes.of(1, 2, 3));
private final SnapSyncState snapSyncState = mock(SnapSyncState.class);
private final SnapSyncProcessState snapSyncState = mock(SnapSyncProcessState.class);
private final SnapWorldDownloadState downloadState = mock(SnapWorldDownloadState.class);
private final BlockHeader blockHeader =

@ -35,18 +35,18 @@ import org.junit.Test;
public class DynamicPivotBlockManagerTest {
private final SnapSyncState snapSyncState = mock(SnapSyncState.class);
private final SnapSyncProcessState snapSyncState = mock(SnapSyncProcessState.class);
private final FastSyncActions fastSyncActions = mock(FastSyncActions.class);
private final SyncState syncState = mock(SyncState.class);
private final EthContext ethContext = mock(EthContext.class);
private DynamicPivotBlockManager dynamicPivotBlockManager;
private DynamicPivotBlockSelector dynamicPivotBlockManager;
@Before
public void setup() {
when(fastSyncActions.getSyncState()).thenReturn(syncState);
when(ethContext.getScheduler()).thenReturn(new DeterministicEthScheduler());
dynamicPivotBlockManager =
new DynamicPivotBlockManager(
new DynamicPivotBlockSelector(
ethContext,
fastSyncActions,
snapSyncState,

@ -25,8 +25,8 @@ import static org.mockito.Mockito.when;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.BlockHeaderTestFixture;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.AccountTrieNodeDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal.AccountTrieNodeHealingRequest;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.services.pipeline.Pipe;
@ -48,7 +48,7 @@ public class LoadLocalDataStepTest {
private final BlockHeader blockHeader =
new BlockHeaderTestFixture().stateRoot(HASH).buildHeader();
private final AccountTrieNodeDataRequest request =
private final AccountTrieNodeHealingRequest request =
SnapDataRequest.createAccountTrieNodeDataRequest(
HASH, Bytes.fromHexString("0x01"), new HashSet<>());
private final Task<SnapDataRequest> task = new StubTask(request);
@ -56,14 +56,20 @@ public class LoadLocalDataStepTest {
private final Pipe<Task<SnapDataRequest>> completedTasks =
new Pipe<>(10, NO_OP_COUNTER, NO_OP_COUNTER, NO_OP_COUNTER);
private final SnapSyncState snapSyncState = mock(SnapSyncState.class);
private final SnapSyncProcessState snapSyncState = mock(SnapSyncProcessState.class);
private final SnapWorldDownloadState downloadState = mock(SnapWorldDownloadState.class);
private final WorldStateStorage worldStateStorage = mock(WorldStateStorage.class);
private final WorldStateStorage.Updater updater = mock(WorldStateStorage.Updater.class);
private final SnapSyncConfiguration snapSyncConfiguration = mock(SnapSyncConfiguration.class);
private final LoadLocalDataStep loadLocalDataStep =
new LoadLocalDataStep(
worldStateStorage, downloadState, new NoOpMetricsSystem(), snapSyncState);
worldStateStorage,
downloadState,
snapSyncConfiguration,
new NoOpMetricsSystem(),
snapSyncState);
@Before
public void setup() {
@ -115,7 +121,8 @@ public class LoadLocalDataStepTest {
Mockito.reset(updater);
// Should not require persisting.
request.persist(worldStateStorage, updater, downloadState, snapSyncState);
request.persist(
worldStateStorage, updater, downloadState, snapSyncState, snapSyncConfiguration);
verifyNoInteractions(updater);
}
}

@ -40,11 +40,13 @@ public class PersistDataStepTest {
private final WorldStateStorage worldStateStorage =
new InMemoryKeyValueStorageProvider().createWorldStateStorage(DataStorageFormat.FOREST);
private final SnapSyncState snapSyncState = mock(SnapSyncState.class);
private final SnapSyncProcessState snapSyncState = mock(SnapSyncProcessState.class);
private final SnapWorldDownloadState downloadState = mock(SnapWorldDownloadState.class);
private final SnapSyncConfiguration snapSyncConfiguration = mock(SnapSyncConfiguration.class);
private final PersistDataStep persistDataStep =
new PersistDataStep(snapSyncState, worldStateStorage, downloadState);
new PersistDataStep(snapSyncState, worldStateStorage, downloadState, snapSyncConfiguration);
@Before
public void setUp() {

@ -14,6 +14,8 @@
*/
package org.hyperledger.besu.ethereum.eth.sync.snapsync;
import static org.assertj.core.api.Assertions.assertThat;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.core.TrieGenerator;
import org.hyperledger.besu.ethereum.proof.WorldStateProofProvider;
@ -32,11 +34,28 @@ import java.util.TreeMap;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.assertj.core.api.Assertions;
import org.junit.Test;
public final class RangeManagerTest {
@Test
public void testRemainingRangesEqualToOneWhenFirstRangeContainsMoreThanHalf() {
TreeMap<Bytes32, Bytes> items = new TreeMap<>();
items.put(Bytes32.repeat((byte) 0xbb), Bytes.wrap(new byte[] {0x03}));
int nbRanges =
RangeManager.getRangeCount(RangeManager.MIN_RANGE, RangeManager.MAX_RANGE, items);
assertThat(nbRanges).isEqualTo(1);
}
@Test
public void testRemainingRangesEqualToOneWhenFirstRangeContainsLessThanHalf() {
TreeMap<Bytes32, Bytes> items = new TreeMap<>();
items.put(Bytes32.repeat((byte) 0x77), Bytes.wrap(new byte[] {0x03}));
int nbRanges =
RangeManager.getRangeCount(RangeManager.MIN_RANGE, RangeManager.MAX_RANGE, items);
assertThat(nbRanges).isEqualTo(2);
}
@Test
public void testGenerateAllRangesWithSize1() {
final Map<Bytes32, Bytes32> expectedResult = new HashMap<>();
@ -45,8 +64,8 @@ public final class RangeManagerTest {
Bytes32.fromHexString(
"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"));
final Map<Bytes32, Bytes32> ranges = RangeManager.generateAllRanges(1);
Assertions.assertThat(ranges.size()).isEqualTo(1);
Assertions.assertThat(ranges).isEqualTo(expectedResult);
assertThat(ranges.size()).isEqualTo(1);
assertThat(ranges).isEqualTo(expectedResult);
}
@Test
@ -65,8 +84,8 @@ public final class RangeManagerTest {
Bytes32.fromHexString(
"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"));
final Map<Bytes32, Bytes32> ranges = RangeManager.generateAllRanges(3);
Assertions.assertThat(ranges.size()).isEqualTo(3);
Assertions.assertThat(ranges).isEqualTo(expectedResult);
assertThat(ranges.size()).isEqualTo(3);
assertThat(ranges).isEqualTo(expectedResult);
}
@Test
@ -91,8 +110,8 @@ public final class RangeManagerTest {
Bytes32.fromHexString(
"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
3);
Assertions.assertThat(ranges.size()).isEqualTo(3);
Assertions.assertThat(ranges).isEqualTo(expectedResult);
assertThat(ranges.size()).isEqualTo(3);
assertThat(ranges).isEqualTo(expectedResult);
}
@Test
@ -130,7 +149,7 @@ public final class RangeManagerTest {
RangeManager.findNewBeginElementInRange(
accountStateTrie.getRootHash(), proofs, accounts, RangeManager.MAX_RANGE);
Assertions.assertThat(newBeginElementInRange)
assertThat(newBeginElementInRange)
.contains(Bytes32.leftPad(Bytes.wrap(Bytes.ofUnsignedShort(0x0b))));
}
@ -169,6 +188,6 @@ public final class RangeManagerTest {
RangeManager.findNewBeginElementInRange(
accountStateTrie.getRootHash(), proofs, accounts, RangeManager.MAX_RANGE);
Assertions.assertThat(newBeginElementInRange).isEmpty();
assertThat(newBeginElementInRange).isEmpty();
}
}

@ -35,6 +35,7 @@ import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.BlockHeaderTestFixture;
import org.hyperledger.besu.ethereum.core.InMemoryKeyValueStorageProvider;
import org.hyperledger.besu.ethereum.eth.manager.task.EthTask;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.context.SnapSyncStatePersistenceManager;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.BytecodeRequest;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.eth.sync.worldstate.WorldStateDownloadProcess;
@ -56,6 +57,7 @@ import java.util.function.BiConsumer;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
@ -77,12 +79,13 @@ public class SnapWorldDownloadStateTest {
new InMemoryTasksPriorityQueues<>();
private final WorldStateDownloadProcess worldStateDownloadProcess =
mock(WorldStateDownloadProcess.class);
private final SnapSyncState snapSyncState = mock(SnapSyncState.class);
private final SnapPersistedContext snapContext = mock(SnapPersistedContext.class);
private final SnapSyncProcessState snapSyncState = mock(SnapSyncProcessState.class);
private final SnapSyncStatePersistenceManager snapContext =
mock(SnapSyncStatePersistenceManager.class);
private final SnapsyncMetricsManager metricsManager = mock(SnapsyncMetricsManager.class);
private final Blockchain blockchain = mock(Blockchain.class);
private final DynamicPivotBlockManager dynamicPivotBlockManager =
mock(DynamicPivotBlockManager.class);
private final DynamicPivotBlockSelector dynamicPivotBlockManager =
mock(DynamicPivotBlockSelector.class);
private final TestClock clock = new TestClock();
private SnapWorldDownloadState downloadState;
@ -91,13 +94,21 @@ public class SnapWorldDownloadStateTest {
@Parameterized.Parameters
public static Collection<Object[]> data() {
return Arrays.asList(new Object[][] {{DataStorageFormat.BONSAI}, {DataStorageFormat.FOREST}});
return Arrays.asList(
new Object[][] {
{DataStorageFormat.BONSAI, true},
{DataStorageFormat.BONSAI, false},
{DataStorageFormat.FOREST, false}
});
}
private final DataStorageFormat storageFormat;
private final boolean isFlatDbHealingEnabled;
public SnapWorldDownloadStateTest(final DataStorageFormat storageFormat) {
public SnapWorldDownloadStateTest(
final DataStorageFormat storageFormat, final boolean isFlatDbHealingEnabled) {
this.storageFormat = storageFormat;
this.isFlatDbHealingEnabled = isFlatDbHealingEnabled;
}
@Before
@ -123,7 +134,8 @@ public class SnapWorldDownloadStateTest {
MIN_MILLIS_BEFORE_STALLING,
metricsManager,
clock);
final DynamicPivotBlockManager dynamicPivotBlockManager = mock(DynamicPivotBlockManager.class);
final DynamicPivotBlockSelector dynamicPivotBlockManager =
mock(DynamicPivotBlockSelector.class);
doAnswer(
invocation -> {
BiConsumer<BlockHeader, Boolean> callback = invocation.getArgument(0);
@ -132,7 +144,7 @@ public class SnapWorldDownloadStateTest {
})
.when(dynamicPivotBlockManager)
.switchToNewPivotBlock(any());
downloadState.setDynamicPivotBlockManager(dynamicPivotBlockManager);
downloadState.setPivotBlockSelector(dynamicPivotBlockManager);
downloadState.setRootNodeData(ROOT_NODE_DATA);
future = downloadState.getDownloadFuture();
assertThat(downloadState.isDownloading()).isTrue();
@ -140,7 +152,8 @@ public class SnapWorldDownloadStateTest {
@Test
public void shouldCompleteReturnedFutureWhenNoPendingTasksRemain() {
when(snapSyncState.isHealInProgress()).thenReturn(true);
when(snapSyncState.isHealTrieInProgress()).thenReturn(true);
when(snapSyncState.isHealFlatDatabaseInProgress()).thenReturn(true);
downloadState.checkCompletion(header);
assertThat(future).isCompleted();
@ -149,7 +162,7 @@ public class SnapWorldDownloadStateTest {
@Test
public void shouldStartHealWhenNoSnapsyncPendingTasksRemain() {
when(snapSyncState.isHealInProgress()).thenReturn(false);
when(snapSyncState.isHealTrieInProgress()).thenReturn(false);
when(snapSyncState.getPivotBlockHeader()).thenReturn(Optional.of(mock(BlockHeader.class)));
assertThat(downloadState.pendingTrieNodeRequests.isEmpty()).isTrue();
@ -160,7 +173,8 @@ public class SnapWorldDownloadStateTest {
@Test
public void shouldStoreRootNodeBeforeReturnedFutureCompletes() {
when(snapSyncState.isHealInProgress()).thenReturn(true);
when(snapSyncState.isHealTrieInProgress()).thenReturn(true);
when(snapSyncState.isHealFlatDatabaseInProgress()).thenReturn(true);
final CompletableFuture<Void> postFutureChecks =
future.thenAccept(
result ->
@ -175,7 +189,7 @@ public class SnapWorldDownloadStateTest {
@Test
public void shouldNotCompleteWhenThereAreAccountPendingTasks() {
when(snapSyncState.isHealInProgress()).thenReturn(false);
when(snapSyncState.isHealTrieInProgress()).thenReturn(false);
downloadState.pendingAccountRequests.add(
SnapDataRequest.createAccountDataRequest(
Hash.EMPTY_TRIE_HASH,
@ -192,7 +206,7 @@ public class SnapWorldDownloadStateTest {
@Test
public void shouldNotCompleteWhenThereAreStoragePendingTasks() {
when(snapSyncState.isHealInProgress()).thenReturn(false);
when(snapSyncState.isHealTrieInProgress()).thenReturn(false);
downloadState.pendingStorageRequests.add(
SnapDataRequest.createStorageTrieNodeDataRequest(
Hash.EMPTY_TRIE_HASH, Hash.wrap(Bytes32.random()), Hash.EMPTY_TRIE_HASH, Bytes.EMPTY));
@ -203,7 +217,7 @@ public class SnapWorldDownloadStateTest {
assertThat(worldStateStorage.getAccountStateTrieNode(Bytes.EMPTY, ROOT_NODE_HASH)).isEmpty();
assertThat(downloadState.isDownloading()).isTrue();
downloadState.pendingBigStorageRequests.add(
downloadState.pendingLargeStorageRequests.add(
SnapDataRequest.createStorageTrieNodeDataRequest(
Hash.EMPTY_TRIE_HASH, Hash.wrap(Bytes32.random()), Hash.EMPTY_TRIE_HASH, Bytes.EMPTY));
@ -216,7 +230,7 @@ public class SnapWorldDownloadStateTest {
@Test
public void shouldNotCompleteWhenThereAreTriePendingTasks() {
when(snapSyncState.isHealInProgress()).thenReturn(true);
when(snapSyncState.isHealTrieInProgress()).thenReturn(true);
downloadState.pendingTrieNodeRequests.add(
SnapDataRequest.createAccountTrieNodeDataRequest(
Hash.wrap(Bytes32.random()), Bytes.EMPTY, new HashSet<>()));
@ -228,6 +242,21 @@ public class SnapWorldDownloadStateTest {
assertThat(downloadState.isDownloading()).isTrue();
}
@Test
public void shouldNotCompleteWhenThereAreFlatDBHealingPendingTasks() {
when(snapSyncState.isHealTrieInProgress()).thenReturn(true);
when(snapSyncState.isHealFlatDatabaseInProgress()).thenReturn(true);
downloadState.pendingAccountFlatDatabaseHealingRequests.add(
SnapDataRequest.createAccountFlatHealingRangeRequest(
Hash.wrap(Bytes32.random()), Bytes32.ZERO, Bytes32.ZERO));
downloadState.checkCompletion(header);
assertThat(future).isNotDone();
assertThat(worldStateStorage.getAccountStateTrieNode(Bytes.EMPTY, ROOT_NODE_HASH)).isEmpty();
assertThat(downloadState.isDownloading()).isTrue();
}
@Test
public void shouldCancelOutstandingTasksWhenFutureIsCancelled() {
final EthTask<?> outstandingTask1 = mock(EthTask.class);
@ -260,11 +289,11 @@ public class SnapWorldDownloadStateTest {
@Test
public void shouldRestartHealWhenNewPivotBlock() {
when(snapSyncState.getPivotBlockHeader()).thenReturn(Optional.of(mock(BlockHeader.class)));
when(snapSyncState.isHealInProgress()).thenReturn(false);
when(snapSyncState.isHealTrieInProgress()).thenReturn(false);
assertThat(downloadState.pendingTrieNodeRequests.isEmpty()).isTrue();
// start heal
downloadState.checkCompletion(header);
verify(snapSyncState).setHealStatus(true);
verify(snapSyncState).setHealTrieStatus(true);
assertThat(downloadState.pendingTrieNodeRequests.isEmpty()).isFalse();
// add useless requests
downloadState.pendingTrieNodeRequests.add(
@ -272,17 +301,17 @@ public class SnapWorldDownloadStateTest {
downloadState.pendingCodeRequests.add(
BytecodeRequest.createBytecodeRequest(Bytes32.ZERO, Hash.EMPTY, Bytes32.ZERO));
// reload the heal
downloadState.reloadHeal();
verify(snapSyncState).setHealStatus(false);
downloadState.reloadTrieHeal();
verify(snapSyncState).setHealTrieStatus(false);
assertThat(downloadState.pendingTrieNodeRequests.size()).isEqualTo(1);
assertThat(downloadState.pendingCodeRequests.isEmpty()).isTrue();
}
@Test
public void shouldWaitingBlockchainWhenTooBehind() {
when(snapSyncState.isHealInProgress()).thenReturn(true);
when(snapSyncState.isHealTrieInProgress()).thenReturn(true);
downloadState.setDynamicPivotBlockManager(dynamicPivotBlockManager);
downloadState.setPivotBlockSelector(dynamicPivotBlockManager);
when(dynamicPivotBlockManager.isBlockchainBehind()).thenReturn(true);
downloadState.checkCompletion(header);
@ -301,9 +330,9 @@ public class SnapWorldDownloadStateTest {
@Test
public void shouldStopWaitingBlockchainWhenNewPivotBlockAvailable() {
when(snapSyncState.isHealInProgress()).thenReturn(true);
when(snapSyncState.isHealTrieInProgress()).thenReturn(true);
downloadState.setDynamicPivotBlockManager(dynamicPivotBlockManager);
downloadState.setPivotBlockSelector(dynamicPivotBlockManager);
when(dynamicPivotBlockManager.isBlockchainBehind()).thenReturn(true);
downloadState.checkCompletion(header);
@ -338,9 +367,9 @@ public class SnapWorldDownloadStateTest {
@Test
public void shouldStopWaitingBlockchainWhenCloseToTheHead() {
when(snapSyncState.isHealInProgress()).thenReturn(true);
when(snapSyncState.isHealTrieInProgress()).thenReturn(true);
downloadState.setDynamicPivotBlockManager(dynamicPivotBlockManager);
downloadState.setPivotBlockSelector(dynamicPivotBlockManager);
when(dynamicPivotBlockManager.isBlockchainBehind()).thenReturn(true);
downloadState.checkCompletion(header);
@ -360,4 +389,30 @@ public class SnapWorldDownloadStateTest {
verify(snapSyncState).setWaitingBlockchain(false);
}
@Test
public void shouldCompleteReturnedFutureWhenNoPendingTasksRemainAndFlatDBHealNotNeeded() {
Assume.assumeTrue(
storageFormat == DataStorageFormat.FOREST
|| (storageFormat == DataStorageFormat.BONSAI && !isFlatDbHealingEnabled));
when(snapSyncState.isHealTrieInProgress()).thenReturn(true);
downloadState.checkCompletion(header);
assertThat(future).isCompleted();
assertThat(downloadState.isDownloading()).isFalse();
}
@Test
public void shouldNotCompleteReturnedFutureWhenNoPendingTasksRemainAndFlatDBHealNeeded() {
Assume.assumeTrue(storageFormat == DataStorageFormat.BONSAI);
Assume.assumeTrue(isFlatDbHealingEnabled);
((BonsaiWorldStateKeyValueStorage) worldStateStorage).upgradeToFullFlatDbMode();
when(snapSyncState.isHealTrieInProgress()).thenReturn(true);
downloadState.checkCompletion(header);
assertThat(future).isNotDone();
verify(snapSyncState).setHealFlatDatabaseInProgress(true);
assertThat(worldStateStorage.getAccountStateTrieNode(Bytes.EMPTY, ROOT_NODE_HASH)).isEmpty();
assertThat(downloadState.isDownloading()).isTrue();
}
}

@ -0,0 +1,297 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.core.InMemoryKeyValueStorageProvider;
import org.hyperledger.besu.ethereum.core.TrieGenerator;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncProcessState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapWorldDownloadState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapsyncMetricsManager;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.proof.WorldStateProofProvider;
import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.WorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.CompactEncoding;
import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.trie.RangeStorageEntriesCollector;
import org.hyperledger.besu.ethereum.trie.TrieIterator;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import kotlin.collections.ArrayDeque;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.assertj.core.api.Assertions;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
@RunWith(MockitoJUnitRunner.class)
public class AccountFlatDatabaseHealingRangeRequestTest {
@Mock private SnapWorldDownloadState downloadState;
@Mock private SnapSyncProcessState snapSyncState;
@Before
public void setup() {
Mockito.when(downloadState.getMetricsManager())
.thenReturn(Mockito.mock(SnapsyncMetricsManager.class));
Mockito.when(downloadState.getAccountsToBeRepaired()).thenReturn(new HashSet<>());
}
@Test
public void shouldReturnChildRequests() {
final WorldStateStorage worldStateStorage =
new WorldStateKeyValueStorage(new InMemoryKeyValueStorage());
final WorldStateProofProvider proofProvider = new WorldStateProofProvider(worldStateStorage);
final MerkleTrie<Bytes, Bytes> accountStateTrie =
TrieGenerator.generateTrie(worldStateStorage, 15);
// Create a collector to gather account entries within a specific range
final RangeStorageEntriesCollector collector =
RangeStorageEntriesCollector.createCollector(
Hash.ZERO, RangeManager.MAX_RANGE, 10, Integer.MAX_VALUE);
// Create a visitor for the range collector
final TrieIterator<Bytes> visitor = RangeStorageEntriesCollector.createVisitor(collector);
// Collect the account entries within the specified range using the trie and range collector
final TreeMap<Bytes32, Bytes> accounts =
(TreeMap<Bytes32, Bytes>)
accountStateTrie.entriesFrom(
root ->
RangeStorageEntriesCollector.collectEntries(
collector, visitor, root, Hash.ZERO));
// Retrieve the proof related nodes for the account trie
final List<Bytes> proofs =
proofProvider.getAccountProofRelatedNodes(
Hash.wrap(accountStateTrie.getRootHash()), Hash.ZERO);
proofs.addAll(
proofProvider.getAccountProofRelatedNodes(
Hash.wrap(accountStateTrie.getRootHash()), accounts.lastKey()));
// Create a request for healing the flat database with a range from MIN_RANGE to MAX_RANGE
final AccountFlatDatabaseHealingRangeRequest request =
new AccountFlatDatabaseHealingRangeRequest(
Hash.EMPTY, RangeManager.MIN_RANGE, RangeManager.MAX_RANGE);
// Add local data to the request, including the proof provider, accounts TreeMap, and proofs as
// an ArrayDeque
request.addLocalData(proofProvider, accounts, new ArrayDeque<>(proofs));
// Verify that the start key hash of the snapDataRequest is greater than the last key in the
// accounts TreeMap
List<SnapDataRequest> childRequests =
request.getChildRequests(downloadState, worldStateStorage, snapSyncState).toList();
Assertions.assertThat(childRequests).hasSize(1);
AccountFlatDatabaseHealingRangeRequest snapDataRequest =
(AccountFlatDatabaseHealingRangeRequest) childRequests.get(0);
Assertions.assertThat(snapDataRequest.getStartKeyHash()).isGreaterThan(accounts.lastKey());
// Verify that a storage healing request is created when the account needs to be repaired
Mockito.when(downloadState.getAccountsToBeRepaired())
.thenReturn(
new HashSet<>(
accounts.keySet().stream()
.map(CompactEncoding::bytesToPath)
.collect(Collectors.toList())));
childRequests =
request.getChildRequests(downloadState, worldStateStorage, snapSyncState).toList();
Assertions.assertThat(childRequests).hasSizeGreaterThan(1);
Assertions.assertThat(childRequests)
.hasAtLeastOneElementOfType(AccountFlatDatabaseHealingRangeRequest.class);
Assertions.assertThat(childRequests)
.hasAtLeastOneElementOfType(StorageFlatDatabaseHealingRangeRequest.class);
}
@Test
public void shouldNotReturnChildRequestsWhenNoMoreAccounts() {
final WorldStateStorage worldStateStorage =
new WorldStateKeyValueStorage(new InMemoryKeyValueStorage());
final WorldStateProofProvider proofProvider = new WorldStateProofProvider(worldStateStorage);
final MerkleTrie<Bytes, Bytes> accountStateTrie =
TrieGenerator.generateTrie(worldStateStorage, 15);
// Create a collector to gather account entries within a specific range
final RangeStorageEntriesCollector collector =
RangeStorageEntriesCollector.createCollector(
Hash.ZERO, RangeManager.MAX_RANGE, 15, Integer.MAX_VALUE);
// Create a visitor for the range collector
final TrieIterator<Bytes> visitor = RangeStorageEntriesCollector.createVisitor(collector);
// Collect the account entries within the specified range using the trie and range collector
final TreeMap<Bytes32, Bytes> accounts =
(TreeMap<Bytes32, Bytes>)
accountStateTrie.entriesFrom(
root ->
RangeStorageEntriesCollector.collectEntries(
collector, visitor, root, Hash.ZERO));
// Create a request for healing the flat database with no more accounts
final AccountFlatDatabaseHealingRangeRequest request =
new AccountFlatDatabaseHealingRangeRequest(
Hash.EMPTY, accounts.lastKey(), RangeManager.MAX_RANGE);
// Add local data to the request
request.addLocalData(proofProvider, new TreeMap<>(), new ArrayDeque<>());
// Verify that no child requests are returned from the request
final Stream<SnapDataRequest> childRequests =
request.getChildRequests(downloadState, worldStateStorage, snapSyncState);
Assertions.assertThat(childRequests).isEmpty();
}
@Test
public void doNotPersistWhenProofIsValid() {
final StorageProvider storageProvider = new InMemoryKeyValueStorageProvider();
final WorldStateStorage worldStateStorage =
new BonsaiWorldStateKeyValueStorage(storageProvider, new NoOpMetricsSystem());
final WorldStateProofProvider proofProvider = new WorldStateProofProvider(worldStateStorage);
final MerkleTrie<Bytes, Bytes> accountStateTrie =
TrieGenerator.generateTrie(worldStateStorage, 15);
// Create a collector to gather account entries within a specific range
final RangeStorageEntriesCollector collector =
RangeStorageEntriesCollector.createCollector(
Hash.ZERO, RangeManager.MAX_RANGE, 10, Integer.MAX_VALUE);
// Create a visitor for the range collector
final TrieIterator<Bytes> visitor = RangeStorageEntriesCollector.createVisitor(collector);
// Collect the account entries within the specified range using the trie and range collector
final TreeMap<Bytes32, Bytes> accounts =
(TreeMap<Bytes32, Bytes>)
accountStateTrie.entriesFrom(
root ->
RangeStorageEntriesCollector.collectEntries(
collector, visitor, root, Hash.ZERO));
// Retrieve the proof related nodes for the account trie
final List<Bytes> proofs =
proofProvider.getAccountProofRelatedNodes(
Hash.wrap(accountStateTrie.getRootHash()), Hash.ZERO);
proofs.addAll(
proofProvider.getAccountProofRelatedNodes(
Hash.wrap(accountStateTrie.getRootHash()), accounts.lastKey()));
// Create a request for healing the flat database with a range from MIN_RANGE to MAX_RANGE
final AccountFlatDatabaseHealingRangeRequest request =
new AccountFlatDatabaseHealingRangeRequest(
Hash.wrap(accountStateTrie.getRootHash()),
RangeManager.MIN_RANGE,
RangeManager.MAX_RANGE);
// Add local data to the request, including the proof provider, accounts TreeMap, and proofs as
// an ArrayDeque
request.addLocalData(proofProvider, accounts, new ArrayDeque<>(proofs));
WorldStateStorage.Updater updater = Mockito.spy(worldStateStorage.updater());
request.doPersist(
worldStateStorage,
updater,
downloadState,
snapSyncState,
SnapSyncConfiguration.getDefault());
Mockito.verifyNoInteractions(updater);
}
@Test
public void doHealAndPersistWhenProofIsInvalid() {
final StorageProvider storageProvider = new InMemoryKeyValueStorageProvider();
final WorldStateStorage worldStateStorage =
new BonsaiWorldStateKeyValueStorage(storageProvider, new NoOpMetricsSystem());
final WorldStateProofProvider proofProvider = new WorldStateProofProvider(worldStateStorage);
final MerkleTrie<Bytes, Bytes> accountStateTrie =
TrieGenerator.generateTrie(worldStateStorage, 15);
// Create a collector to gather account entries within a specific range
final RangeStorageEntriesCollector collector =
RangeStorageEntriesCollector.createCollector(
Hash.ZERO, RangeManager.MAX_RANGE, 15, Integer.MAX_VALUE);
// Create a visitor for the range collector
final TrieIterator<Bytes> visitor = RangeStorageEntriesCollector.createVisitor(collector);
// Collect the account entries within the specified range using the trie and range collector
final TreeMap<Bytes32, Bytes> accounts =
(TreeMap<Bytes32, Bytes>)
accountStateTrie.entriesFrom(
root ->
RangeStorageEntriesCollector.collectEntries(
collector, visitor, root, Hash.ZERO));
// Retrieve the proof related nodes for the account trie
final List<Bytes> proofs =
proofProvider.getAccountProofRelatedNodes(
Hash.wrap(accountStateTrie.getRootHash()), Hash.ZERO);
proofs.addAll(
proofProvider.getAccountProofRelatedNodes(
Hash.wrap(accountStateTrie.getRootHash()), accounts.lastKey()));
// Remove an account in the middle of the range
final Iterator<Map.Entry<Bytes32, Bytes>> iterator = accounts.entrySet().iterator();
Map.Entry<Bytes32, Bytes> removedAccount = null;
int i = 0;
while (iterator.hasNext()) {
if (i == 7) {
removedAccount = Map.Entry.copyOf(iterator.next());
iterator.remove();
} else {
iterator.next();
}
i++;
}
// Create a request for healing the flat database with a range from MIN_RANGE to MAX_RANGE
final AccountFlatDatabaseHealingRangeRequest request =
new AccountFlatDatabaseHealingRangeRequest(
Hash.wrap(accountStateTrie.getRootHash()),
RangeManager.MIN_RANGE,
RangeManager.MAX_RANGE);
// Add local data to the request, including the proof provider, accounts TreeMap, and proofs as
// an ArrayDeque
request.addLocalData(proofProvider, accounts, new ArrayDeque<>(proofs));
BonsaiWorldStateKeyValueStorage.Updater updater =
(BonsaiWorldStateKeyValueStorage.Updater) Mockito.spy(worldStateStorage.updater());
request.doPersist(
worldStateStorage,
updater,
downloadState,
snapSyncState,
SnapSyncConfiguration.getDefault());
// check that the missing account is added back via the updater
Mockito.verify(updater)
.putAccountInfoState(Hash.wrap(removedAccount.getKey()), removedAccount.getValue());
}
}

@ -0,0 +1,325 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal;
import static org.apache.tuweni.rlp.RLP.decodeValue;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.core.InMemoryKeyValueStorageProvider;
import org.hyperledger.besu.ethereum.core.TrieGenerator;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncProcessState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapWorldDownloadState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.proof.WorldStateProofProvider;
import org.hyperledger.besu.ethereum.rlp.RLP;
import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.trie.RangeStorageEntriesCollector;
import org.hyperledger.besu.ethereum.trie.TrieIterator;
import org.hyperledger.besu.ethereum.trie.patricia.StoredMerklePatriciaTrie;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import kotlin.collections.ArrayDeque;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.assertj.core.api.Assertions;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
@RunWith(MockitoJUnitRunner.class)
public class StorageFlatDatabaseHealingRangeRequestTest {
@Mock private SnapWorldDownloadState downloadState;
@Mock private SnapSyncProcessState snapSyncState;
final List<Address> accounts =
List.of(
Address.fromHexString("0xdeadbeef"),
Address.fromHexString("0xdeadbeee"),
Address.fromHexString("0xdeadbeea"),
Address.fromHexString("0xdeadbeeb"));
private MerkleTrie<Bytes, Bytes> trie;
private BonsaiWorldStateKeyValueStorage worldStateStorage;
private WorldStateProofProvider proofProvider;
private Hash account0Hash;
private Hash account0StorageRoot;
@Before
public void setup() {
final StorageProvider storageProvider = new InMemoryKeyValueStorageProvider();
worldStateStorage =
new BonsaiWorldStateKeyValueStorage(storageProvider, new NoOpMetricsSystem());
proofProvider = new WorldStateProofProvider(worldStateStorage);
trie =
TrieGenerator.generateTrie(
worldStateStorage, accounts.stream().map(Hash::hash).collect(Collectors.toList()));
account0Hash = Hash.hash(accounts.get(0));
account0StorageRoot =
trie.get(account0Hash)
.map(RLP::input)
.map(StateTrieAccountValue::readFrom)
.map(StateTrieAccountValue::getStorageRoot)
.orElseThrow();
}
@Test
public void shouldReturnChildRequests() {
final StoredMerklePatriciaTrie<Bytes, Bytes> storageTrie =
new StoredMerklePatriciaTrie<>(
(location, hash) ->
worldStateStorage.getAccountStorageTrieNode(account0Hash, location, hash),
account0StorageRoot,
b -> b,
b -> b);
// Create a collector to gather slot entries within a specific range
final RangeStorageEntriesCollector collector =
RangeStorageEntriesCollector.createCollector(
Hash.ZERO, RangeManager.MAX_RANGE, 1, Integer.MAX_VALUE);
// Create a visitor for the range collector
final TrieIterator<Bytes> visitor = RangeStorageEntriesCollector.createVisitor(collector);
// Collect the slot entries within the specified range using the trie and range collector
final TreeMap<Bytes32, Bytes> slots =
(TreeMap<Bytes32, Bytes>)
storageTrie.entriesFrom(
root ->
RangeStorageEntriesCollector.collectEntries(
collector, visitor, root, Hash.ZERO));
// Retrieve the proof related nodes for the account trie
final List<Bytes> proofs =
proofProvider.getStorageProofRelatedNodes(
Hash.wrap(storageTrie.getRootHash()), account0Hash, slots.firstKey());
proofs.addAll(
proofProvider.getStorageProofRelatedNodes(
Hash.wrap(storageTrie.getRootHash()), account0Hash, slots.lastKey()));
// Create a request for healing the flat database with a range from MIN_RANGE to MAX_RANGE
final StorageFlatDatabaseHealingRangeRequest request =
new StorageFlatDatabaseHealingRangeRequest(
Hash.EMPTY,
account0Hash,
account0StorageRoot,
RangeManager.MIN_RANGE,
RangeManager.MAX_RANGE);
// Add local data to the request, including the proof provider, slots TreeMap, and proofs as
// an ArrayDeque
request.addLocalData(proofProvider, slots, new ArrayDeque<>(proofs));
// Verify that the start key hash of the snapDataRequest is greater than the last key in the
// slots TreeMap
List<SnapDataRequest> childRequests =
request.getChildRequests(downloadState, worldStateStorage, snapSyncState).toList();
Assertions.assertThat(childRequests).hasSizeGreaterThan(1);
StorageFlatDatabaseHealingRangeRequest snapDataRequest =
(StorageFlatDatabaseHealingRangeRequest) childRequests.get(0);
Assertions.assertThat(snapDataRequest.getStartKeyHash()).isGreaterThan(slots.lastKey());
}
@Test
public void shouldNotReturnChildRequestsWhenNoMoreSlots() {
final StoredMerklePatriciaTrie<Bytes, Bytes> storageTrie =
new StoredMerklePatriciaTrie<>(
(location, hash) ->
worldStateStorage.getAccountStorageTrieNode(account0Hash, location, hash),
account0StorageRoot,
b -> b,
b -> b);
// Create a collector to gather slot entries within a specific range
final RangeStorageEntriesCollector collector =
RangeStorageEntriesCollector.createCollector(
Hash.ZERO, RangeManager.MAX_RANGE, Integer.MAX_VALUE, Integer.MAX_VALUE);
// Create a visitor for the range collector
final TrieIterator<Bytes> visitor = RangeStorageEntriesCollector.createVisitor(collector);
// Collect the slot entries within the specified range using the trie and range collector
final TreeMap<Bytes32, Bytes> slots =
(TreeMap<Bytes32, Bytes>)
storageTrie.entriesFrom(
root ->
RangeStorageEntriesCollector.collectEntries(
collector, visitor, root, Hash.ZERO));
// Create a request for healing the flat database with no more slots
final StorageFlatDatabaseHealingRangeRequest request =
new StorageFlatDatabaseHealingRangeRequest(
Hash.EMPTY, account0Hash, account0StorageRoot, slots.lastKey(), RangeManager.MAX_RANGE);
// Add local data to the request
request.addLocalData(proofProvider, new TreeMap<>(), new ArrayDeque<>());
// Verify that no child requests are returned from the request
final Stream<SnapDataRequest> childRequests =
request.getChildRequests(downloadState, worldStateStorage, snapSyncState);
Assertions.assertThat(childRequests).isEmpty();
}
@Test
public void doNotPersistWhenProofIsValid() {
final StoredMerklePatriciaTrie<Bytes, Bytes> storageTrie =
new StoredMerklePatriciaTrie<>(
(location, hash) ->
worldStateStorage.getAccountStorageTrieNode(account0Hash, location, hash),
account0StorageRoot,
b -> b,
b -> b);
// Create a collector to gather slot entries within a specific range
final RangeStorageEntriesCollector collector =
RangeStorageEntriesCollector.createCollector(
Hash.ZERO, RangeManager.MAX_RANGE, Integer.MAX_VALUE, Integer.MAX_VALUE);
// Create a visitor for the range collector
final TrieIterator<Bytes> visitor = RangeStorageEntriesCollector.createVisitor(collector);
// Collect the slot entries within the specified range using the trie and range collector
final TreeMap<Bytes32, Bytes> slots =
(TreeMap<Bytes32, Bytes>)
storageTrie.entriesFrom(
root ->
RangeStorageEntriesCollector.collectEntries(
collector, visitor, root, Hash.ZERO));
// Retrieve the proof related nodes for the account trie
final List<Bytes> proofs =
proofProvider.getStorageProofRelatedNodes(
Hash.wrap(storageTrie.getRootHash()), account0Hash, slots.firstKey());
proofs.addAll(
proofProvider.getStorageProofRelatedNodes(
Hash.wrap(storageTrie.getRootHash()), account0Hash, slots.lastKey()));
// Create a request for healing the flat database with a range from MIN_RANGE to MAX_RANGE
final StorageFlatDatabaseHealingRangeRequest request =
new StorageFlatDatabaseHealingRangeRequest(
Hash.wrap(trie.getRootHash()),
account0Hash,
Hash.wrap(storageTrie.getRootHash()),
RangeManager.MIN_RANGE,
RangeManager.MAX_RANGE);
// Add local data to the request
request.addLocalData(proofProvider, slots, new ArrayDeque<>(proofs));
WorldStateStorage.Updater updater = Mockito.spy(worldStateStorage.updater());
request.doPersist(
worldStateStorage,
updater,
downloadState,
snapSyncState,
SnapSyncConfiguration.getDefault());
Mockito.verifyNoInteractions(updater);
}
@Test
public void doHealAndPersistWhenProofIsInvalid() {
final StoredMerklePatriciaTrie<Bytes, Bytes> storageTrie =
new StoredMerklePatriciaTrie<>(
(location, hash) ->
worldStateStorage.getAccountStorageTrieNode(account0Hash, location, hash),
account0StorageRoot,
b -> b,
b -> b);
// Create a collector to gather slot entries within a specific range
final RangeStorageEntriesCollector collector =
RangeStorageEntriesCollector.createCollector(
Hash.ZERO, RangeManager.MAX_RANGE, Integer.MAX_VALUE, Integer.MAX_VALUE);
// Create a visitor for the range collector
final TrieIterator<Bytes> visitor = RangeStorageEntriesCollector.createVisitor(collector);
// Collect the slot entries within the specified range using the trie and range collector
final TreeMap<Bytes32, Bytes> slots =
(TreeMap<Bytes32, Bytes>)
storageTrie.entriesFrom(
root ->
RangeStorageEntriesCollector.collectEntries(
collector, visitor, root, Hash.ZERO));
// Retrieve the proof related nodes for the account trie
final List<Bytes> proofs =
proofProvider.getStorageProofRelatedNodes(
Hash.wrap(storageTrie.getRootHash()), account0Hash, slots.firstKey());
proofs.addAll(
proofProvider.getStorageProofRelatedNodes(
Hash.wrap(storageTrie.getRootHash()), account0Hash, slots.lastKey()));
// Remove a slot in the middle of the range
final Iterator<Map.Entry<Bytes32, Bytes>> iterator = slots.entrySet().iterator();
Map.Entry<Bytes32, Bytes> removedSlot = null;
int i = 0;
while (iterator.hasNext()) {
if (i == 1) {
removedSlot = Map.Entry.copyOf(iterator.next());
iterator.remove();
} else {
iterator.next();
}
i++;
}
// Create a request for healing the flat database with a range from MIN_RANGE to MAX_RANGE
final StorageFlatDatabaseHealingRangeRequest request =
new StorageFlatDatabaseHealingRangeRequest(
Hash.wrap(trie.getRootHash()),
account0Hash,
Hash.wrap(storageTrie.getRootHash()),
RangeManager.MIN_RANGE,
RangeManager.MAX_RANGE);
// Add local data to the request
request.addLocalData(proofProvider, slots, new ArrayDeque<>(proofs));
BonsaiWorldStateKeyValueStorage.Updater updater =
(BonsaiWorldStateKeyValueStorage.Updater) Mockito.spy(worldStateStorage.updater());
request.doPersist(
worldStateStorage,
updater,
downloadState,
snapSyncState,
SnapSyncConfiguration.getDefault());
// Check that the missing slot was added to the updater
Mockito.verify(updater)
.putStorageValueBySlotHash(
account0Hash,
Hash.wrap(removedSlot.getKey()),
Bytes32.leftPad(decodeValue(removedSlot.getValue())));
}
}

@@ -73,9 +73,7 @@ public class RangeStorageEntriesCollector extends StorageEntriesCollector<Bytes>
if (currentSize > maxResponseBytes) {
return TrieIterator.State.STOP;
}
if (endKeyHash.isPresent()
&& !values.isEmpty()
&& keyHash.compareTo(endKeyHash.get()) > 0) {
if (endKeyHash.isPresent() && keyHash.compareTo(endKeyHash.get()) > 0) {
return TrieIterator.State.STOP;
}

@@ -24,6 +24,7 @@ import java.util.Iterator;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.apache.tuweni.bytes.MutableBytes;
public class TrieIterator<V> implements PathNodeVisitor<V> {
@@ -40,10 +41,15 @@ public class TrieIterator<V> implements PathNodeVisitor<V> {
@Override
public Node<V> visit(final ExtensionNode<V> node, final Bytes searchPath) {
Bytes remainingPath = searchPath;
final Bytes extensionPath;
final Bytes commonPrefixPath;
if (state == State.SEARCHING) {
final Bytes extensionPath = node.getPath();
final int commonPathLength = extensionPath.commonPrefixLength(searchPath);
remainingPath = searchPath.slice(commonPathLength);
extensionPath = node.getPath();
commonPrefixPath = searchPath.slice(0, Math.min(searchPath.size(), extensionPath.size()));
remainingPath = searchPath.slice(commonPrefixPath.size());
if (node.getPath().compareTo(commonPrefixPath) > 0) {
remainingPath = MutableBytes.create(remainingPath.size());
}
}
paths.push(node.getPath());
@@ -70,7 +76,12 @@ public class TrieIterator<V> implements PathNodeVisitor<V> {
for (int i = iterateFrom; i < node.maxChild() && state.continueIterating(); i++) {
paths.push(Bytes.of(i));
final Node<V> child = node.child((byte) i);
if (i == iterateFrom) {
child.accept(this, remainingPath);
} else {
child.accept(this, MutableBytes.create(remainingPath.size()));
}
if (unload) {
child.unload();
}

@@ -0,0 +1,85 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.trie;
import org.hyperledger.besu.ethereum.trie.patricia.StoredMerklePatriciaTrie;
import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;
import java.util.List;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.assertj.core.api.Assertions;
import org.junit.Test;
public class RangeStorageEntriesCollectorTest {
@Test
public void shouldRetrieveAllLeavesInRangeWhenStartFromZero() {
InMemoryKeyValueStorage worldStateStorage = new InMemoryKeyValueStorage();
final MerkleTrie<Bytes, Bytes> accountStateTrie =
new StoredMerklePatriciaTrie<>(
(location, hash) -> worldStateStorage.get(hash.toArrayUnsafe()).map(Bytes::wrap),
b -> b,
b -> b);
final List<Bytes32> lists =
List.of(
Bytes32.rightPad(Bytes.of(1, 1, 3, 0)),
Bytes32.rightPad(Bytes.of(1, 1, 3, 1)),
Bytes32.rightPad(Bytes.of(1, 2, 0, 0)));
lists.forEach(bytes -> accountStateTrie.put(bytes, Bytes.of(1, 2, 3)));
Assertions.assertThat(
accountStateTrie.entriesFrom(Bytes32.rightPad(Bytes.of(0, 0, 0, 0)), 3).keySet())
.containsAll(lists);
}
@Test
public void shouldRetrieveAllLeavesInRangeWhenStartFromSpecificRange() {
InMemoryKeyValueStorage worldStateStorage = new InMemoryKeyValueStorage();
final MerkleTrie<Bytes, Bytes> accountStateTrie =
new StoredMerklePatriciaTrie<>(
(location, hash) -> worldStateStorage.get(hash.toArrayUnsafe()).map(Bytes::wrap),
b -> b,
b -> b);
final List<Bytes32> lists =
List.of(
Bytes32.rightPad(Bytes.of(1, 1, 3, 0)),
Bytes32.rightPad(Bytes.of(1, 1, 3, 1)),
Bytes32.rightPad(Bytes.of(1, 2, 0, 0)));
lists.forEach(bytes -> accountStateTrie.put(bytes, Bytes.of(1, 2, 3)));
Assertions.assertThat(
accountStateTrie.entriesFrom(Bytes32.rightPad(Bytes.of(1, 1, 2, 1)), 3).keySet())
.containsAll(lists);
}
@Test
public void shouldExcludeLeavesNotInRange() {
InMemoryKeyValueStorage worldStateStorage = new InMemoryKeyValueStorage();
final MerkleTrie<Bytes, Bytes> accountStateTrie =
new StoredMerklePatriciaTrie<>(
(location, hash) -> worldStateStorage.get(hash.toArrayUnsafe()).map(Bytes::wrap),
b -> b,
b -> b);
final List<Bytes32> lists =
List.of(
Bytes32.rightPad(Bytes.of(1, 1, 3, 0)),
Bytes32.rightPad(Bytes.of(1, 1, 3, 1)),
Bytes32.rightPad(Bytes.of(1, 2, 0, 0)));
lists.forEach(bytes -> accountStateTrie.put(bytes, Bytes.of(1, 2, 3)));
Assertions.assertThat(
accountStateTrie.entriesFrom(Bytes32.rightPad(Bytes.of(1, 1, 9, 9)), 1).keySet())
.contains(Bytes32.rightPad(Bytes.of(1, 2, 0, 0)));
}
}

@@ -47,6 +47,7 @@ public class TrieIteratorTest {
Bytes32.fromHexString("0x5555555555555555555555555555555555555555555555555555555555555555");
private static final Bytes32 KEY_HASH2 =
Bytes32.fromHexString("0x5555555555555555555555555555555555555555555555555555555555555556");
private static final Bytes PATH1 = bytesToPath(KEY_HASH1);
private static final Bytes PATH2 = bytesToPath(KEY_HASH2);

@@ -69,7 +69,7 @@ Calculated : ${currentHash}
tasks.register('checkAPIChanges', FileStateChecker) {
description = "Checks that the API for the Plugin-API project does not change without deliberate thought"
files = sourceSets.main.allJava.files
knownHash = '5qYaRONxsvjtA3/9OOX4B1GEaOVEXLyU2zBFU0Kpu0E='
knownHash = 'T7W0Yg01Qs77rIZ+qgDe4p/wUxXG2PrRDj71EZSMpYY='
}
check.dependsOn('checkAPIChanges')

@@ -71,6 +71,17 @@ public interface KeyValueStorage extends Closeable {
*/
Stream<Pair<byte[], byte[]>> stream() throws StorageException;
/**
* Returns a stream of key-value pairs starting from the specified key. If no data is available
* from the specified key onwards, an empty stream is returned.
*
* @param startKey The key from which the stream should start.
* @return A stream of key-value pairs starting from the specified key.
* @throws StorageException If an error occurs while accessing the storage.
*/
Stream<Pair<byte[], byte[]>> streamFromKey(final byte[] startKey);
/**
* Returns a stream of all keys.
*

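For reference, a minimal usage sketch of the new streamFromKey contract against the in-memory implementation. This sketch is not part of the diff; it assumes the existing startTransaction/put/commit API on KeyValueStorage, and the class name is illustrative only.

import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;

// Illustrative sketch only; not part of this PR.
public class StreamFromKeyExample {
  public static void main(final String[] args) {
    final InMemoryKeyValueStorage storage = new InMemoryKeyValueStorage();
    final KeyValueStorageTransaction tx = storage.startTransaction();
    tx.put(new byte[] {0x01}, new byte[] {0x0a});
    tx.put(new byte[] {0x02}, new byte[] {0x0b});
    tx.put(new byte[] {0x03}, new byte[] {0x0c});
    tx.commit();
    // Only entries whose key is at or after 0x02 are expected; the 0x01 entry is skipped.
    storage
        .streamFromKey(new byte[] {0x02})
        .forEach(
            pair ->
                System.out.printf("0x%02x -> 0x%02x%n", pair.getKey()[0], pair.getValue()[0]));
  }
}

Most implementations in this change satisfy the contract by filtering the result of stream(); the segmented RocksDB storage below instead seeks the underlying RocksIterator directly to the start key.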
@@ -31,6 +31,7 @@ import java.util.function.Predicate;
import java.util.stream.Stream;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.tuweni.bytes.Bytes;
import org.rocksdb.OptimisticTransactionDB;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -75,6 +76,11 @@ public class RocksDBColumnarKeyValueSnapshot implements SnappedKeyValueStorage {
return snapTx.stream();
}
@Override
public Stream<Pair<byte[], byte[]>> streamFromKey(final byte[] startKey) {
return stream().filter(e -> Bytes.wrap(startKey).compareTo(Bytes.wrap(e.getKey())) <= 0);
}
@Override
public Stream<byte[]> streamKeys() {
throwIfClosed();

@@ -263,6 +263,14 @@ public abstract class RocksDBColumnarKeyValueStorage
return RocksDbIterator.create(rocksIterator).toStream();
}
@Override
public Stream<Pair<byte[], byte[]>> streamFromKey(
final RocksDbSegmentIdentifier segmentHandle, final byte[] startKey) {
final RocksIterator rocksIterator = getDB().newIterator(segmentHandle.get());
rocksIterator.seek(startKey);
return RocksDbIterator.create(rocksIterator).toStream();
}
@Override
public Stream<byte[]> streamKeys(final RocksDbSegmentIdentifier segmentHandle) {
final RocksIterator rocksIterator = getDB().newIterator(segmentHandle.get());

@@ -35,6 +35,7 @@ import java.util.function.Predicate;
import java.util.stream.Stream;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.tuweni.bytes.Bytes;
import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.LRUCache;
import org.rocksdb.OptimisticTransactionDB;
@@ -148,6 +149,11 @@ public class RocksDBKeyValueStorage implements KeyValueStorage {
return RocksDbIterator.create(rocksIterator).toStream();
}
@Override
public Stream<Pair<byte[], byte[]>> streamFromKey(final byte[] startKey) {
return stream().filter(e -> Bytes.wrap(startKey).compareTo(Bytes.wrap(e.getKey())) <= 0);
}
@Override
public Stream<byte[]> streamKeys() {
throwIfClosed();

@@ -120,6 +120,11 @@ public class InMemoryKeyValueStorage
}
}
@Override
public Stream<Pair<byte[], byte[]>> streamFromKey(final byte[] startKey) {
return stream().filter(e -> Bytes.wrap(startKey).compareTo(Bytes.wrap(e.getKey())) <= 0);
}
@Override
public Stream<byte[]> streamKeys() {
final Lock lock = rwLock.readLock();

@@ -110,6 +110,11 @@ public class LayeredKeyValueStorage extends InMemoryKeyValueStorage
}
}
@Override
public Stream<Pair<byte[], byte[]>> streamFromKey(final byte[] startKey) {
return stream().filter(e -> Bytes.wrap(startKey).compareTo(Bytes.wrap(e.getKey())) <= 0);
}
@Override
public Stream<byte[]> streamKeys() {
throwIfClosed();

@@ -113,6 +113,11 @@ public class LimitedInMemoryKeyValueStorage implements KeyValueStorage {
}
}
@Override
public Stream<Pair<byte[], byte[]>> streamFromKey(final byte[] startKey) {
return stream().filter(e -> Bytes.wrap(startKey).compareTo(Bytes.wrap(e.getKey())) <= 0);
}
@Override
public Stream<byte[]> streamKeys() {
final Lock lock = rwLock.readLock();

@@ -78,6 +78,17 @@ public interface SegmentedKeyValueStorage<S> extends Closeable {
*/
Stream<Pair<byte[], byte[]>> stream(final S segmentHandle);
/**
* Returns a stream of key-value pairs starting from the specified key. If no data is available
* from the specified key onwards, an empty stream is returned.
*
* @param segmentHandle The segment handle whose keys we want to stream.
* @param startKey The key from which the stream should start.
* @return A stream of key-value pairs starting from the specified key.
*/
Stream<Pair<byte[], byte[]>> streamFromKey(final S segmentHandle, final byte[] startKey);
/**
* Stream keys.
*

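Similarly, a minimal sketch of the segment-scoped contract, written as if alongside the SegmentedKeyValueStorage interface; the helper class and method names are illustrative only and are not part of the diff.

import java.util.stream.Stream;
import org.apache.commons.lang3.tuple.Pair;

// Illustrative helper only; not part of this PR.
final class SegmentStreamExample {
  static <S> long countEntriesFrom(
      final SegmentedKeyValueStorage<S> storage, final S segmentHandle, final byte[] startKey) {
    // Only entries whose key sorts at or after startKey are expected in the returned stream.
    try (final Stream<Pair<byte[], byte[]>> entries =
        storage.streamFromKey(segmentHandle, startKey)) {
      return entries.count();
    }
  }
}

The adapter in the next hunk forwards its single segment handle to exactly this method, so KeyValueStorage.streamFromKey and the segmented variant share one implementation per backend.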
@@ -88,6 +88,11 @@ public class SegmentedKeyValueStorageAdapter<S> implements KeyValueStorage {
return storage.stream(segmentHandle);
}
@Override
public Stream<Pair<byte[], byte[]>> streamFromKey(final byte[] startKey) throws StorageException {
return storage.streamFromKey(segmentHandle, startKey);
}
@Override
public Stream<byte[]> streamKeys() {
throwIfClosed();

@@ -169,7 +169,7 @@ public class Pipeline<I> {
if (tracingEnabled) {
taskSpan.setStatus(StatusCode.ERROR);
}
LOG.debug("Unhandled exception in pipeline. Aborting.", t);
LOG.info("Unhandled exception in pipeline. Aborting.", t);
try {
abort(t);
} catch (final Throwable t2) {
