Merge branch 'main' of github.com:hyperledger/besu into mega-eof

mega-eof
Danno Ferrin 6 months ago
commit b185371a7c
  1. 4
      .circleci/config.yml
  2. 2
      .github/workflows/acceptance-tests.yml
  3. 2
      .github/workflows/codeql.yml
  4. 19
      .github/workflows/dco.yml
  5. 8
      .github/workflows/develop.yml
  6. 2
      .github/workflows/integration-tests.yml
  7. 6
      .github/workflows/pre-review.yml
  8. 4
      .github/workflows/reference-tests.yml
  9. 28
      .github/workflows/release.yml
  10. 6
      .github/workflows/sonarcloud.yml
  11. 15
      CHANGELOG.md
  12. 19
      NOTICE
  13. 0
      NOTICE.md
  14. 9
      besu/src/main/java/org/hyperledger/besu/cli/BesuCommand.java
  15. 4
      besu/src/main/java/org/hyperledger/besu/cli/config/ProfileName.java
  16. 88
      besu/src/main/java/org/hyperledger/besu/cli/options/stable/DataStorageOptions.java
  17. 20
      besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogHelper.java
  18. 4
      besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java
  19. 3
      besu/src/main/java/org/hyperledger/besu/services/BlockchainServiceImpl.java
  20. 39
      besu/src/test/java/org/hyperledger/besu/cli/BesuCommandTest.java
  21. 48
      besu/src/test/java/org/hyperledger/besu/cli/options/stable/DataStorageOptionsTest.java
  22. 60
      besu/src/test/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogHelperTest.java
  23. 2
      besu/src/test/resources/everything_config.toml
  24. 10
      build.gradle
  25. 6
      config/src/main/resources/profiles/enterprise-private.toml
  26. 1
      config/src/main/resources/profiles/minimalist-staker.toml
  27. 3
      consensus/merge/src/main/java/org/hyperledger/besu/consensus/merge/MergeContext.java
  28. 56
      consensus/merge/src/main/java/org/hyperledger/besu/consensus/merge/PayloadWrapper.java
  29. 77
      consensus/merge/src/main/java/org/hyperledger/besu/consensus/merge/PostMergeContext.java
  30. 5
      consensus/merge/src/main/java/org/hyperledger/besu/consensus/merge/TransitionContext.java
  31. 55
      consensus/merge/src/test/java/org/hyperledger/besu/consensus/merge/PostMergeContextTest.java
  32. 31
      ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/AbstractEngineGetPayload.java
  33. 13
      ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/EngineGetPayloadV1.java
  34. 13
      ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/EngineGetPayloadV2.java
  35. 10
      ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/EngineGetPayloadV3.java
  36. 10
      ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/EngineGetPayloadV4.java
  37. 27
      ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/results/BlockResultFactory.java
  38. 9
      ethereum/api/src/test/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/AbstractEngineGetPayloadTest.java
  39. 8
      ethereum/api/src/test/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/EngineGetPayloadV2Test.java
  40. 9
      ethereum/api/src/test/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/EngineGetPayloadV3Test.java
  41. 8
      ethereum/api/src/test/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/EngineGetPayloadV4Test.java
  42. 2
      ethereum/core/src/main/java/org/hyperledger/besu/ethereum/core/BlockValueCalculator.java
  43. 42
      ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/diffbased/common/worldview/accumulator/DiffBasedWorldStateUpdateAccumulator.java
  44. 26
      ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/DataStorageConfiguration.java
  45. 12
      ethereum/core/src/test/java/org/hyperledger/besu/ethereum/core/BlockValueCalculatorTest.java
  46. 2
      ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/backwardsync/BackwardSyncAlgSpec.java
  47. 4
      ethereum/p2p/build.gradle
  48. 107
      ethereum/p2p/src/main/java/org/hyperledger/besu/ethereum/p2p/discovery/dns/DNSDaemon.java
  49. 32
      ethereum/p2p/src/main/java/org/hyperledger/besu/ethereum/p2p/discovery/dns/DNSDaemonListener.java
  50. 279
      ethereum/p2p/src/main/java/org/hyperledger/besu/ethereum/p2p/discovery/dns/DNSEntry.java
  51. 237
      ethereum/p2p/src/main/java/org/hyperledger/besu/ethereum/p2p/discovery/dns/DNSResolver.java
  52. 32
      ethereum/p2p/src/main/java/org/hyperledger/besu/ethereum/p2p/discovery/dns/DNSVisitor.java
  53. 49
      ethereum/p2p/src/main/java/org/hyperledger/besu/ethereum/p2p/discovery/dns/KVReader.java
  54. 37
      ethereum/p2p/src/main/java/org/hyperledger/besu/ethereum/p2p/network/DefaultP2PNetwork.java
  55. 121
      ethereum/p2p/src/test/java/org/hyperledger/besu/ethereum/p2p/discovery/dns/DNSDaemonTest.java
  56. 71
      ethereum/p2p/src/test/java/org/hyperledger/besu/ethereum/p2p/discovery/dns/DNSEntryTest.java
  57. 40
      ethereum/p2p/src/test/java/org/hyperledger/besu/ethereum/p2p/discovery/dns/KVReaderTest.java
  58. 205
      ethereum/p2p/src/test/java/org/hyperledger/besu/ethereum/p2p/discovery/dns/MockDnsServerVerticle.java
  59. 29
      ethereum/p2p/src/test/java/org/hyperledger/besu/ethereum/p2p/network/DefaultP2PNetworkTest.java
  60. 137
      ethereum/p2p/src/test/resources/discovery/dns/dns-records.json
  61. 8
      ethereum/permissioning/src/main/java/org/hyperledger/besu/ethereum/permissioning/AccountLocalConfigPermissioningController.java
  62. 35
      ethereum/permissioning/src/test/java/org/hyperledger/besu/ethereum/permissioning/AccountLocalConfigPermissioningControllerTest.java
  63. 3
      ethereum/rlp/src/main/java/org/hyperledger/besu/ethereum/rlp/RLPInput.java
  64. 3
      evm/src/main/java/org/hyperledger/besu/evm/frame/MessageFrame.java
  65. 62
      gradle/verification-metadata.xml
  66. 1
      gradle/versions.gradle
  67. 3
      services/tasks/src/main/java/org/hyperledger/besu/services/tasks/InMemoryTaskQueue.java
  68. 1
      util/src/main/java/org/hyperledger/besu/util/EndianUtils.java
  69. 2
      util/src/main/java/org/hyperledger/besu/util/FutureUtils.java
  70. 2
      util/src/main/java/org/hyperledger/besu/util/log/LogUtil.java
  71. 2
      util/src/main/java/org/hyperledger/besu/util/platform/PlatformDetector.java

@ -6,7 +6,7 @@ orbs:
executors:
besu_executor_med: # 2cpu, 4G ram
docker:
- image: cimg/openjdk:17.0
- image: cimg/openjdk:21.0
resource_class: medium
working_directory: ~/project
environment:
@ -24,7 +24,7 @@ executors:
besu_executor_xl: # 8cpu, 16G ram
docker:
- image: cimg/openjdk:17.0
- image: cimg/openjdk:21.0
resource_class: xlarge
working_directory: ~/project
environment:

@ -34,7 +34,7 @@ jobs:
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93
with:
distribution: temurin
java-version: 17
java-version: 21
- name: Install required packages
run: sudo apt-get install -y xmlstarlet
- name: setup gradle

@ -33,7 +33,7 @@ jobs:
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93
with:
distribution: 'temurin'
java-version: 17
java-version: 21
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@2f93e4319b2f04a2efc38fa7f78bd681bc3f7b2f

@ -0,0 +1,19 @@
name: DCO
on: [pull_request]
jobs:
dco_check:
runs-on: ubuntu-latest
name: DCO
if: ${{ github.actor != 'dependabot[bot]' }}
steps:
- name: Get PR Commits
id: 'get-pr-commits'
uses: tim-actions/get-pr-commits@198af03565609bb4ed924d1260247b4881f09e7d
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: DCO Check
uses: tim-actions/dco@f2279e6e62d5a7d9115b0cb8e837b777b1b02e21
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}

@ -17,7 +17,7 @@ jobs:
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93
with:
distribution: temurin
java-version: 17
java-version: 21
- name: setup gradle
uses: gradle/actions/setup-gradle@9e899d11ad247ec76be7a60bc1cf9d3abbb9e7f1
with:
@ -36,7 +36,7 @@ jobs:
matrix:
platform:
- ubuntu-22.04
- [self-hosted, ARM64]
- besu-arm64
runs-on: ${{ matrix.platform }}
steps:
- name: Prepare
@ -67,7 +67,7 @@ jobs:
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93
with:
distribution: temurin
java-version: 17
java-version: 21
- name: setup gradle
uses: gradle/actions/setup-gradle@9e899d11ad247ec76be7a60bc1cf9d3abbb9e7f1
with:
@ -106,7 +106,7 @@ jobs:
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93
with:
distribution: temurin
java-version: 17
java-version: 21
- name: setup gradle
uses: gradle/actions/setup-gradle@9e899d11ad247ec76be7a60bc1cf9d3abbb9e7f1
with:

@ -29,7 +29,7 @@ jobs:
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93
with:
distribution: temurin
java-version: 17
java-version: 21
- name: setup gradle
uses: gradle/actions/setup-gradle@9e899d11ad247ec76be7a60bc1cf9d3abbb9e7f1
with:

@ -45,7 +45,7 @@ jobs:
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93
with:
distribution: temurin
java-version: 17
java-version: 21
- name: Setup Gradle
uses: gradle/actions/setup-gradle@9e899d11ad247ec76be7a60bc1cf9d3abbb9e7f1
with:
@ -65,7 +65,7 @@ jobs:
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93
with:
distribution: temurin
java-version: 17
java-version: 21
- name: Setup Gradle
uses: gradle/actions/setup-gradle@9e899d11ad247ec76be7a60bc1cf9d3abbb9e7f1
with:
@ -93,7 +93,7 @@ jobs:
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93
with:
distribution: temurin
java-version: 17
java-version: 21
- name: Install required packages
run: sudo apt-get install -y xmlstarlet
- name: Setup Gradle

@ -34,8 +34,8 @@ jobs:
- name: Set up Java
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93
with:
distribution: adopt-openj9
java-version: 17
distribution: semeru # IBM Semeru with OpenJ9
java-version: 21
- name: setup gradle
uses: gradle/actions/setup-gradle@9e899d11ad247ec76be7a60bc1cf9d3abbb9e7f1
with:

@ -19,11 +19,11 @@ jobs:
steps:
- name: checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Set up JDK 17
- name: Set up Java
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93
with:
distribution: 'temurin'
java-version: '17'
distribution: temurin
java-version: 21
- name: setup gradle
uses: gradle/actions/setup-gradle@9e899d11ad247ec76be7a60bc1cf9d3abbb9e7f1
with:
@ -60,8 +60,8 @@ jobs:
- name: Set up Java
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93
with:
distribution: adopt
java-version: 17
distribution: temurin
java-version: 21
- name: Download zip
uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe
with:
@ -108,11 +108,11 @@ jobs:
steps:
- name: checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Set up JDK 17
- name: Set up Java
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93
with:
distribution: 'temurin'
java-version: '17'
distribution: temurin
java-version: 21
- name: setup gradle
uses: gradle/actions/setup-gradle@9e899d11ad247ec76be7a60bc1cf9d3abbb9e7f1
with:
@ -132,7 +132,7 @@ jobs:
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93
with:
distribution: temurin
java-version: 17
java-version: 21
- name: setup gradle
uses: gradle/actions/setup-gradle@9e899d11ad247ec76be7a60bc1cf9d3abbb9e7f1
with:
@ -151,7 +151,7 @@ jobs:
matrix:
platform:
- ubuntu-22.04
- [self-hosted, ARM64]
- besu-arm64
runs-on: ${{ matrix.platform }}
steps:
- name: Prepare
@ -174,7 +174,7 @@ jobs:
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93
with:
distribution: temurin
java-version: 17
java-version: 21
- name: setup gradle
uses: gradle/actions/setup-gradle@9e899d11ad247ec76be7a60bc1cf9d3abbb9e7f1
with:
@ -214,7 +214,7 @@ jobs:
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93
with:
distribution: temurin
java-version: 17
java-version: 21
- name: setup gradle
uses: gradle/actions/setup-gradle@9e899d11ad247ec76be7a60bc1cf9d3abbb9e7f1
with:
@ -248,8 +248,8 @@ jobs:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93
with:
distribution: 'temurin' # See 'Supported distributions' for available options
java-version: '17'
distribution: temurin
java-version: 21
cache: gradle
- name: login to ${{ env.registry }}
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d

@ -17,11 +17,11 @@ jobs:
steps:
- name: checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Set up JDK 17
- name: Set up Java
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93
with:
distribution: 'temurin'
java-version: '17'
distribution: temurin
java-version: 21
- name: Cache SonarCloud packages
uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c
with:

@ -3,10 +3,24 @@
## Next Release
### Breaking Changes
- Java 21 has been enforced as minimum version to build and run Besu.
- With --bonsai-limit-trie-logs-enabled by default in this release, historic trie log data will be removed from the database unless sync-mode=FULL. It respects the --bonsai-historical-block-limit setting so shouldn't break any RPCs, but may be breaking if you are accessing this data from the database directly. Can be disabled with --bonsai-limit-trie-logs-enabled=false
- In profile=ENTERPRISE, use sync-mode=FULL (instead of FAST) and data-storage-format=FOREST (instead of BONSAI) [#7186](https://github.com/hyperledger/besu/pull/7186)
- If this breaks your node, you can reset sync-mode=FAST and data-storage-format=BONSAI
### Upcoming Breaking Changes
- Receipt compaction will be enabled by default in a future version of Besu. After this change it will not be possible to downgrade to the previous Besu version.
- PKI-backed QBFT will be removed in a future version of Besu. Other forms of QBFT will remain unchanged.
- --Xbonsai-limit-trie-logs-enabled is deprecated, use --bonsai-limit-trie-logs-enabled instead
- --Xbonsai-trie-logs-pruning-window-size is deprecated, use --bonsai-trie-logs-pruning-window-size instead
### Additions and Improvements
- Add two counters to DefaultBlockchain in order to be able to calculate TPS and Mgas/s [#7105](https://github.com/hyperledger/besu/pull/7105)
- Enable --Xbonsai-limit-trie-logs-enabled by default, unless sync-mode=FULL [#7181](https://github.com/hyperledger/besu/pull/7181)
- Promote experimental --Xbonsai-limit-trie-logs-enabled to production-ready, --bonsai-limit-trie-logs-enabled [#7192](https://github.com/hyperledger/besu/pull/7192)
- Promote experimental --Xbonsai-trie-logs-pruning-window-size to production-ready, --bonsai-trie-logs-pruning-window-size [#7192](https://github.com/hyperledger/besu/pull/7192)
- `admin_nodeInfo` JSON/RPC call returns the currently active EVM version [#7127](https://github.com/hyperledger/besu/pull/7127)
- Improve the selection of the most profitable built block [#7174](https://github.com/hyperledger/besu/pull/7174)
### Bug fixes
- Make `eth_gasPrice` aware of the base fee market [#7102](https://github.com/hyperledger/besu/pull/7102)
@ -23,6 +37,7 @@
- Default bonsai to use full-flat db and code-storage-by-code-hash [#6984](https://github.com/hyperledger/besu/pull/6984)
- New RPC methods miner_setExtraData and miner_getExtraData [#7078](https://github.com/hyperledger/besu/pull/7078)
- Disconnect peers that have multiple discovery ports since they give us bad neighbours [#7089](https://github.com/hyperledger/besu/pull/7089)
- Port Tuweni dns-discovery into Besu. [#7129](https://github.com/hyperledger/besu/pull/7129)
### Known Issues
- [Frequency: occasional < 10%] Chain download halt. Only affects new syncs (new nodes syncing from scratch). Symptom: Block import halts, despite having a full set of peers and world state downloading finishing. Generally restarting besu will resolve the issue. We are tracking this in [#6884](https://github.com/hyperledger/besu/pull/6884)

@ -0,0 +1,19 @@
Hyperledger Besu
Copyright contributors to Hyperledger Besu.
This product includes software adapted from Tuweni. (https://tuweni.tmio.io/)
Copyright 2023-2024 The Machine Consultancy LLC
Licensed under Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
The following NOTICE file content is provided by Tuweni:
------------------------------------------------------------
This product includes code developed under the Apache Tuweni incubation project.
Copyright 2019-2023 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
In addition, this product includes software developed by
Copyright 2018-2019 ConsenSys, Inc.
------------------------------------------------------------

@ -1565,7 +1565,7 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
}
private void validateDataStorageOptions() {
dataStorageOptions.validate(commandLine);
dataStorageOptions.validate(commandLine, syncMode);
}
private void validateRequiredOptions() {
@ -1877,7 +1877,7 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
public BesuControllerBuilder getControllerBuilder() {
pluginCommonConfiguration
.init(dataDir(), dataDir().resolve(DATABASE_PATH), getDataStorageConfiguration())
.withMiningParameters(getMiningParameters())
.withMiningParameters(miningParametersSupplier.get())
.withJsonRpcHttpOptions(jsonRpcHttpOptions);
final KeyValueStorageProvider storageProvider = keyValueStorageProvider(keyValueStorageName);
return controllerBuilderFactory
@ -2792,11 +2792,12 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
builder.setHighSpecEnabled();
}
if (getDataStorageConfiguration().getUnstable().getBonsaiLimitTrieLogsEnabled()) {
if (DataStorageFormat.BONSAI.equals(getDataStorageConfiguration().getDataStorageFormat())
&& getDataStorageConfiguration().getBonsaiLimitTrieLogsEnabled()) {
builder.setLimitTrieLogsEnabled();
builder.setTrieLogRetentionLimit(getDataStorageConfiguration().getBonsaiMaxLayersToLoad());
builder.setTrieLogsPruningWindowSize(
getDataStorageConfiguration().getUnstable().getBonsaiTrieLogPruningWindowSize());
getDataStorageConfiguration().getBonsaiTrieLogPruningWindowSize());
}
builder.setSnapServerEnabled(this.unstableSynchronizerOptions.isSnapsyncServerEnabled());

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.cli.config;
import java.util.Locale;
import org.apache.commons.lang3.StringUtils;
/** Enum for profile names. Each profile corresponds to a configuration file. */
@ -53,6 +51,6 @@ public enum ProfileName {
@Override
public String toString() {
return StringUtils.capitalize(name().replaceAll("_", " ").toLowerCase(Locale.ROOT));
return StringUtils.capitalize(name().replaceAll("_", " "));
}
}

@ -14,16 +14,17 @@
*/
package org.hyperledger.besu.cli.options.stable;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_BONSAI_LIMIT_TRIE_LOGS_ENABLED;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_RECEIPT_COMPACTION_ENABLED;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_CODE_USING_CODE_HASH_ENABLED;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_FULL_FLAT_DB_ENABLED;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_LIMIT_TRIE_LOGS_ENABLED;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT;
import org.hyperledger.besu.cli.options.CLIOptions;
import org.hyperledger.besu.cli.util.CommandLineUtils;
import org.hyperledger.besu.ethereum.eth.sync.SyncMode;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
@ -57,11 +58,35 @@ public class DataStorageOptions implements CLIOptions<DataStorageConfiguration>
paramLabel = "<LONG>",
description =
"Limit of historical layers that can be loaded with BONSAI (default: ${DEFAULT-VALUE}). When using "
+ Unstable.BONSAI_LIMIT_TRIE_LOGS_ENABLED
+ BONSAI_LIMIT_TRIE_LOGS_ENABLED
+ " it will also be used as the number of layers of trie logs to retain.",
arity = "1")
private Long bonsaiMaxLayersToLoad = DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD;
private static final String BONSAI_LIMIT_TRIE_LOGS_ENABLED = "--bonsai-limit-trie-logs-enabled";
/** The bonsai trie logs pruning window size. */
public static final String BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE =
"--bonsai-trie-logs-pruning-window-size";
@SuppressWarnings("ExperimentalCliOptionMustBeCorrectlyDisplayed")
@CommandLine.Option(
names = {
BONSAI_LIMIT_TRIE_LOGS_ENABLED,
"--Xbonsai-limit-trie-logs-enabled",
"--Xbonsai-trie-log-pruning-enabled"
},
fallbackValue = "true",
description = "Limit the number of trie logs that are retained. (default: ${DEFAULT-VALUE})")
private Boolean bonsaiLimitTrieLogsEnabled = DEFAULT_BONSAI_LIMIT_TRIE_LOGS_ENABLED;
@SuppressWarnings("ExperimentalCliOptionMustBeCorrectlyDisplayed")
@CommandLine.Option(
names = {BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE, "--Xbonsai-trie-logs-pruning-window-size"},
description =
"The max number of blocks to load and prune trie logs for at startup. (default: ${DEFAULT-VALUE})")
private Integer bonsaiTrieLogPruningWindowSize = DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
@Option(
names = "--receipt-compaction-enabled",
description = "Enables compact storing of receipts (default: ${DEFAULT-VALUE}).",
@ -76,26 +101,6 @@ public class DataStorageOptions implements CLIOptions<DataStorageConfiguration>
/** The unstable options for data storage. */
public static class Unstable {
private static final String BONSAI_LIMIT_TRIE_LOGS_ENABLED =
"--Xbonsai-limit-trie-logs-enabled";
/** The bonsai trie logs pruning window size. */
public static final String BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE =
"--Xbonsai-trie-logs-pruning-window-size";
@CommandLine.Option(
hidden = true,
names = {BONSAI_LIMIT_TRIE_LOGS_ENABLED, "--Xbonsai-trie-log-pruning-enabled"},
description =
"Limit the number of trie logs that are retained. (default: ${DEFAULT-VALUE})")
private boolean bonsaiLimitTrieLogsEnabled = DEFAULT_BONSAI_LIMIT_TRIE_LOGS_ENABLED;
@CommandLine.Option(
hidden = true,
names = {BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE},
description =
"The max number of blocks to load and prune trie logs for at startup. (default: ${DEFAULT-VALUE})")
private int bonsaiTrieLogPruningWindowSize = DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
// TODO: --Xsnapsync-synchronizer-flat-db-healing-enabled is deprecated, remove it in a future
// release
@ -134,9 +139,17 @@ public class DataStorageOptions implements CLIOptions<DataStorageConfiguration>
* Validates the data storage options
*
* @param commandLine the full commandLine to check all the options specified by the user
* @param syncMode the sync mode
*/
public void validate(final CommandLine commandLine) {
if (unstableOptions.bonsaiLimitTrieLogsEnabled) {
public void validate(final CommandLine commandLine, final SyncMode syncMode) {
if (DataStorageFormat.BONSAI == dataStorageFormat && bonsaiLimitTrieLogsEnabled) {
if (SyncMode.FULL == syncMode) {
throw new CommandLine.ParameterException(
commandLine,
String.format(
"Cannot enable " + BONSAI_LIMIT_TRIE_LOGS_ENABLED + " with sync-mode %s",
syncMode));
}
if (bonsaiMaxLayersToLoad < MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT) {
throw new CommandLine.ParameterException(
commandLine,
@ -144,22 +157,22 @@ public class DataStorageOptions implements CLIOptions<DataStorageConfiguration>
BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD + " minimum value is %d",
MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT));
}
if (unstableOptions.bonsaiTrieLogPruningWindowSize <= 0) {
if (bonsaiTrieLogPruningWindowSize <= 0) {
throw new CommandLine.ParameterException(
commandLine,
String.format(
Unstable.BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE + "=%d must be greater than 0",
unstableOptions.bonsaiTrieLogPruningWindowSize));
BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE + "=%d must be greater than 0",
bonsaiTrieLogPruningWindowSize));
}
if (unstableOptions.bonsaiTrieLogPruningWindowSize <= bonsaiMaxLayersToLoad) {
if (bonsaiTrieLogPruningWindowSize <= bonsaiMaxLayersToLoad) {
throw new CommandLine.ParameterException(
commandLine,
String.format(
Unstable.BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE
BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE
+ "=%d must be greater than "
+ BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD
+ "=%d",
unstableOptions.bonsaiTrieLogPruningWindowSize,
bonsaiTrieLogPruningWindowSize,
bonsaiMaxLayersToLoad));
}
}
@ -176,10 +189,9 @@ public class DataStorageOptions implements CLIOptions<DataStorageConfiguration>
dataStorageOptions.dataStorageFormat = domainObject.getDataStorageFormat();
dataStorageOptions.bonsaiMaxLayersToLoad = domainObject.getBonsaiMaxLayersToLoad();
dataStorageOptions.receiptCompactionEnabled = domainObject.getReceiptCompactionEnabled();
dataStorageOptions.unstableOptions.bonsaiLimitTrieLogsEnabled =
domainObject.getUnstable().getBonsaiLimitTrieLogsEnabled();
dataStorageOptions.unstableOptions.bonsaiTrieLogPruningWindowSize =
domainObject.getUnstable().getBonsaiTrieLogPruningWindowSize();
dataStorageOptions.bonsaiLimitTrieLogsEnabled = domainObject.getBonsaiLimitTrieLogsEnabled();
dataStorageOptions.bonsaiTrieLogPruningWindowSize =
domainObject.getBonsaiTrieLogPruningWindowSize();
dataStorageOptions.unstableOptions.bonsaiFullFlatDbEnabled =
domainObject.getUnstable().getBonsaiFullFlatDbEnabled();
dataStorageOptions.unstableOptions.bonsaiCodeUsingCodeHashEnabled =
@ -194,10 +206,10 @@ public class DataStorageOptions implements CLIOptions<DataStorageConfiguration>
.dataStorageFormat(dataStorageFormat)
.bonsaiMaxLayersToLoad(bonsaiMaxLayersToLoad)
.receiptCompactionEnabled(receiptCompactionEnabled)
.bonsaiLimitTrieLogsEnabled(bonsaiLimitTrieLogsEnabled)
.bonsaiTrieLogPruningWindowSize(bonsaiTrieLogPruningWindowSize)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiLimitTrieLogsEnabled(unstableOptions.bonsaiLimitTrieLogsEnabled)
.bonsaiTrieLogPruningWindowSize(unstableOptions.bonsaiTrieLogPruningWindowSize)
.bonsaiFullFlatDbEnabled(unstableOptions.bonsaiFullFlatDbEnabled)
.bonsaiCodeStoredByCodeHashEnabled(unstableOptions.bonsaiCodeUsingCodeHashEnabled)
.build())

@ -17,7 +17,7 @@ package org.hyperledger.besu.cli.subcommands.storage;
import static com.google.common.base.Preconditions.checkArgument;
import static org.hyperledger.besu.cli.options.stable.DataStorageOptions.BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD;
import static org.hyperledger.besu.controller.BesuController.DATABASE_PATH;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
import org.hyperledger.besu.cli.options.stable.DataStorageOptions;
import org.hyperledger.besu.datatypes.Hash;
@ -296,25 +296,23 @@ public class TrieLogHelper {
void validatePruneConfiguration(final DataStorageConfiguration config) {
checkArgument(
config.getBonsaiMaxLayersToLoad()
>= DataStorageConfiguration.Unstable.MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT,
>= DataStorageConfiguration.MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT,
String.format(
BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD + " minimum value is %d",
DataStorageConfiguration.Unstable.MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT));
DataStorageConfiguration.MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT));
checkArgument(
config.getUnstable().getBonsaiTrieLogPruningWindowSize() > 0,
config.getBonsaiTrieLogPruningWindowSize() > 0,
String.format(
DataStorageOptions.Unstable.BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE
+ "=%d must be greater than 0",
config.getUnstable().getBonsaiTrieLogPruningWindowSize()));
DataStorageOptions.BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE + "=%d must be greater than 0",
config.getBonsaiTrieLogPruningWindowSize()));
checkArgument(
config.getUnstable().getBonsaiTrieLogPruningWindowSize()
> config.getBonsaiMaxLayersToLoad(),
config.getBonsaiTrieLogPruningWindowSize() > config.getBonsaiMaxLayersToLoad(),
String.format(
DataStorageOptions.Unstable.BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE
DataStorageOptions.BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE
+ "=%d must be greater than "
+ BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD
+ "=%d",
config.getUnstable().getBonsaiTrieLogPruningWindowSize(),
config.getBonsaiTrieLogPruningWindowSize(),
config.getBonsaiMaxLayersToLoad()));
}

@ -733,7 +733,7 @@ public abstract class BesuControllerBuilder implements MiningParameterOverrides
final JsonRpcMethods additionalJsonRpcMethodFactory =
createAdditionalJsonRpcMethodFactory(protocolContext, protocolSchedule, miningParameters);
if (dataStorageConfiguration.getUnstable().getBonsaiLimitTrieLogsEnabled()
if (dataStorageConfiguration.getBonsaiLimitTrieLogsEnabled()
&& DataStorageFormat.BONSAI.equals(dataStorageConfiguration.getDataStorageFormat())) {
final TrieLogManager trieLogManager =
((BonsaiWorldStateProvider) worldStateArchive).getTrieLogManager();
@ -784,7 +784,7 @@ public abstract class BesuControllerBuilder implements MiningParameterOverrides
blockchain,
scheduler::executeServiceTask,
dataStorageConfiguration.getBonsaiMaxLayersToLoad(),
dataStorageConfiguration.getUnstable().getBonsaiTrieLogPruningWindowSize(),
dataStorageConfiguration.getBonsaiTrieLogPruningWindowSize(),
isProofOfStake);
trieLogPruner.initialize();

@ -42,6 +42,9 @@ public class BlockchainServiceImpl implements BlockchainService {
private ProtocolSchedule protocolSchedule;
private MutableBlockchain blockchain;
/** Instantiates a new Blockchain service implementation. */
public BlockchainServiceImpl() {}
/**
* Instantiates a new Blockchain service.
*

@ -1087,8 +1087,8 @@ public class BesuCommandTest extends CommandTestAbstract {
}
@Test
public void syncMode_full() {
parseCommand("--sync-mode", "FULL");
public void syncMode_full_requires_bonsaiLimitTrieLogsToBeDisabled() {
parseCommand("--sync-mode", "FULL", "--bonsai-limit-trie-logs-enabled=false");
verify(mockControllerBuilder).synchronizerConfiguration(syncConfigurationCaptor.capture());
final SynchronizerConfiguration syncConfig = syncConfigurationCaptor.getValue();
@ -1244,8 +1244,37 @@ public class BesuCommandTest extends CommandTestAbstract {
}
@Test
public void parsesValidBonsaiTrieLimitBackLayersOption() {
parseCommand("--data-storage-format", "BONSAI", "--bonsai-historical-block-limit", "11");
public void bonsaiLimitTrieLogsEnabledByDefault() {
parseCommand();
verify(mockControllerBuilder)
.dataStorageConfiguration(dataStorageConfigurationArgumentCaptor.capture());
final DataStorageConfiguration dataStorageConfiguration =
dataStorageConfigurationArgumentCaptor.getValue();
assertThat(dataStorageConfiguration.getDataStorageFormat()).isEqualTo(BONSAI);
assertThat(dataStorageConfiguration.getBonsaiLimitTrieLogsEnabled()).isTrue();
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void parsesInvalidDefaultBonsaiLimitTrieLogsWhenFullSyncEnabled() {
parseCommand("--sync-mode=FULL");
Mockito.verifyNoInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8))
.contains("Cannot enable --bonsai-limit-trie-logs-enabled with sync-mode FULL");
}
@Test
public void parsesValidBonsaiHistoricalBlockLimitOption() {
parseCommand(
"--bonsai-limit-trie-logs-enabled=false",
"--data-storage-format",
"BONSAI",
"--bonsai-historical-block-limit",
"11");
verify(mockControllerBuilder)
.dataStorageConfiguration(dataStorageConfigurationArgumentCaptor.capture());
@ -1258,7 +1287,7 @@ public class BesuCommandTest extends CommandTestAbstract {
}
@Test
public void parsesInvalidBonsaiTrieLimitBackLayersOption() {
public void parsesInvalidBonsaiHistoricalBlockLimitOption() {
parseCommand("--data-storage-format", "BONSAI", "--bonsai-maximum-back-layers-to-load", "ten");

@ -15,7 +15,7 @@
package org.hyperledger.besu.cli.options.stable;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT;
import org.hyperledger.besu.cli.options.AbstractCLIOptionsTest;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
@ -31,28 +31,45 @@ public class DataStorageOptionsTest
public void bonsaiTrieLogPruningLimitOption() {
internalTestSuccess(
dataStorageConfiguration ->
assertThat(dataStorageConfiguration.getUnstable().getBonsaiTrieLogPruningWindowSize())
.isEqualTo(600),
assertThat(dataStorageConfiguration.getBonsaiTrieLogPruningWindowSize()).isEqualTo(600),
"--bonsai-limit-trie-logs-enabled",
"--bonsai-trie-logs-pruning-window-size",
"600");
}
@Test
public void bonsaiTrieLogPruningLimitLegacyOption() {
internalTestSuccess(
dataStorageConfiguration ->
assertThat(dataStorageConfiguration.getBonsaiTrieLogPruningWindowSize()).isEqualTo(600),
"--Xbonsai-limit-trie-logs-enabled",
"--Xbonsai-trie-logs-pruning-window-size",
"600");
}
@Test
public void bonsaiTrieLogsEnabled_explicitlySetToFalse() {
internalTestSuccess(
dataStorageConfiguration ->
assertThat(dataStorageConfiguration.getBonsaiLimitTrieLogsEnabled()).isEqualTo(false),
"--bonsai-limit-trie-logs-enabled=false");
}
@Test
public void bonsaiTrieLogPruningWindowSizeShouldBePositive() {
internalTestFailure(
"--Xbonsai-trie-logs-pruning-window-size=0 must be greater than 0",
"--Xbonsai-limit-trie-logs-enabled",
"--Xbonsai-trie-logs-pruning-window-size",
"--bonsai-trie-logs-pruning-window-size=0 must be greater than 0",
"--bonsai-limit-trie-logs-enabled",
"--bonsai-trie-logs-pruning-window-size",
"0");
}
@Test
public void bonsaiTrieLogPruningWindowSizeShouldBeAboveRetentionLimit() {
internalTestFailure(
"--Xbonsai-trie-logs-pruning-window-size=512 must be greater than --bonsai-historical-block-limit=512",
"--Xbonsai-limit-trie-logs-enabled",
"--Xbonsai-trie-logs-pruning-window-size",
"--bonsai-trie-logs-pruning-window-size=512 must be greater than --bonsai-historical-block-limit=512",
"--bonsai-limit-trie-logs-enabled",
"--bonsai-trie-logs-pruning-window-size",
"512");
}
@ -62,7 +79,7 @@ public class DataStorageOptionsTest
dataStorageConfiguration ->
assertThat(dataStorageConfiguration.getBonsaiMaxLayersToLoad())
.isEqualTo(MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT + 1),
"--Xbonsai-limit-trie-logs-enabled",
"--bonsai-limit-trie-logs-enabled",
"--bonsai-historical-block-limit",
"513");
}
@ -73,7 +90,7 @@ public class DataStorageOptionsTest
dataStorageConfiguration ->
assertThat(dataStorageConfiguration.getBonsaiMaxLayersToLoad())
.isEqualTo(MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT),
"--Xbonsai-limit-trie-logs-enabled",
"--bonsai-limit-trie-logs-enabled",
"--bonsai-historical-block-limit",
"512");
}
@ -82,7 +99,7 @@ public class DataStorageOptionsTest
public void bonsaiTrieLogRetentionLimitShouldBeAboveMinimum() {
internalTestFailure(
"--bonsai-historical-block-limit minimum value is 512",
"--Xbonsai-limit-trie-logs-enabled",
"--bonsai-limit-trie-logs-enabled",
"--bonsai-historical-block-limit",
"511");
}
@ -137,11 +154,8 @@ public class DataStorageOptionsTest
return ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(DataStorageFormat.BONSAI)
.bonsaiMaxLayersToLoad(513L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiLimitTrieLogsEnabled(true)
.bonsaiTrieLogPruningWindowSize(514)
.build())
.bonsaiLimitTrieLogsEnabled(true)
.bonsaiTrieLogPruningWindowSize(514)
.build();
}

@ -17,7 +17,7 @@ package org.hyperledger.besu.cli.subcommands.storage;
import static java.util.Collections.singletonList;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.BONSAI;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.spy;
@ -135,10 +135,7 @@ class TrieLogHelperTest {
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(3L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiLimitTrieLogsEnabled(true)
.build())
.bonsaiLimitTrieLogsEnabled(true)
.build();
mockBlockchainBase();
@ -176,10 +173,7 @@ class TrieLogHelperTest {
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(2L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiLimitTrieLogsEnabled(true)
.build())
.bonsaiLimitTrieLogsEnabled(true)
.build();
when(blockchain.getChainHeadBlockNumber()).thenReturn(5L);
@ -199,10 +193,7 @@ class TrieLogHelperTest {
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(10L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiLimitTrieLogsEnabled(true)
.build())
.bonsaiLimitTrieLogsEnabled(true)
.build();
when(blockchain.getChainHeadBlockNumber()).thenReturn(5L);
@ -222,10 +213,7 @@ class TrieLogHelperTest {
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(2L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiLimitTrieLogsEnabled(true)
.build())
.bonsaiLimitTrieLogsEnabled(true)
.build();
mockBlockchainBase();
@ -246,10 +234,7 @@ class TrieLogHelperTest {
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(6L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiLimitTrieLogsEnabled(true)
.build())
.bonsaiLimitTrieLogsEnabled(true)
.build();
when(blockchain.getChainHeadBlockNumber()).thenReturn(5L);
@ -271,10 +256,7 @@ class TrieLogHelperTest {
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(3L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiLimitTrieLogsEnabled(true)
.build())
.bonsaiLimitTrieLogsEnabled(true)
.build();
mockBlockchainBase();
@ -303,10 +285,7 @@ class TrieLogHelperTest {
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(511L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiLimitTrieLogsEnabled(true)
.build())
.bonsaiLimitTrieLogsEnabled(true)
.build();
TrieLogHelper helper = new TrieLogHelper();
@ -324,11 +303,8 @@ class TrieLogHelperTest {
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(512L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiLimitTrieLogsEnabled(true)
.bonsaiTrieLogPruningWindowSize(0)
.build())
.bonsaiLimitTrieLogsEnabled(true)
.bonsaiTrieLogPruningWindowSize(0)
.build();
TrieLogHelper helper = new TrieLogHelper();
@ -336,7 +312,7 @@ class TrieLogHelperTest {
() ->
helper.prune(dataStorageConfiguration, inMemoryWorldState, blockchain, Path.of("")))
.isInstanceOf(RuntimeException.class)
.hasMessage("--Xbonsai-trie-logs-pruning-window-size=0 must be greater than 0");
.hasMessage("--bonsai-trie-logs-pruning-window-size=0 must be greater than 0");
}
@Test
@ -345,11 +321,8 @@ class TrieLogHelperTest {
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(512L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiLimitTrieLogsEnabled(true)
.bonsaiTrieLogPruningWindowSize(512)
.build())
.bonsaiLimitTrieLogsEnabled(true)
.bonsaiTrieLogPruningWindowSize(512)
.build();
TrieLogHelper helper = new TrieLogHelper();
@ -358,7 +331,7 @@ class TrieLogHelperTest {
helper.prune(dataStorageConfiguration, inMemoryWorldState, blockchain, Path.of("")))
.isInstanceOf(RuntimeException.class)
.hasMessage(
"--Xbonsai-trie-logs-pruning-window-size=512 must be greater than --bonsai-historical-block-limit=512");
"--bonsai-trie-logs-pruning-window-size=512 must be greater than --bonsai-historical-block-limit=512");
}
@Test
@ -368,10 +341,7 @@ class TrieLogHelperTest {
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(3L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiLimitTrieLogsEnabled(true)
.build())
.bonsaiLimitTrieLogsEnabled(true)
.build();
mockBlockchainBase();

@ -213,6 +213,8 @@ ethstats-cacert-file="./root.cert"
# Data storage
data-storage-format="BONSAI"
bonsai-historical-block-limit=512
bonsai-limit-trie-logs-enabled=true
bonsai-trie-logs-pruning-window-size=100_000
receipt-compaction-enabled=true
# feature flags

@ -47,8 +47,8 @@ sonarqube {
project.tasks["sonarqube"].dependsOn "jacocoRootReport"
if (!JavaVersion.current().isCompatibleWith(JavaVersion.VERSION_17)) {
throw new GradleException("Java 17 or later is required to build Besu.\n" +
if (!JavaVersion.current().isCompatibleWith(JavaVersion.VERSION_21)) {
throw new GradleException("Java 21 or later is required to build Besu.\n" +
" Detected version ${JavaVersion.current()}")
}
@ -125,7 +125,7 @@ allprojects {
version = calculateVersion()
jacoco {
toolVersion = '0.8.8'
toolVersion = '0.8.11'
if (project.tasks.findByName('referenceTests')) {
applyTo referenceTests
}
@ -144,8 +144,8 @@ allprojects {
tasks.build {
dependsOn 'javadoc'
}
sourceCompatibility = 17
targetCompatibility = 17
sourceCompatibility = 21
targetCompatibility = 21
repositories {
maven {

@ -1,8 +1,10 @@
sync-mode="FAST"
sync-mode="FULL"
data-storage-format="FOREST"
sync-min-peers=1
remote-connections-limit-enabled=false
tx-pool="SEQUENCED"
tx-pool-no-local-priority=true
tx-pool-limit-by-account-percentage=0.15
rpc-http-max-active-connections=300
min-gas-price=0
min-gas-price=0
bonsai-limit-trie-logs-enabled=false

@ -1,4 +1,3 @@
sync-mode="CHECKPOINT"
data-storage-format="BONSAI"
bonsai-historical-block-limit=128
max-peers=25

@ -18,7 +18,6 @@ import org.hyperledger.besu.consensus.merge.blockcreation.PayloadIdentifier;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.ConsensusContext;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.BlockWithReceipts;
import org.hyperledger.besu.ethereum.core.Difficulty;
import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
@ -167,7 +166,7 @@ public interface MergeContext extends ConsensusContext {
* @param payloadId the payload identifier
* @return the optional block with receipts
*/
Optional<BlockWithReceipts> retrieveBlockById(final PayloadIdentifier payloadId);
Optional<PayloadWrapper> retrievePayloadById(final PayloadIdentifier payloadId);
/**
* Is configured for a post-merge from genesis.

@ -15,13 +15,53 @@
package org.hyperledger.besu.consensus.merge;
import org.hyperledger.besu.consensus.merge.blockcreation.PayloadIdentifier;
import org.hyperledger.besu.datatypes.Wei;
import org.hyperledger.besu.ethereum.core.BlockValueCalculator;
import org.hyperledger.besu.ethereum.core.BlockWithReceipts;
/**
* Wrapper for payload plus extra info.
*
* @param payloadIdentifier Payload identifier
* @param blockWithReceipts Block With Receipts
*/
public record PayloadWrapper(
PayloadIdentifier payloadIdentifier, BlockWithReceipts blockWithReceipts) {}
/** Wrapper for payload plus extra info. */
public class PayloadWrapper {
private final PayloadIdentifier payloadIdentifier;
private final BlockWithReceipts blockWithReceipts;
private final Wei blockValue;
/**
* Construct a wrapper with the following fields.
*
* @param payloadIdentifier Payload identifier
* @param blockWithReceipts Block with receipts
*/
public PayloadWrapper(
final PayloadIdentifier payloadIdentifier, final BlockWithReceipts blockWithReceipts) {
this.blockWithReceipts = blockWithReceipts;
this.payloadIdentifier = payloadIdentifier;
this.blockValue = BlockValueCalculator.calculateBlockValue(blockWithReceipts);
}
/**
* Get the block value
*
* @return block value in Wei
*/
public Wei blockValue() {
return blockValue;
}
/**
* Get this payload identifier
*
* @return payload identifier
*/
public PayloadIdentifier payloadIdentifier() {
return payloadIdentifier;
}
/**
* Get the block with receipts
*
* @return block with receipts
*/
public BlockWithReceipts blockWithReceipts() {
return blockWithReceipts;
}
}

@ -16,12 +16,9 @@ package org.hyperledger.besu.consensus.merge;
import org.hyperledger.besu.consensus.merge.blockcreation.PayloadIdentifier;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.datatypes.Wei;
import org.hyperledger.besu.ethereum.ConsensusContext;
import org.hyperledger.besu.ethereum.core.Block;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.BlockValueCalculator;
import org.hyperledger.besu.ethereum.core.BlockWithReceipts;
import org.hyperledger.besu.ethereum.core.Difficulty;
import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
import org.hyperledger.besu.util.Subscribers;
@ -29,7 +26,6 @@ import org.hyperledger.besu.util.Subscribers;
import java.util.Comparator;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import com.google.common.annotations.VisibleForTesting;
@ -45,13 +41,6 @@ public class PostMergeContext implements MergeContext {
static final int MAX_BLOCKS_IN_PROGRESS = 12;
private static final AtomicReference<PostMergeContext> singleton = new AtomicReference<>();
private static final Comparator<BlockWithReceipts> compareByGasUsedDesc =
Comparator.comparingLong(
(BlockWithReceipts blockWithReceipts) ->
blockWithReceipts.getBlock().getHeader().getGasUsed())
.reversed();
private final AtomicReference<SyncState> syncState;
private final AtomicReference<Difficulty> terminalTotalDifficulty;
// initial postMerge state is indeterminate until it is set:
@ -70,7 +59,6 @@ public class PostMergeContext implements MergeContext {
private final AtomicReference<BlockHeader> lastSafeBlock = new AtomicReference<>();
private final AtomicReference<Optional<BlockHeader>> terminalPoWBlock =
new AtomicReference<>(Optional.empty());
private final BlockValueCalculator blockValueCalculator = new BlockValueCalculator();
private boolean isPostMergeAtGenesis;
/** Instantiates a new Post merge context. */
@ -227,66 +215,65 @@ public class PostMergeContext implements MergeContext {
}
@Override
public void putPayloadById(final PayloadWrapper payloadWrapper) {
public void putPayloadById(final PayloadWrapper newPayload) {
final var newBlockWithReceipts = newPayload.blockWithReceipts();
final var newBlockValue = newPayload.blockValue();
synchronized (blocksInProgress) {
final Optional<BlockWithReceipts> maybeCurrBestBlock =
retrieveBlockById(payloadWrapper.payloadIdentifier());
final Optional<PayloadWrapper> maybeCurrBestPayload =
retrievePayloadById(newPayload.payloadIdentifier());
maybeCurrBestBlock.ifPresentOrElse(
currBestBlock -> {
if (compareByGasUsedDesc.compare(payloadWrapper.blockWithReceipts(), currBestBlock)
< 0) {
maybeCurrBestPayload.ifPresent(
currBestPayload -> {
if (newBlockValue.greaterThan(currBestPayload.blockValue())) {
LOG.atDebug()
.setMessage("New proposal for payloadId {} {} is better than the previous one {}")
.addArgument(payloadWrapper.payloadIdentifier())
.setMessage(
"New proposal for payloadId {} {} is better than the previous one {} by {}")
.addArgument(newPayload.payloadIdentifier())
.addArgument(() -> logBlockProposal(newBlockWithReceipts.getBlock()))
.addArgument(
() -> logBlockProposal(payloadWrapper.blockWithReceipts().getBlock()))
.addArgument(() -> logBlockProposal(currBestBlock.getBlock()))
() -> logBlockProposal(currBestPayload.blockWithReceipts().getBlock()))
.addArgument(
() ->
newBlockValue
.subtract(currBestPayload.blockValue())
.toHumanReadableString())
.log();
blocksInProgress.removeAll(
retrievePayloadsById(payloadWrapper.payloadIdentifier())
.collect(Collectors.toUnmodifiableList()));
blocksInProgress.add(
new PayloadWrapper(
payloadWrapper.payloadIdentifier(), payloadWrapper.blockWithReceipts()));
logCurrentBestBlock(payloadWrapper.blockWithReceipts());
streamPayloadsById(newPayload.payloadIdentifier()).toList());
logCurrentBestBlock(newPayload);
}
},
() ->
blocksInProgress.add(
new PayloadWrapper(
payloadWrapper.payloadIdentifier(), payloadWrapper.blockWithReceipts())));
});
blocksInProgress.add(newPayload);
}
}
private void logCurrentBestBlock(final BlockWithReceipts blockWithReceipts) {
private void logCurrentBestBlock(final PayloadWrapper payloadWrapper) {
if (LOG.isDebugEnabled()) {
final Block block = blockWithReceipts.getBlock();
final Block block = payloadWrapper.blockWithReceipts().getBlock();
final float gasUsedPerc =
100.0f * block.getHeader().getGasUsed() / block.getHeader().getGasLimit();
final int txsNum = block.getBody().getTransactions().size();
final Wei reward = blockValueCalculator.calculateBlockValue(blockWithReceipts);
LOG.debug(
"Current best proposal for block {}: txs {}, gas used {}%, reward {}",
blockWithReceipts.getNumber(),
block.getHeader().getNumber(),
txsNum,
String.format("%1.2f", gasUsedPerc),
reward.toHumanReadableString());
payloadWrapper.blockValue().toHumanReadableString());
}
}
@Override
public Optional<BlockWithReceipts> retrieveBlockById(final PayloadIdentifier payloadId) {
public Optional<PayloadWrapper> retrievePayloadById(final PayloadIdentifier payloadId) {
synchronized (blocksInProgress) {
return retrievePayloadsById(payloadId)
.map(payloadWrapper -> payloadWrapper.blockWithReceipts())
.sorted(compareByGasUsedDesc)
.findFirst();
return streamPayloadsById(payloadId).max(Comparator.comparing(PayloadWrapper::blockValue));
}
}
private Stream<PayloadWrapper> retrievePayloadsById(final PayloadIdentifier payloadId) {
private Stream<PayloadWrapper> streamPayloadsById(final PayloadIdentifier payloadId) {
return blocksInProgress.stream().filter(z -> z.payloadIdentifier().equals(payloadId));
}

@ -18,7 +18,6 @@ import org.hyperledger.besu.consensus.merge.blockcreation.PayloadIdentifier;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.ConsensusContext;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.BlockWithReceipts;
import org.hyperledger.besu.ethereum.core.Difficulty;
import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
@ -146,8 +145,8 @@ public class TransitionContext implements MergeContext {
}
@Override
public Optional<BlockWithReceipts> retrieveBlockById(final PayloadIdentifier payloadId) {
return postMergeContext.retrieveBlockById(payloadId);
public Optional<PayloadWrapper> retrievePayloadById(final PayloadIdentifier payloadId) {
return postMergeContext.retrievePayloadById(payloadId);
}
@Override

@ -26,6 +26,7 @@ import static org.mockito.Mockito.when;
import org.hyperledger.besu.consensus.merge.blockcreation.PayloadIdentifier;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.datatypes.Wei;
import org.hyperledger.besu.ethereum.core.Block;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.BlockWithReceipts;
@ -138,9 +139,12 @@ public class PostMergeContextTest {
BlockWithReceipts mockBlockWithReceipts = createBlockWithReceipts(1, 21000, 1);
PayloadIdentifier firstPayloadId = new PayloadIdentifier(1L);
postMergeContext.putPayloadById(new PayloadWrapper(firstPayloadId, mockBlockWithReceipts));
final var payloadWrapper = createPayloadWrapper(firstPayloadId, mockBlockWithReceipts, Wei.ONE);
postMergeContext.putPayloadById(payloadWrapper);
assertThat(postMergeContext.retrieveBlockById(firstPayloadId)).contains(mockBlockWithReceipts);
assertThat(postMergeContext.retrievePayloadById(firstPayloadId))
.map(PayloadWrapper::blockWithReceipts)
.contains(mockBlockWithReceipts);
}
@Test
@ -149,10 +153,16 @@ public class PostMergeContextTest {
BlockWithReceipts betterBlockWithReceipts = createBlockWithReceipts(2, 11, 1);
PayloadIdentifier payloadId = new PayloadIdentifier(1L);
postMergeContext.putPayloadById(new PayloadWrapper(payloadId, zeroTxBlockWithReceipts));
postMergeContext.putPayloadById(new PayloadWrapper(payloadId, betterBlockWithReceipts));
assertThat(postMergeContext.retrieveBlockById(payloadId)).contains(betterBlockWithReceipts);
final var zeroTxPayloadWrapper =
createPayloadWrapper(payloadId, zeroTxBlockWithReceipts, Wei.ZERO);
final var betterPayloadWrapper =
createPayloadWrapper(payloadId, betterBlockWithReceipts, Wei.ONE);
postMergeContext.putPayloadById(zeroTxPayloadWrapper);
postMergeContext.putPayloadById(betterPayloadWrapper);
assertThat(postMergeContext.retrievePayloadById(payloadId))
.map(PayloadWrapper::blockWithReceipts)
.contains(betterBlockWithReceipts);
}
@Test
@ -162,25 +172,33 @@ public class PostMergeContextTest {
BlockWithReceipts smallBlockWithReceipts = createBlockWithReceipts(3, 5, 1);
PayloadIdentifier payloadId = new PayloadIdentifier(1L);
postMergeContext.putPayloadById(new PayloadWrapper(payloadId, zeroTxBlockWithReceipts));
postMergeContext.putPayloadById(new PayloadWrapper(payloadId, betterBlockWithReceipts));
postMergeContext.putPayloadById(new PayloadWrapper(payloadId, smallBlockWithReceipts));
assertThat(postMergeContext.retrieveBlockById(payloadId)).contains(betterBlockWithReceipts);
final var zeroTxPayloadWrapper =
createPayloadWrapper(payloadId, zeroTxBlockWithReceipts, Wei.ZERO);
final var betterPayloadWrapper =
createPayloadWrapper(payloadId, betterBlockWithReceipts, Wei.of(2));
final var smallPayloadWrapper =
createPayloadWrapper(payloadId, smallBlockWithReceipts, Wei.ONE);
postMergeContext.putPayloadById(zeroTxPayloadWrapper);
postMergeContext.putPayloadById(betterPayloadWrapper);
postMergeContext.putPayloadById(smallPayloadWrapper);
assertThat(postMergeContext.retrievePayloadById(payloadId))
.map(PayloadWrapper::blockWithReceipts)
.contains(betterBlockWithReceipts);
}
@Test
public void tryingToRetrieveANotYetPutPayloadIdReturnsEmpty() {
PayloadIdentifier payloadId = new PayloadIdentifier(1L);
assertThat(postMergeContext.retrieveBlockById(payloadId)).isEmpty();
assertThat(postMergeContext.retrievePayloadById(payloadId)).isEmpty();
}
@Test
public void tryingToRetrieveABlockPutButEvictedReturnsEmpty() {
PayloadIdentifier evictedPayloadId = new PayloadIdentifier(0L);
assertThat(postMergeContext.retrieveBlockById(evictedPayloadId)).isEmpty();
assertThat(postMergeContext.retrievePayloadById(evictedPayloadId)).isEmpty();
}
@Test
@ -209,6 +227,17 @@ public class PostMergeContextTest {
assertThat(postMergeContext.isSyncing()).isFalse();
}
private PayloadWrapper createPayloadWrapper(
final PayloadIdentifier firstPayloadId,
final BlockWithReceipts mockBlockWithReceipts,
final Wei blockValue) {
final var payloadWrapper = mock(PayloadWrapper.class);
when(payloadWrapper.payloadIdentifier()).thenReturn(firstPayloadId);
when(payloadWrapper.blockWithReceipts()).thenReturn(mockBlockWithReceipts);
when(payloadWrapper.blockValue()).thenReturn(blockValue);
return payloadWrapper;
}
private static BlockWithReceipts createBlockWithReceipts(
final int number, final long gasUsed, final int txCount) {
Block mockBlock = mock(Block.class, RETURNS_DEEP_STUBS);

@ -14,9 +14,9 @@
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.engine;
import org.hyperledger.besu.consensus.merge.PayloadWrapper;
import org.hyperledger.besu.consensus.merge.blockcreation.MergeMiningCoordinator;
import org.hyperledger.besu.consensus.merge.blockcreation.PayloadIdentifier;
import org.hyperledger.besu.datatypes.Wei;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.ExecutionEngineJsonRpcMethod;
@ -70,10 +70,9 @@ public abstract class AbstractEngineGetPayload extends ExecutionEngineJsonRpcMet
final PayloadIdentifier payloadId = request.getRequiredParameter(0, PayloadIdentifier.class);
mergeMiningCoordinator.finalizeProposalById(payloadId);
final Optional<BlockWithReceipts> blockWithReceipts =
mergeContext.get().retrieveBlockById(payloadId);
if (blockWithReceipts.isPresent()) {
final BlockWithReceipts proposal = blockWithReceipts.get();
final Optional<PayloadWrapper> maybePayload = mergeContext.get().retrievePayloadById(payloadId);
if (maybePayload.isPresent()) {
final BlockWithReceipts proposal = maybePayload.get().blockWithReceipts();
LOG.atDebug()
.setMessage("assembledBlock for payloadId {}: {}")
.addArgument(() -> payloadId)
@ -85,37 +84,33 @@ public abstract class AbstractEngineGetPayload extends ExecutionEngineJsonRpcMet
if (!forkValidationResult.isValid()) {
return new JsonRpcErrorResponse(request.getRequest().getId(), forkValidationResult);
}
return createResponse(request, payloadId, proposal);
return createResponse(request, maybePayload.get());
}
return new JsonRpcErrorResponse(request.getRequest().getId(), RpcErrorType.UNKNOWN_PAYLOAD);
}
protected void logProposal(
final PayloadIdentifier payloadId,
final BlockWithReceipts proposal,
final Optional<Wei> maybeReward) {
final BlockHeader proposalHeader = proposal.getHeader();
protected void logProposal(final PayloadWrapper payload) {
final BlockHeader proposalHeader = payload.blockWithReceipts().getHeader();
final float gasUsedPerc = 100.0f * proposalHeader.getGasUsed() / proposalHeader.getGasLimit();
final String message =
"Fetch block proposal by identifier: {}, hash: {}, "
+ "number: {}, coinbase: {}, transaction count: {}, gas used: {}%"
+ maybeReward.map(unused -> ", reward: {}").orElse("{}");
+ " reward: {}";
LOG.atInfo()
.setMessage(message)
.addArgument(payloadId::toHexString)
.addArgument(payload.payloadIdentifier()::toHexString)
.addArgument(proposalHeader::getHash)
.addArgument(proposalHeader::getNumber)
.addArgument(proposalHeader::getCoinbase)
.addArgument(() -> proposal.getBlock().getBody().getTransactions().size())
.addArgument(
() -> payload.blockWithReceipts().getBlock().getBody().getTransactions().size())
.addArgument(() -> String.format("%1.2f", gasUsedPerc))
.addArgument(maybeReward.map(Wei::toHumanReadableString).orElse(""))
.addArgument(payload.blockValue()::toHumanReadableString)
.log();
}
protected abstract JsonRpcResponse createResponse(
final JsonRpcRequestContext request,
final PayloadIdentifier payloadId,
final BlockWithReceipts blockWithReceipts);
final JsonRpcRequestContext request, final PayloadWrapper payload);
}

@ -14,17 +14,14 @@
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.engine;
import org.hyperledger.besu.consensus.merge.PayloadWrapper;
import org.hyperledger.besu.consensus.merge.blockcreation.MergeMiningCoordinator;
import org.hyperledger.besu.consensus.merge.blockcreation.PayloadIdentifier;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcSuccessResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.results.BlockResultFactory;
import org.hyperledger.besu.ethereum.core.BlockWithReceipts;
import java.util.Optional;
import io.vertx.core.Vertx;
@ -46,12 +43,10 @@ public class EngineGetPayloadV1 extends AbstractEngineGetPayload {
@Override
protected JsonRpcResponse createResponse(
final JsonRpcRequestContext request,
final PayloadIdentifier payloadId,
final BlockWithReceipts blockWithReceipts) {
final JsonRpcRequestContext request, final PayloadWrapper payload) {
final var result =
blockResultFactory.payloadTransactionCompleteV1(blockWithReceipts.getBlock());
logProposal(payloadId, blockWithReceipts, Optional.empty());
blockResultFactory.payloadTransactionCompleteV1(payload.blockWithReceipts().getBlock());
logProposal(payload);
return new JsonRpcSuccessResponse(request.getRequest().getId(), result);
}
}

@ -14,9 +14,8 @@
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.engine;
import org.hyperledger.besu.consensus.merge.PayloadWrapper;
import org.hyperledger.besu.consensus.merge.blockcreation.MergeMiningCoordinator;
import org.hyperledger.besu.consensus.merge.blockcreation.PayloadIdentifier;
import org.hyperledger.besu.datatypes.Wei;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext;
@ -24,7 +23,6 @@ import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcRespon
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcSuccessResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.RpcErrorType;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.results.BlockResultFactory;
import org.hyperledger.besu.ethereum.core.BlockWithReceipts;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.mainnet.ScheduledProtocolSpec;
import org.hyperledger.besu.ethereum.mainnet.ValidationResult;
@ -61,12 +59,9 @@ public class EngineGetPayloadV2 extends AbstractEngineGetPayload {
@Override
protected JsonRpcResponse createResponse(
final JsonRpcRequestContext request,
final PayloadIdentifier payloadId,
final BlockWithReceipts blockWithReceipts) {
final var result = blockResultFactory.payloadTransactionCompleteV2(blockWithReceipts);
logProposal(
payloadId, blockWithReceipts, Optional.of(Wei.fromHexString(result.getBlockValue())));
final JsonRpcRequestContext request, final PayloadWrapper payload) {
final var result = blockResultFactory.payloadTransactionCompleteV2(payload);
logProposal(payload);
return new JsonRpcSuccessResponse(request.getRequest().getId(), result);
}

@ -14,8 +14,8 @@
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.engine;
import org.hyperledger.besu.consensus.merge.PayloadWrapper;
import org.hyperledger.besu.consensus.merge.blockcreation.MergeMiningCoordinator;
import org.hyperledger.besu.consensus.merge.blockcreation.PayloadIdentifier;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext;
@ -23,7 +23,6 @@ import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcRespon
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcSuccessResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.RpcErrorType;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.results.BlockResultFactory;
import org.hyperledger.besu.ethereum.core.BlockWithReceipts;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.mainnet.ScheduledProtocolSpec;
import org.hyperledger.besu.ethereum.mainnet.ValidationResult;
@ -60,13 +59,10 @@ public class EngineGetPayloadV3 extends AbstractEngineGetPayload {
@Override
protected JsonRpcResponse createResponse(
final JsonRpcRequestContext request,
final PayloadIdentifier payloadId,
final BlockWithReceipts blockWithReceipts) {
final JsonRpcRequestContext request, final PayloadWrapper payload) {
return new JsonRpcSuccessResponse(
request.getRequest().getId(),
blockResultFactory.payloadTransactionCompleteV3(blockWithReceipts));
request.getRequest().getId(), blockResultFactory.payloadTransactionCompleteV3(payload));
}
@Override

@ -14,8 +14,8 @@
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.engine;
import org.hyperledger.besu.consensus.merge.PayloadWrapper;
import org.hyperledger.besu.consensus.merge.blockcreation.MergeMiningCoordinator;
import org.hyperledger.besu.consensus.merge.blockcreation.PayloadIdentifier;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext;
@ -23,7 +23,6 @@ import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcRespon
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcSuccessResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.RpcErrorType;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.results.BlockResultFactory;
import org.hyperledger.besu.ethereum.core.BlockWithReceipts;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.mainnet.ScheduledProtocolSpec;
import org.hyperledger.besu.ethereum.mainnet.ValidationResult;
@ -60,13 +59,10 @@ public class EngineGetPayloadV4 extends AbstractEngineGetPayload {
@Override
protected JsonRpcResponse createResponse(
final JsonRpcRequestContext request,
final PayloadIdentifier payloadId,
final BlockWithReceipts blockWithReceipts) {
final JsonRpcRequestContext request, final PayloadWrapper payload) {
return new JsonRpcSuccessResponse(
request.getRequest().getId(),
blockResultFactory.payloadTransactionCompleteV4(blockWithReceipts));
request.getRequest().getId(), blockResultFactory.payloadTransactionCompleteV4(payload));
}
@Override

@ -17,16 +17,14 @@ package org.hyperledger.besu.ethereum.api.jsonrpc.internal.results;
import static org.hyperledger.besu.ethereum.mainnet.requests.RequestUtil.getDepositRequests;
import static org.hyperledger.besu.ethereum.mainnet.requests.RequestUtil.getWithdrawalRequests;
import org.hyperledger.besu.consensus.merge.PayloadWrapper;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.datatypes.Wei;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.results.EngineGetPayloadBodiesResultV1.PayloadBody;
import org.hyperledger.besu.ethereum.api.query.BlockWithMetadata;
import org.hyperledger.besu.ethereum.api.query.TransactionWithMetadata;
import org.hyperledger.besu.ethereum.core.Block;
import org.hyperledger.besu.ethereum.core.BlockBody;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.BlockValueCalculator;
import org.hyperledger.besu.ethereum.core.BlockWithReceipts;
import org.hyperledger.besu.ethereum.core.encoding.EncodingContext;
import org.hyperledger.besu.ethereum.core.encoding.TransactionEncoder;
@ -108,8 +106,8 @@ public class BlockResultFactory {
return new EngineGetPayloadResultV1(block.getHeader(), txs);
}
public EngineGetPayloadResultV2 payloadTransactionCompleteV2(
final BlockWithReceipts blockWithReceipts) {
public EngineGetPayloadResultV2 payloadTransactionCompleteV2(final PayloadWrapper payload) {
final var blockWithReceipts = payload.blockWithReceipts();
final List<String> txs =
blockWithReceipts.getBlock().getBody().getTransactions().stream()
.map(
@ -118,12 +116,11 @@ public class BlockResultFactory {
.map(Bytes::toHexString)
.collect(Collectors.toList());
final Wei blockValue = new BlockValueCalculator().calculateBlockValue(blockWithReceipts);
return new EngineGetPayloadResultV2(
blockWithReceipts.getHeader(),
txs,
blockWithReceipts.getBlock().getBody().getWithdrawals(),
Quantity.create(blockValue));
Quantity.create(payload.blockValue()));
}
public EngineGetPayloadBodiesResultV1 payloadBodiesCompleteV1(
@ -135,8 +132,8 @@ public class BlockResultFactory {
return new EngineGetPayloadBodiesResultV1(payloadBodies);
}
public EngineGetPayloadResultV3 payloadTransactionCompleteV3(
final BlockWithReceipts blockWithReceipts) {
public EngineGetPayloadResultV3 payloadTransactionCompleteV3(final PayloadWrapper payload) {
final var blockWithReceipts = payload.blockWithReceipts();
final List<String> txs =
blockWithReceipts.getBlock().getBody().getTransactions().stream()
.map(
@ -145,20 +142,18 @@ public class BlockResultFactory {
.map(Bytes::toHexString)
.collect(Collectors.toList());
final Wei blockValue = new BlockValueCalculator().calculateBlockValue(blockWithReceipts);
final BlobsBundleV1 blobsBundleV1 =
new BlobsBundleV1(blockWithReceipts.getBlock().getBody().getTransactions());
return new EngineGetPayloadResultV3(
blockWithReceipts.getHeader(),
txs,
blockWithReceipts.getBlock().getBody().getWithdrawals(),
Quantity.create(blockValue),
Quantity.create(payload.blockValue()),
blobsBundleV1);
}
public EngineGetPayloadResultV4 payloadTransactionCompleteV4(
final BlockWithReceipts blockWithReceipts) {
public EngineGetPayloadResultV4 payloadTransactionCompleteV4(final PayloadWrapper payload) {
final var blockWithReceipts = payload.blockWithReceipts();
final List<String> txs =
blockWithReceipts.getBlock().getBody().getTransactions().stream()
.map(
@ -167,8 +162,6 @@ public class BlockResultFactory {
.map(Bytes::toHexString)
.collect(Collectors.toList());
final Wei blockValue = new BlockValueCalculator().calculateBlockValue(blockWithReceipts);
final BlobsBundleV1 blobsBundleV1 =
new BlobsBundleV1(blockWithReceipts.getBlock().getBody().getTransactions());
return new EngineGetPayloadResultV4(
@ -177,7 +170,7 @@ public class BlockResultFactory {
blockWithReceipts.getBlock().getBody().getWithdrawals(),
getDepositRequests(blockWithReceipts.getBlock().getBody().getRequests()),
getWithdrawalRequests(blockWithReceipts.getBlock().getBody().getRequests()),
Quantity.create(blockValue),
Quantity.create(payload.blockValue()),
blobsBundleV1);
}

@ -20,6 +20,7 @@ import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import org.hyperledger.besu.consensus.merge.MergeContext;
import org.hyperledger.besu.consensus.merge.PayloadWrapper;
import org.hyperledger.besu.consensus.merge.blockcreation.MergeMiningCoordinator;
import org.hyperledger.besu.consensus.merge.blockcreation.PayloadIdentifier;
import org.hyperledger.besu.crypto.KeyPair;
@ -97,6 +98,8 @@ public abstract class AbstractEngineGetPayloadTest extends AbstractScheduledApiT
new Block(mockHeader, new BlockBody(Collections.emptyList(), Collections.emptyList()));
protected static final BlockWithReceipts mockBlockWithReceipts =
new BlockWithReceipts(mockBlock, Collections.emptyList());
protected static final PayloadWrapper mockPayload =
new PayloadWrapper(mockPid, mockBlockWithReceipts);
private static final Block mockBlockWithWithdrawals =
new Block(
mockHeader,
@ -115,9 +118,13 @@ public abstract class AbstractEngineGetPayloadTest extends AbstractScheduledApiT
Optional.of(Collections.emptyList())));
protected static final BlockWithReceipts mockBlockWithReceiptsAndWithdrawals =
new BlockWithReceipts(mockBlockWithWithdrawals, Collections.emptyList());
protected static final PayloadWrapper mockPayloadWithWithdrawals =
new PayloadWrapper(mockPid, mockBlockWithReceiptsAndWithdrawals);
protected static final BlockWithReceipts mockBlockWithReceiptsAndDepositRequests =
new BlockWithReceipts(mockBlockWithDepositRequests, Collections.emptyList());
protected static final PayloadWrapper mockPayloadWithDepositRequests =
new PayloadWrapper(mockPid, mockBlockWithReceiptsAndDepositRequests);
@Mock protected ProtocolContext protocolContext;
@ -130,7 +137,7 @@ public abstract class AbstractEngineGetPayloadTest extends AbstractScheduledApiT
@Override
public void before() {
super.before();
when(mergeContext.retrieveBlockById(mockPid)).thenReturn(Optional.of(mockBlockWithReceipts));
when(mergeContext.retrievePayloadById(mockPid)).thenReturn(Optional.of(mockPayload));
when(protocolContext.safeConsensusContext(Mockito.any())).thenReturn(Optional.of(mergeContext));
if (methodFactory.isPresent()) {
this.method =

@ -48,9 +48,7 @@ public class EngineGetPayloadV2Test extends AbstractEngineGetPayloadTest {
@Override
public void before() {
super.before();
lenient()
.when(mergeContext.retrieveBlockById(mockPid))
.thenReturn(Optional.of(mockBlockWithReceipts));
lenient().when(mergeContext.retrievePayloadById(mockPid)).thenReturn(Optional.of(mockPayload));
when(protocolContext.safeConsensusContext(Mockito.any())).thenReturn(Optional.of(mergeContext));
this.method =
new EngineGetPayloadV2(
@ -72,8 +70,8 @@ public class EngineGetPayloadV2Test extends AbstractEngineGetPayloadTest {
@Test
public void shouldReturnBlockForKnownPayloadId() {
// should return withdrawals for a post-Shanghai block
when(mergeContext.retrieveBlockById(mockPid))
.thenReturn(Optional.of(mockBlockWithReceiptsAndWithdrawals));
when(mergeContext.retrievePayloadById(mockPid))
.thenReturn(Optional.of(mockPayloadWithWithdrawals));
final var resp = resp(RpcMethod.ENGINE_GET_PAYLOAD_V2.getMethodName(), mockPid);
assertThat(resp).isInstanceOf(JsonRpcSuccessResponse.class);

@ -21,6 +21,7 @@ import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import org.hyperledger.besu.consensus.merge.PayloadWrapper;
import org.hyperledger.besu.consensus.merge.blockcreation.PayloadIdentifier;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.datatypes.BlobGas;
@ -67,9 +68,7 @@ public class EngineGetPayloadV3Test extends AbstractEngineGetPayloadTest {
@Override
public void before() {
super.before();
lenient()
.when(mergeContext.retrieveBlockById(mockPid))
.thenReturn(Optional.of(mockBlockWithReceipts));
lenient().when(mergeContext.retrievePayloadById(mockPid)).thenReturn(Optional.of(mockPayload));
when(protocolContext.safeConsensusContext(Mockito.any())).thenReturn(Optional.of(mergeContext));
this.method =
new EngineGetPayloadV3(
@ -132,8 +131,10 @@ public class EngineGetPayloadV3Test extends AbstractEngineGetPayloadTest {
Optional.of(Collections.emptyList()),
Optional.of(Collections.emptyList()))),
List.of(blobReceipt));
PayloadWrapper payloadPostCancun = new PayloadWrapper(postCancunPid, postCancunBlock);
when(mergeContext.retrieveBlockById(postCancunPid)).thenReturn(Optional.of(postCancunBlock));
when(mergeContext.retrievePayloadById(postCancunPid))
.thenReturn(Optional.of(payloadPostCancun));
final var resp = resp(RpcMethod.ENGINE_GET_PAYLOAD_V3.getMethodName(), postCancunPid);
assertThat(resp).isInstanceOf(JsonRpcSuccessResponse.class);

@ -21,6 +21,7 @@ import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import org.hyperledger.besu.consensus.merge.PayloadWrapper;
import org.hyperledger.besu.consensus.merge.blockcreation.PayloadIdentifier;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.datatypes.BlobGas;
@ -70,8 +71,8 @@ public class EngineGetPayloadV4Test extends AbstractEngineGetPayloadTest {
public void before() {
super.before();
lenient()
.when(mergeContext.retrieveBlockById(mockPid))
.thenReturn(Optional.of(mockBlockWithReceiptsAndDepositRequests));
.when(mergeContext.retrievePayloadById(mockPid))
.thenReturn(Optional.of(mockPayloadWithDepositRequests));
when(protocolContext.safeConsensusContext(Mockito.any())).thenReturn(Optional.of(mergeContext));
this.method =
new EngineGetPayloadV4(
@ -134,8 +135,9 @@ public class EngineGetPayloadV4Test extends AbstractEngineGetPayloadTest {
Optional.of(Collections.emptyList()),
Optional.of(Collections.emptyList()))),
List.of(blobReceipt));
PayloadWrapper payload = new PayloadWrapper(payloadIdentifier, block);
when(mergeContext.retrieveBlockById(payloadIdentifier)).thenReturn(Optional.of(block));
when(mergeContext.retrievePayloadById(payloadIdentifier)).thenReturn(Optional.of(payload));
final var resp = resp(RpcMethod.ENGINE_GET_PAYLOAD_V4.getMethodName(), payloadIdentifier);
assertThat(resp).isInstanceOf(JsonRpcSuccessResponse.class);

@ -20,7 +20,7 @@ import java.util.List;
public class BlockValueCalculator {
public Wei calculateBlockValue(final BlockWithReceipts blockWithReceipts) {
public static Wei calculateBlockValue(final BlockWithReceipts blockWithReceipts) {
final Block block = blockWithReceipts.getBlock();
final List<Transaction> txs = block.getBody().getTransactions();
final List<TransactionReceipt> receipts = blockWithReceipts.getReceipts();

@ -45,7 +45,6 @@ import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
@ -364,27 +363,28 @@ public abstract class DiffBasedWorldStateUpdateAccumulator<ACCOUNT extends DiffB
return;
}
final TreeSet<Map.Entry<UInt256, UInt256>> entries =
new TreeSet<>(Map.Entry.comparingByKey());
entries.addAll(updatedAccount.getUpdatedStorage().entrySet());
// parallel stream here may cause database corruption
entries.forEach(
storageUpdate -> {
final UInt256 keyUInt = storageUpdate.getKey();
final StorageSlotKey slotKey =
new StorageSlotKey(hashAndSaveSlotPreImage(keyUInt), Optional.of(keyUInt));
final UInt256 value = storageUpdate.getValue();
final DiffBasedValue<UInt256> pendingValue = pendingStorageUpdates.get(slotKey);
if (pendingValue == null) {
pendingStorageUpdates.put(
slotKey,
new DiffBasedValue<>(
updatedAccount.getOriginalStorageValue(keyUInt), value));
} else {
pendingValue.setUpdated(value);
}
});
updatedAccount
.getUpdatedStorage()
.entrySet()
.forEach(
storageUpdate -> {
final UInt256 keyUInt = storageUpdate.getKey();
final StorageSlotKey slotKey =
new StorageSlotKey(
hashAndSaveSlotPreImage(keyUInt), Optional.of(keyUInt));
final UInt256 value = storageUpdate.getValue();
final DiffBasedValue<UInt256> pendingValue =
pendingStorageUpdates.get(slotKey);
if (pendingValue == null) {
pendingStorageUpdates.put(
slotKey,
new DiffBasedValue<>(
updatedAccount.getOriginalStorageValue(keyUInt), value));
} else {
pendingValue.setUpdated(value);
}
});
updatedAccount.getUpdatedStorage().clear();

@ -23,6 +23,9 @@ import org.immutables.value.Value;
public interface DataStorageConfiguration {
long DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD = 512;
boolean DEFAULT_BONSAI_LIMIT_TRIE_LOGS_ENABLED = true;
long MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT = DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD;
int DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE = 30_000;
boolean DEFAULT_RECEIPT_COMPACTION_ENABLED = false;
DataStorageConfiguration DEFAULT_CONFIG =
@ -56,6 +59,16 @@ public interface DataStorageConfiguration {
Long getBonsaiMaxLayersToLoad();
@Value.Default
default boolean getBonsaiLimitTrieLogsEnabled() {
return DEFAULT_BONSAI_LIMIT_TRIE_LOGS_ENABLED;
}
@Value.Default
default int getBonsaiTrieLogPruningWindowSize() {
return DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
}
@Value.Default
default boolean getReceiptCompactionEnabled() {
return DEFAULT_RECEIPT_COMPACTION_ENABLED;
@ -69,9 +82,6 @@ public interface DataStorageConfiguration {
@Value.Immutable
interface Unstable {
boolean DEFAULT_BONSAI_LIMIT_TRIE_LOGS_ENABLED = false;
long MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT = DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD;
int DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE = 30_000;
boolean DEFAULT_BONSAI_FULL_FLAT_DB_ENABLED = true;
boolean DEFAULT_BONSAI_CODE_USING_CODE_HASH_ENABLED = true;
@ -81,16 +91,6 @@ public interface DataStorageConfiguration {
DataStorageConfiguration.Unstable DEFAULT_PARTIAL =
ImmutableDataStorageConfiguration.Unstable.builder().bonsaiFullFlatDbEnabled(false).build();
@Value.Default
default boolean getBonsaiLimitTrieLogsEnabled() {
return DEFAULT_BONSAI_LIMIT_TRIE_LOGS_ENABLED;
}
@Value.Default
default int getBonsaiTrieLogPruningWindowSize() {
return DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
}
@Value.Default
default boolean getBonsaiFullFlatDbEnabled() {
return DEFAULT_BONSAI_FULL_FLAT_DB_ENABLED;

@ -41,8 +41,8 @@ public class BlockValueCalculatorTest {
final Block block =
new Block(blockHeader, new BlockBody(Collections.emptyList(), Collections.emptyList()));
Wei blockValue =
new BlockValueCalculator()
.calculateBlockValue(new BlockWithReceipts(block, Collections.emptyList()));
BlockValueCalculator.calculateBlockValue(
new BlockWithReceipts(block, Collections.emptyList()));
assertThat(blockValue).isEqualTo(Wei.ZERO);
}
@ -85,9 +85,8 @@ public class BlockValueCalculatorTest {
final Block block =
new Block(blockHeader, new BlockBody(List.of(tx1, tx2, tx3), Collections.emptyList()));
Wei blockValue =
new BlockValueCalculator()
.calculateBlockValue(
new BlockWithReceipts(block, List.of(receipt1, receipt2, receipt3)));
BlockValueCalculator.calculateBlockValue(
new BlockWithReceipts(block, List.of(receipt1, receipt2, receipt3)));
// Block value = 71 * 1 + (143-71) * 2 + (214-143) * 5 = 1427
assertThat(blockValue).isEqualTo(Wei.of(570L));
}
@ -114,8 +113,7 @@ public class BlockValueCalculatorTest {
final Block block =
new Block(blockHeader, new BlockBody(List.of(tx1), Collections.emptyList()));
Wei blockValue =
new BlockValueCalculator()
.calculateBlockValue(new BlockWithReceipts(block, List.of(receipt1)));
BlockValueCalculator.calculateBlockValue(new BlockWithReceipts(block, List.of(receipt1)));
// Block value =~ max_long * 2
assertThat(blockValue).isGreaterThan(Wei.of(Long.MAX_VALUE));
}

@ -189,7 +189,7 @@ public class BackwardSyncAlgSpec {
completionCaptor.getValue().onInitialSyncCompleted();
voidCompletableFuture.get(500, TimeUnit.MILLISECONDS);
voidCompletableFuture.get(800, TimeUnit.MILLISECONDS);
assertThat(voidCompletableFuture).isCompleted();
verify(context.getSyncState()).unsubscribeTTDReached(88L);

@ -50,9 +50,6 @@ dependencies {
implementation('io.tmio:tuweni-devp2p') {
exclude group:'ch.qos.logback', module:'logback-classic'
}
implementation('io.tmio:tuweni-dns-discovery'){
exclude group:'ch.qos.logback', module:'logback-classic'
}
implementation 'io.tmio:tuweni-io'
implementation 'io.tmio:tuweni-rlp'
implementation 'io.tmio:tuweni-units'
@ -84,6 +81,7 @@ dependencies {
}
testImplementation 'io.vertx:vertx-codegen'
testImplementation 'io.vertx:vertx-unit'
testImplementation 'io.vertx:vertx-junit5'
testImplementation 'org.assertj:assertj-core'
testImplementation 'org.awaitility:awaitility'
testImplementation 'org.junit.jupiter:junit-jupiter'

@ -0,0 +1,107 @@
/*
* Copyright contributors to Hyperledger Besu.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.p2p.discovery.dns;
import java.util.List;
import java.util.Optional;
import io.vertx.core.AbstractVerticle;
import org.apache.tuweni.devp2p.EthereumNodeRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
// Adapted from https://github.com/tmio/tuweni and licensed under Apache 2.0
/**
 * Resolves DNS records over time, refreshing records. This is written as a Vertx Verticle which
 * allows to run outside the Vertx event loop.
 */
public class DNSDaemon extends AbstractVerticle {
  private static final Logger LOG = LoggerFactory.getLogger(DNSDaemon.class);
  private final String enrLink;
  private final long seq;
  private final long initialDelay;
  private final long delay;
  private final Optional<DNSDaemonListener> listener;
  private final Optional<String> dnsServer;
  // Timer id of the periodic refresh task; present only when a positive delay was configured.
  private Optional<Long> periodicTaskId = Optional.empty();
  // Created in start(); may still be null in stop() if start() failed or never ran.
  private DNSResolver dnsResolver;

  /**
   * Creates a new DNSDaemon.
   *
   * @param enrLink the ENR link to start with, of the form enrtree://PUBKEY@domain
   * @param listener Listener notified when records are read and whenever they are updated.
   * @param seq the sequence number of the root record. If the root record seq is higher, proceed
   *     with visit.
   * @param initialDelay the delay in milliseconds before the first poll of DNS records.
   * @param delay the delay in milliseconds at which to poll DNS records. If negative or zero, it
   *     runs only once.
   * @param dnsServer the DNS server to use for DNS query. If null, the default DNS server will be
   *     used.
   */
  public DNSDaemon(
      final String enrLink,
      final DNSDaemonListener listener,
      final long seq,
      final long initialDelay,
      final long delay,
      final String dnsServer) {
    this.enrLink = enrLink;
    this.listener = Optional.ofNullable(listener);
    this.seq = seq;
    this.initialDelay = initialDelay;
    this.delay = delay;
    this.dnsServer = Optional.ofNullable(dnsServer);
  }

  /** Starts the DNSDaemon. Must be deployed as a Vert.x verticle so {@code vertx} is injected. */
  @Override
  public void start() {
    if (vertx == null) {
      throw new IllegalStateException("DNSDaemon must be deployed as a vertx verticle.");
    }
    LOG.info("Starting DNSDaemon for {}, using {} DNS host.", enrLink, dnsServer.orElse("default"));
    dnsResolver = new DNSResolver(vertx, enrLink, seq, dnsServer);
    if (delay > 0) {
      periodicTaskId = Optional.of(vertx.setPeriodic(initialDelay, delay, this::refreshENRRecords));
    } else {
      // do one-shot resolution
      refreshENRRecords(0L);
    }
  }

  /** Stops the DNSDaemon: cancels the periodic task (if any) and closes the resolver. */
  @Override
  public void stop() {
    LOG.info("Stopping DNSDaemon for {}", enrLink);
    periodicTaskId.ifPresent(vertx::cancelTimer);
    // Guard against NPE: undeploy invokes stop() even when start() threw before the
    // resolver was created (e.g. the "must be deployed as a vertx verticle" case).
    if (dnsResolver != null) {
      dnsResolver.close();
    }
  }

  /**
   * Refresh ENR records by calling dnsResolver and updating the listeners.
   *
   * @param taskId the task id of the periodic task (unused; required by the Vert.x timer callback
   *     signature)
   */
  void refreshENRRecords(final Long taskId) {
    LOG.debug("Refreshing DNS records");
    final long startTime = System.nanoTime();
    final List<EthereumNodeRecord> ethereumNodeRecords = dnsResolver.collectAll();
    final long endTime = System.nanoTime();
    LOG.debug("Time taken to DNSResolver.collectAll: {} ms", (endTime - startTime) / 1_000_000);
    listener.ifPresent(it -> it.newRecords(dnsResolver.sequence(), ethereumNodeRecords));
  }
}

@ -0,0 +1,32 @@
/*
* Copyright contributors to Hyperledger Besu.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.p2p.discovery.dns;
import java.util.List;
import org.apache.tuweni.devp2p.EthereumNodeRecord;
// Adapted from https://github.com/tmio/tuweni and licensed under Apache 2.0
/**
 * Callback listening to updates of the DNS records. Invoked by {@code DNSDaemon} after each
 * successful resolution pass.
 */
@FunctionalInterface
public interface DNSDaemonListener {
  /**
   * Callback called when the seq is updated on the DNS server.
   *
   * @param seq the update identifier (sequence number) of the root record at the time the records
   *     were collected
   * @param records the Ethereum node records stored on the server
   */
  void newRecords(long seq, List<EthereumNodeRecord> records);
}

@ -0,0 +1,279 @@
/*
* Copyright contributors to Hyperledger Besu.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.p2p.discovery.dns;
import static org.hyperledger.besu.ethereum.p2p.discovery.dns.KVReader.readKV;
import java.net.URI;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.crypto.SECP256K1;
import org.apache.tuweni.devp2p.EthereumNodeRecord;
import org.apache.tuweni.io.Base32;
import org.apache.tuweni.io.Base64URLSafe;
import org.bouncycastle.math.ec.ECPoint;
// Adapted from https://github.com/tmio/tuweni and licensed under Apache 2.0
/**
 * Intermediate format to write DNS entries. Models the four record types of an EIP-1459 node
 * tree: root, branch, leaf ENR, and link.
 */
public interface DNSEntry {
  /**
   * Read a DNS entry from a String.
   *
   * @param serialized the serialized form of a DNS entry
   * @return DNS entry if found
   * @throws IllegalArgumentException if the record cannot be read
   */
  static DNSEntry readDNSEntry(final String serialized) {
    // DNS TXT payloads may arrive wrapped in double quotes; strip them before dispatching.
    final String record = trimQuotes(serialized);
    final String prefix = getPrefix(record);
    return switch (prefix) {
      case "enrtree-root" -> new ENRTreeRoot(readKV(record));
      // skip past "enrtree-branch:" (prefix plus the ':' separator) to the entry list
      case "enrtree-branch" -> new ENRTree(record.substring(prefix.length() + 1));
      case "enr" -> new ENRNode(readKV(record));
      case "enrtree" -> new ENRTreeLink(record);
      default ->
          throw new IllegalArgumentException(
              serialized + " should contain enrtree-branch, enr, enrtree-root or enrtree");
    };
  }

  // Removes a single pair of surrounding double quotes, if both are present.
  private static String trimQuotes(final String str) {
    if (str.startsWith("\"") && str.endsWith("\"")) {
      return str.substring(1, str.length() - 1);
    }
    return str;
  }

  // Returns the text before the first ':' (the record-type discriminator), or "" if absent.
  private static String getPrefix(final String input) {
    final String[] parts = input.split(":", 2);
    return parts.length > 0 ? parts[0] : "";
  }

  /** Represents a node in the ENR record. */
  class ENRNode implements DNSEntry {
    private final EthereumNodeRecord nodeRecord;

    /**
     * Constructs ENRNode with the given attributes.
     *
     * @param attrs the attributes of the node; must contain an "enr" key holding a URL-safe
     *     base64 encoded RLP node record
     */
    public ENRNode(final Map<String, String> attrs) {
      if (attrs == null) {
        throw new IllegalArgumentException("ENRNode attributes cannot be null");
      }
      // decode base64url -> RLP bytes -> EthereumNodeRecord; a missing "enr" key is rejected
      nodeRecord =
          Optional.ofNullable(attrs.get("enr"))
              .map(Base64URLSafe::decode)
              .map(EthereumNodeRecord::fromRLP)
              .orElseThrow(() -> new IllegalArgumentException("Invalid ENR record"));
    }

    /**
     * Ethereum node record.
     *
     * @return the instance of EthereumNodeRecord
     */
    public EthereumNodeRecord nodeRecord() {
      return nodeRecord;
    }

    @Override
    public String toString() {
      return nodeRecord.toString();
    }
  }

  /** Root of the ENR tree */
  class ENRTreeRoot implements DNSEntry {
    private final String version;
    private final Long seq;
    private final SECP256K1.Signature sig;
    private final String enrRoot;
    private final String linkRoot;

    /**
     * Creates a new ENRTreeRoot
     *
     * @param attrs The attributes of the root; requires "enrtree-root" (version), "seq", "sig",
     *     "e" (ENR subtree root) and "l" (link subtree root)
     */
    public ENRTreeRoot(final Map<String, String> attrs) {
      if (attrs == null) {
        throw new IllegalArgumentException("ENRNode attributes cannot be null");
      }
      version =
          Optional.ofNullable(attrs.get("enrtree-root"))
              .orElseThrow(() -> new IllegalArgumentException("Missing attribute enrtree-root"));
      seq =
          Optional.ofNullable(attrs.get("seq"))
              .map(Long::parseLong)
              .orElseThrow(() -> new IllegalArgumentException("Missing attribute seq"));
      // Right-pad the decoded signature with zero bytes up to the 65-byte length that
      // SECP256K1.Signature.fromBytes expects. NOTE(review): assumes the padded byte acts as the
      // recovery id — TODO confirm against the Tuweni signature format.
      sig =
          Optional.ofNullable(attrs.get("sig"))
              .map(Base64URLSafe::decode)
              .map(
                  sigBytes ->
                      SECP256K1.Signature.fromBytes(
                          Bytes.concatenate(
                              sigBytes, Bytes.wrap(new byte[Math.max(0, 65 - sigBytes.size())]))))
              .orElseThrow(() -> new IllegalArgumentException("Missing attribute sig"));
      enrRoot =
          Optional.ofNullable(attrs.get("e"))
              .orElseThrow(() -> new IllegalArgumentException("Missing attribute e"));
      linkRoot =
          Optional.ofNullable(attrs.get("l"))
              .orElseThrow(() -> new IllegalArgumentException("Missing attribute l"));
    }

    /**
     * Gets sequence
     *
     * @return sequence
     */
    public Long seq() {
      return seq;
    }

    /**
     * Link root.
     *
     * @return the link root.
     */
    public String linkRoot() {
      return linkRoot;
    }

    /**
     * ENR root.
     *
     * @return the enr root.
     */
    public String enrRoot() {
      return enrRoot;
    }

    /**
     * Signature.
     *
     * @return SECP256K1 signature
     */
    public SECP256K1.Signature sig() {
      return sig;
    }

    @Override
    public String toString() {
      return String.format(
          "enrtree-root:%s e=%s l=%s seq=%d sig=%s",
          version, enrRoot, linkRoot, seq, Base64URLSafe.encode(sig.bytes()));
    }

    /**
     * Returns the signed content of the root (everything except the sig attribute).
     *
     * @return the signed content
     */
    public String signedContent() {
      return String.format("enrtree-root:%s e=%s l=%s seq=%d", version, enrRoot, linkRoot, seq);
    }
  }

  /** Represents a branch in the ENR record. */
  class ENRTree implements DNSEntry {
    private final List<String> entries;

    /**
     * Constructs ENRTree with the given entries.
     *
     * @param entriesAsString the comma-separated entries of the branch
     */
    public ENRTree(final String entriesAsString) {
      // split on commas (and stray quote characters); the length filter drops empty tokens and
      // quote fragments left over from the TXT record framing
      entries =
          Arrays.stream(entriesAsString.split("[,\"]"))
              .filter(it -> it.length() > 4)
              .collect(Collectors.toList());
    }

    /**
     * Entries of the branch.
     *
     * @return the entries of the branch
     */
    public List<String> entries() {
      return entries;
    }

    @Override
    public String toString() {
      return "enrtree-branch:" + String.join(",", entries);
    }
  }

  /** Class representing an ENR Tree link */
  class ENRTreeLink implements DNSEntry {
    private final String domainName;
    private final String encodedPubKey;
    private final SECP256K1.PublicKey pubKey;

    /**
     * Creates a new ENRTreeLink
     *
     * @param enrTreeLink The URI representing ENR Tree link, of the form enrtree://PUBKEY@domain.
     *     NOTE(review): assumes the URI carries a user-info part; a link without '@' would make
     *     getUserInfo() return null and fail in fromBase32 — TODO confirm callers validate.
     */
    public ENRTreeLink(final String enrTreeLink) {
      final URI uri = URI.create(enrTreeLink);
      this.domainName = uri.getHost();
      this.encodedPubKey = uri.getUserInfo();
      this.pubKey = fromBase32(encodedPubKey);
    }

    // Decodes a base32 compressed EC point, re-encodes it uncompressed, and drops the leading
    // 0x04 marker byte to obtain the raw 64-byte public key.
    private static SECP256K1.PublicKey fromBase32(final String base32) {
      final byte[] keyBytes = Base32.decodeBytes(base32);
      final ECPoint ecPoint = SECP256K1.Parameters.CURVE.getCurve().decodePoint(keyBytes);
      return SECP256K1.PublicKey.fromBytes(Bytes.wrap(ecPoint.getEncoded(false)).slice(1));
    }

    /**
     * Decoded SECP256K1 public key.
     *
     * @return derived SECP256K1.PublicKey
     */
    public SECP256K1.PublicKey publicKey() {
      return pubKey;
    }

    /**
     * Domain name.
     *
     * @return the domain name
     */
    public String domainName() {
      return domainName;
    }

    @Override
    public String toString() {
      return String.format("enrtree://%s@%s", encodedPubKey, domainName);
    }
  }
}

@ -0,0 +1,237 @@
/*
* Copyright contributors to Hyperledger Besu.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.p2p.discovery.dns;
import org.hyperledger.besu.crypto.Hash;
import org.hyperledger.besu.ethereum.p2p.discovery.dns.DNSEntry.ENRNode;
import org.hyperledger.besu.ethereum.p2p.discovery.dns.DNSEntry.ENRTreeLink;
import org.hyperledger.besu.ethereum.p2p.discovery.dns.DNSEntry.ENRTreeRoot;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;
import com.google.common.base.Splitter;
import io.vertx.core.Vertx;
import io.vertx.core.dns.DnsClient;
import io.vertx.core.dns.DnsClientOptions;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.apache.tuweni.crypto.SECP256K1;
import org.apache.tuweni.devp2p.EthereumNodeRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
// Adapted from https://github.com/tmio/tuweni and licensed under Apache 2.0
/** Resolves a set of ENR nodes from a host name. */
public class DNSResolver implements AutoCloseable {
  private static final Logger LOG = LoggerFactory.getLogger(DNSResolver.class);
  // Single thread serializes TXT lookups; the latch-based blocking below happens on it,
  // never on a vertx event-loop thread.
  private final ExecutorService rawTxtRecordsExecutor = Executors.newSingleThreadExecutor();
  private final String enrLink;
  // Highest root sequence number seen so far; a tree is only visited when its root seq is higher.
  private long seq;
  private final DnsClient dnsClient;

  /**
   * Creates a new DNSResolver.
   *
   * @param vertx Vertx instance which is used to create DNS Client
   * @param enrLink the ENR link to start with, of the form enrtree://PUBKEY@domain
   * @param seq the sequence number of the root record. If the root record seq is higher, proceed
   *     with visit.
   * @param dnsServer the DNS server to use for DNS query. If empty, the default DNS server will be
   *     used.
   */
  public DNSResolver(
      final Vertx vertx, final String enrLink, final long seq, final Optional<String> dnsServer) {
    this.enrLink = enrLink;
    this.seq = seq;
    final DnsClientOptions dnsClientOptions =
        dnsServer.map(DNSResolver::buildDnsClientOptions).orElseGet(DnsClientOptions::new);
    dnsClient = vertx.createDnsClient(dnsClientOptions);
  }

  /**
   * Builds DnsClientOptions from a "host" or "host:port" server string.
   *
   * @param server the DNS server override, optionally suffixed with ":port"
   * @return options pointing at the given server; an unparsable port is logged and ignored
   */
  private static DnsClientOptions buildDnsClientOptions(final String server) {
    final List<String> hostPort = Splitter.on(":").splitToList(server);
    final DnsClientOptions dnsClientOptions = new DnsClientOptions();
    dnsClientOptions.setHost(hostPort.get(0));
    if (hostPort.size() > 1) {
      try {
        int port = Integer.parseInt(hostPort.get(1));
        dnsClientOptions.setPort(port);
      } catch (NumberFormatException e) {
        LOG.trace("Invalid port number {}, ignoring", hostPort.get(1));
      }
    }
    return dnsClientOptions;
  }

  /**
   * Convenience method to read all ENRs, from a top-level record.
   *
   * @return all ENRs collected
   */
  public List<EthereumNodeRecord> collectAll() {
    final List<EthereumNodeRecord> nodes = new ArrayList<>();
    final DNSVisitor visitor = nodes::add;
    visitTree(new ENRTreeLink(enrLink), visitor);
    if (!nodes.isEmpty()) {
      LOG.debug("Resolved {} nodes from DNS for enr link {}", nodes.size(), enrLink);
    } else {
      LOG.debug("No nodes resolved from DNS");
    }
    return Collections.unmodifiableList(nodes);
  }

  /**
   * Sequence number of the root record.
   *
   * @return the current sequence number of the root record
   */
  public long sequence() {
    return seq;
  }

  /**
   * Reads a complete tree of record, starting with the top-level record.
   *
   * <p>The visit is aborted when the record is missing, is not a valid signed tree root, or its
   * sequence number is not strictly higher than the one already seen.
   *
   * @param link the ENR link to start with
   * @param visitor the visitor that will look at each record
   */
  private void visitTree(final ENRTreeLink link, final DNSVisitor visitor) {
    Optional<DNSEntry> optionalEntry = resolveRecord(link.domainName());
    if (optionalEntry.isEmpty()) {
      LOG.debug("No DNS record found for {}", link.domainName());
      return;
    }
    final DNSEntry dnsEntry = optionalEntry.get();
    if (!(dnsEntry instanceof ENRTreeRoot treeRoot)) {
      LOG.debug("Root entry {} is not an ENR tree root", dnsEntry);
      return;
    }
    if (!checkSignature(treeRoot, link.publicKey(), treeRoot.sig())) {
      LOG.debug("ENR tree root {} failed signature check", link.domainName());
      return;
    }
    if (treeRoot.seq() <= seq) {
      LOG.debug("ENR tree root seq {} is not higher than {}, aborting", treeRoot.seq(), seq);
      return;
    }
    seq = treeRoot.seq();
    internalVisit(treeRoot.enrRoot(), link.domainName(), visitor);
    internalVisit(treeRoot.linkRoot(), link.domainName(), visitor);
  }

  /**
   * Recursively visits one subtree entry, dispatching on the entry type.
   *
   * @param entryName the label of the entry, resolved as {@code entryName.domainName}
   * @param domainName the domain the tree lives under
   * @param visitor receives each leaf ENR node
   * @return false if the visitor asked to stop, true otherwise (including unresolvable records)
   */
  private boolean internalVisit(
      final String entryName, final String domainName, final DNSVisitor visitor) {
    final Optional<DNSEntry> optionalDNSEntry = resolveRecord(entryName + "." + domainName);
    if (optionalDNSEntry.isEmpty()) {
      LOG.debug("No DNS record found for {}", entryName + "." + domainName);
      return true;
    }
    final DNSEntry entry = optionalDNSEntry.get();
    if (entry instanceof ENRNode node) {
      // TODO: this always returns true in practice because the only visitor used is List::add
      return visitor.visit(node.nodeRecord());
    } else if (entry instanceof DNSEntry.ENRTree tree) {
      for (String e : tree.entries()) {
        // TODO: When would this ever return false?
        boolean keepGoing = internalVisit(e, domainName, visitor);
        if (!keepGoing) {
          return false;
        }
      }
    } else if (entry instanceof ENRTreeLink link) {
      visitTree(link, visitor);
    } else {
      LOG.debug("Unsupported type of node {}", entry);
    }
    return true;
  }

  /**
   * Resolves one DNS record associated with the given domain name.
   *
   * @param domainName the domain name to query
   * @return the DNS entry read from the domain. Empty if no record is found.
   */
  Optional<DNSEntry> resolveRecord(final String domainName) {
    return resolveRawRecord(domainName).map(DNSEntry::readDNSEntry);
  }

  /**
   * Resolves the first TXT record for a domain name and returns it.
   *
   * @param domainName the name of the DNS domain to query
   * @return the first TXT entry of the DNS record. Empty if no record is found.
   */
  Optional<String> resolveRawRecord(final String domainName) {
    // vertx-dns is async, kotlin coroutines allows us to await, similarly Java 21 new thread
    // model would also allow us to await. For now, we will use CountDownLatch to block the
    // current thread until the DNS resolution is complete.
    LOG.debug("Resolving TXT records on domain: {}", domainName);
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicReference<Optional<String>> record = new AtomicReference<>(Optional.empty());
    rawTxtRecordsExecutor.submit(
        () -> {
          dnsClient
              .resolveTXT(domainName)
              .onComplete(
                  ar -> {
                    if (ar.succeeded()) {
                      LOG.trace(
                          "TXT record resolved on domain {}. Result: {}", domainName, ar.result());
                      record.set(ar.result().stream().findFirst());
                    } else {
                      LOG.trace(
                          "TXT record not resolved on domain {}, because: {}",
                          domainName,
                          ar.cause().getMessage());
                    }
                    latch.countDown();
                  });
        });
    try {
      // causes the worker thread to wait. Once we move to Java 21, this can be simplified.
      latch.await();
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers up the stack can observe the interruption;
      // swallowing it would silently defeat cancellation.
      Thread.currentThread().interrupt();
      LOG.debug("Interrupted while waiting for DNS resolution");
    }
    return record.get();
  }

  /**
   * Verifies the tree-root signature against the link's public key.
   *
   * @param root the parsed ENR tree root
   * @param pubKey the public key taken from the enrtree link
   * @param sig the signature carried by the root record
   * @return true if the keccak256 hash of the signed content verifies against the signature
   */
  private boolean checkSignature(
      final ENRTreeRoot root, final SECP256K1.PublicKey pubKey, final SECP256K1.Signature sig) {
    Bytes32 hash =
        Hash.keccak256(Bytes.wrap(root.signedContent().getBytes(StandardCharsets.UTF_8)));
    return SECP256K1.verifyHashed(hash, sig, pubKey);
  }

  /** Shuts down the lookup executor. In-flight lookups are allowed to finish. */
  @Override
  public void close() {
    rawTxtRecordsExecutor.shutdown();
  }
}

@ -0,0 +1,32 @@
/*
* Copyright contributors to Hyperledger Besu.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.p2p.discovery.dns;
import org.apache.tuweni.devp2p.EthereumNodeRecord;
// Adapted from https://github.com/tmio/tuweni and licensed under Apache 2.0
/**
* Reads ENR (Ethereum Node Records) entries passed in from DNS. The visitor may decide to stop the
* visit by returning false.
*/
@FunctionalInterface
public interface DNSVisitor {
  /**
   * Visit a new ENR record.
   *
   * @param enr the ENR record read from DNS
   * @return true to continue visiting, false otherwise
   */
  boolean visit(final EthereumNodeRecord enr);
}

@ -0,0 +1,49 @@
/*
* Copyright contributors to Hyperledger Besu.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.p2p.discovery.dns;
import java.util.Arrays;
import java.util.Map;
import java.util.stream.Collectors;
// Adapted from https://github.com/tmio/tuweni and licensed under Apache 2.0
/** Read Key value pairs from a DNS record */
public class KVReader {
  private KVReader() {}

  /**
   * Parses the whitespace-separated key/value tokens of a DNS TXT record.
   *
   * @param record the raw record text
   * @return a map built from every token that could be split into a key and a value
   */
  public static Map<String, String> readKV(final String record) {
    return Arrays.stream(record.split("\\s+"))
        .map(KVReader::splitToken)
        .filter(pair -> pair.length == 2)
        .collect(Collectors.toMap(pair -> pair[0], pair -> pair[1]));
  }

  /**
   * Splits one token at the first '=' or, failing that, the first ':'.
   *
   * @param token a single whitespace-delimited token of the record
   * @return a two-element {key, value} array, or the bare token when no separator exists
   */
  private static String[] splitToken(final String token) {
    if (token.contains("=")) {
      return token.split("=", 2);
    }
    if (token.contains(":")) {
      return token.split(":", 2);
    }
    // Malformed token (a well-formed record should not produce this);
    // the caller's length filter drops it.
    return new String[] {token};
  }
}

@ -27,6 +27,8 @@ import org.hyperledger.besu.ethereum.p2p.discovery.DiscoveryPeer;
import org.hyperledger.besu.ethereum.p2p.discovery.PeerDiscoveryAgent;
import org.hyperledger.besu.ethereum.p2p.discovery.PeerDiscoveryStatus;
import org.hyperledger.besu.ethereum.p2p.discovery.VertxPeerDiscoveryAgent;
import org.hyperledger.besu.ethereum.p2p.discovery.dns.DNSDaemon;
import org.hyperledger.besu.ethereum.p2p.discovery.dns.DNSDaemonListener;
import org.hyperledger.besu.ethereum.p2p.discovery.internal.PeerTable;
import org.hyperledger.besu.ethereum.p2p.peers.DefaultPeerPrivileges;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeURLImpl;
@ -69,17 +71,20 @@ import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import com.google.common.annotations.VisibleForTesting;
import io.vertx.core.DeploymentOptions;
import io.vertx.core.Future;
import io.vertx.core.ThreadingModel;
import io.vertx.core.Vertx;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.devp2p.EthereumNodeRecord;
import org.apache.tuweni.discovery.DNSDaemon;
import org.apache.tuweni.discovery.DNSDaemonListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -147,7 +152,8 @@ public class DefaultP2PNetwork implements P2PNetwork {
private final CountDownLatch shutdownLatch = new CountDownLatch(2);
private final Duration shutdownTimeout = Duration.ofSeconds(15);
private final Vertx vertx;
private DNSDaemon dnsDaemon;
private final AtomicReference<Optional<Pair<String, DNSDaemon>>> dnsDaemonRef =
new AtomicReference<>(Optional.empty());
/**
* Creates a peer networking service for production purposes.
@ -227,15 +233,26 @@ public class DefaultP2PNetwork implements P2PNetwork {
LOG.info(
"Starting DNS discovery with DNS Server override {}", dnsServer));
dnsDaemon =
final DNSDaemon dnsDaemon =
new DNSDaemon(
disco,
createDaemonListener(),
0L,
1000L, // start after 1 second
600000L,
config.getDnsDiscoveryServerOverride().orElse(null),
vertx);
dnsDaemon.start();
config.getDnsDiscoveryServerOverride().orElse(null));
// TODO: Java 21, we can move to Virtual Thread model
final DeploymentOptions options =
new DeploymentOptions()
.setThreadingModel(ThreadingModel.WORKER)
.setInstances(1)
.setWorkerPoolSize(1);
final Future<String> deployId = vertx.deployVerticle(dnsDaemon, options);
final String dnsDaemonDeployId =
deployId.toCompletionStage().toCompletableFuture().join();
dnsDaemonRef.set(Optional.of(Pair.of(dnsDaemonDeployId, dnsDaemon)));
});
final int listeningPort = rlpxAgent.start().join();
@ -282,7 +299,9 @@ public class DefaultP2PNetwork implements P2PNetwork {
return;
}
getDnsDaemon().ifPresent(DNSDaemon::close);
// since dnsDaemon is a vertx verticle, vertx.close will undeploy it.
// However, we can safely call stop as well.
dnsDaemonRef.get().map(Pair::getRight).ifPresent(DNSDaemon::stop);
peerConnectionScheduler.shutdownNow();
peerDiscoveryAgent.stop().whenComplete((res, err) -> shutdownLatch.countDown());
@ -339,7 +358,7 @@ public class DefaultP2PNetwork implements P2PNetwork {
@VisibleForTesting
Optional<DNSDaemon> getDnsDaemon() {
return Optional.ofNullable(dnsDaemon);
return dnsDaemonRef.get().map(Pair::getRight);
}
@VisibleForTesting

@ -0,0 +1,121 @@
/*
* Copyright contributors to Hyperledger Besu.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.p2p.discovery.dns;
import static org.assertj.core.api.Assertions.assertThat;
import java.io.IOException;
import java.security.Security;
import java.util.concurrent.atomic.AtomicInteger;
import io.vertx.core.DeploymentOptions;
import io.vertx.core.ThreadingModel;
import io.vertx.core.Vertx;
import io.vertx.junit5.Checkpoint;
import io.vertx.junit5.VertxExtension;
import io.vertx.junit5.VertxTestContext;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
@ExtendWith(VertxExtension.class)
class DNSDaemonTest {
  // Real holesky enrtree link; lookups are answered by the mock DNS server below.
  private static final String HOLESKY_ENR =
      "enrtree://AKA3AM6LPBYEUDMVNU3BSVQJ5AD45Y7YPOHJLEF6W26QOE4VTUDPE@all.holesky.ethdisco.net";

  private final MockDnsServerVerticle mockDnsServerVerticle = new MockDnsServerVerticle();
  private DNSDaemon dnsDaemon;

  /** Registers BouncyCastle, needed for the ENR signature verification. */
  @BeforeAll
  static void setup() {
    Security.addProvider(new BouncyCastleProvider());
  }

  /** Deploys the mock DNS server before each test so its ephemeral port is available. */
  @BeforeEach
  @DisplayName("Deploy Mock Dns Server Verticle")
  void prepare(final Vertx vertx, final VertxTestContext vertxTestContext) {
    vertx.deployVerticle(mockDnsServerVerticle, vertxTestContext.succeedingThenComplete());
  }

  @Test
  @DisplayName("Test DNS Daemon with a mock DNS server")
  void testDNSDaemon(final Vertx vertx, final VertxTestContext testContext) {
    // A single checkpoint: the listener must be called at least once.
    final Checkpoint checkpoint = testContext.checkpoint();
    dnsDaemon =
        new DNSDaemon(
            HOLESKY_ENR,
            (seq, records) -> checkpoint.flag(),
            0,
            0,
            0,
            "localhost:" + mockDnsServerVerticle.port());
    final DeploymentOptions options =
        new DeploymentOptions().setThreadingModel(ThreadingModel.WORKER).setWorkerPoolSize(1);
    vertx.deployVerticle(dnsDaemon, options);
  }

  @Test
  @DisplayName("Test DNS Daemon with periodic lookup to a mock DNS server")
  void testDNSDaemonPeriodic(final Vertx vertx, final VertxTestContext testContext) {
    // checkpoint should be flagged twice: once per periodic lookup
    final Checkpoint checkpoint = testContext.checkpoint(2);
    final AtomicInteger pass = new AtomicInteger(0);
    dnsDaemon =
        new DNSDaemon(
            HOLESKY_ENR,
            (seq, records) -> {
              switch (pass.incrementAndGet()) {
                case 1:
                  // first pass resolves the full record set from the fixture
                  testContext.verify(
                      () -> {
                        assertThat(seq).isEqualTo(932);
                        assertThat(records).hasSize(115);
                      });
                  break;
                case 2:
                  // second pass sees an unchanged root seq, so no new records are reported
                  testContext.verify(
                      () -> {
                        assertThat(seq).isEqualTo(932);
                        assertThat(records).isEmpty();
                      });
                  break;
                default:
                  testContext.failNow("Third pass is not expected");
              }
              checkpoint.flag();
            },
            0,
            1, // initial delay
            300, // second lookup after 300 ms (due to Mock DNS server, we are very quick).
            "localhost:" + mockDnsServerVerticle.port());
    final DeploymentOptions options =
        new DeploymentOptions().setThreadingModel(ThreadingModel.WORKER).setWorkerPoolSize(1);
    vertx.deployVerticle(dnsDaemon, options);
  }

  @AfterEach
  @DisplayName("Check that the vertx worker verticle is still there")
  void lastChecks(final Vertx vertx) {
    // two deployments expected: the mock DNS server and the DNS daemon
    assertThat(vertx.deploymentIDs()).isNotEmpty().hasSize(2);
  }
}

@ -0,0 +1,71 @@
/*
* Copyright contributors to Hyperledger Besu.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.p2p.discovery.dns;
import static org.assertj.core.api.Assertions.assertThat;
import java.security.Security;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
class DNSEntryTest {

  /** BouncyCastle supplies the crypto primitives used while parsing signed entries. */
  @BeforeAll
  static void setup() {
    Security.addProvider(new BouncyCastleProvider());
  }

  @Test
  void enrTreeRootIsParsed() {
    final String rootTxt =
        "\"enrtree-root:v1 e=KVKZLGARGADDZSMCF65QQMEWLE l=FDXN3SN67NA5DKA4J2GOK7BVQI seq=919 sig=braPmdwMk-g65lQxums6hEy553s3bWMoecW0QQ0IdykIoM9i3We0bxFT0IDONPaFcRePcN-yaOpt8GBfeQ4qDAE\"";
    final DNSEntry parsed = DNSEntry.readDNSEntry(rootTxt);
    assertThat(parsed).isInstanceOf(DNSEntry.ENRTreeRoot.class);
    final DNSEntry.ENRTreeRoot root = (DNSEntry.ENRTreeRoot) parsed;
    assertThat(root.enrRoot()).isEqualTo("KVKZLGARGADDZSMCF65QQMEWLE");
    assertThat(root.linkRoot()).isEqualTo("FDXN3SN67NA5DKA4J2GOK7BVQI");
    assertThat(root.seq()).isEqualTo(919);
  }

  @Test
  void enrTreeBranchIsParsed() {
    // The record is split across two quoted strings; the fragment at the boundary
    // ("...OZPS4B" / "EC6Q") is asserted below to stay unjoined and "EC6Q" dropped.
    final String branchTxt =
        "\"enrtree-branch:HVKDJGU7SZMOAMNLBJYQBSKZTM,PVSVWO3NLKHTBAIWOY2NB67RFI,"
            + "6TCKCNWXNGBMNFTGSRKNRO4ERA,37NSKCRJVI5XRRHWLTHW4A6OX4,NV3IJMKDVQHHALY6MAVMPYN6ZU,"
            + "SZCFDMTYOERMIVOUXEWXSGDVEY,FZ26UT4LSG7D2NRX7SV6P3S6BI,7TWNYLCOQ7FEM4IG65WOTL4MVE,"
            + "6OJXGI7NJUESOLL2OZPS4B\" \"EC6Q,437FN4NSGMGFQLAXYWPX5JNACI,FCA7LN6NCO5IAWPG5FH7LX6XJA,"
            + "EYBOZ2NZSHDWDSNHV66XASXOHM,FUVRJMMMKJMCL4L4EBEOWCSOFA\"";
    final DNSEntry parsed = DNSEntry.readDNSEntry(branchTxt);
    assertThat(parsed).isInstanceOf(DNSEntry.ENRTree.class);
    final DNSEntry.ENRTree branch = (DNSEntry.ENRTree) parsed;
    assertThat(branch.entries())
        .containsExactly(
            "HVKDJGU7SZMOAMNLBJYQBSKZTM",
            "PVSVWO3NLKHTBAIWOY2NB67RFI",
            "6TCKCNWXNGBMNFTGSRKNRO4ERA",
            "37NSKCRJVI5XRRHWLTHW4A6OX4",
            "NV3IJMKDVQHHALY6MAVMPYN6ZU",
            "SZCFDMTYOERMIVOUXEWXSGDVEY",
            "FZ26UT4LSG7D2NRX7SV6P3S6BI",
            "7TWNYLCOQ7FEM4IG65WOTL4MVE",
            "6OJXGI7NJUESOLL2OZPS4B",
            "437FN4NSGMGFQLAXYWPX5JNACI",
            "FCA7LN6NCO5IAWPG5FH7LX6XJA",
            "EYBOZ2NZSHDWDSNHV66XASXOHM",
            "FUVRJMMMKJMCL4L4EBEOWCSOFA")
        .doesNotContain("EC6Q");
  }
}

@ -0,0 +1,40 @@
/*
* Copyright contributors to Hyperledger Besu.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.p2p.discovery.dns;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.Map;
import org.junit.jupiter.api.Test;
public class KVReaderTest {
  // copied from `dig all.holesky.ethdisco.net txt`
  private static final String TXT_RECORD =
      "enrtree-root:v1 e=KVKZLGARGADDZSMCF65QQMEWLE l=FDXN3SN67NA5DKA4J2GOK7BVQI seq=919 sig=braPmdwMk-g65lQxums6hEy553s3bWMoecW0QQ0IdykIoM9i3We0bxFT0IDONPaFcRePcN-yaOpt8GBfeQ4qDAE";

  @Test
  void parseTXTRecord() {
    final Map<String, String> parsed = KVReader.readKV(TXT_RECORD);
    // every key/value pair of the root record must round-trip through the reader
    assertThat(parsed).containsEntry("enrtree-root", "v1");
    assertThat(parsed).containsEntry("e", "KVKZLGARGADDZSMCF65QQMEWLE");
    assertThat(parsed).containsEntry("l", "FDXN3SN67NA5DKA4J2GOK7BVQI");
    assertThat(parsed).containsEntry("seq", "919");
    assertThat(parsed)
        .containsEntry(
            "sig",
            "braPmdwMk-g65lQxums6hEy553s3bWMoecW0QQ0IdykIoM9i3We0bxFT0IDONPaFcRePcN-yaOpt8GBfeQ4qDAE");
  }
}

@ -0,0 +1,205 @@
/*
* Copyright contributors to Hyperledger Besu.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.p2p.discovery.dns;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import com.google.common.base.Splitter;
import com.google.common.io.Resources;
import io.vertx.core.AbstractVerticle;
import io.vertx.core.Promise;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.datagram.DatagramPacket;
import io.vertx.core.datagram.DatagramSocket;
import io.vertx.core.datagram.DatagramSocketOptions;
import io.vertx.core.json.JsonObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Mock DNS server verticle for tests. Answers TXT queries from a canned set of records loaded
 * from {@code discovery/dns/dns-records.json} on the classpath, and replies with an NXDOMAIN
 * error for unknown names.
 */
public class MockDnsServerVerticle extends AbstractVerticle {
  private static final Logger LOG = LoggerFactory.getLogger(MockDnsServerVerticle.class);
  // query name -> TXT record payload, populated from the JSON fixture during start()
  private final Map<String, String> txtRecords = new HashMap<>();
  // actual UDP port bound by the server; an ephemeral port is requested via listen(0, ...)
  private int dnsPort;

  /**
   * Loads the canned TXT records and starts the UDP listener.
   *
   * @param startPromise completed once the socket is listening, failed otherwise
   * @throws Exception if the fixture resource path cannot be resolved
   */
  @Override
  public void start(final Promise<Void> startPromise) throws Exception {
    final DatagramSocket datagramSocket = vertx.createDatagramSocket(new DatagramSocketOptions());
    datagramSocket.handler(packet -> handleDatagramPacket(datagramSocket, packet));
    final String dnsEntriesJsonPath =
        Path.of(Resources.getResource("discovery/dns/dns-records.json").toURI()).toString();
    LOG.debug("Reading DNS entries from: {}", dnsEntriesJsonPath);
    vertx
        .fileSystem()
        .readFile(dnsEntriesJsonPath)
        .compose(
            buffer -> {
              final JsonObject dnsEntries = new JsonObject(buffer.toString());
              final Map<String, Object> jsonMap = dnsEntries.getMap();
              jsonMap.forEach((key, value) -> txtRecords.put(key, value.toString()));
              // start the server
              return datagramSocket.listen(0, "127.0.0.1");
            })
        .onComplete(
            res -> {
              if (res.succeeded()) {
                LOG.info("Mock Dns Server is now listening {}", res.result().localAddress());
                dnsPort = res.result().localAddress().port();
                startPromise.complete();
              } else {
                startPromise.fail(res.cause());
              }
            });
  }

  @Override
  public void stop() {
    // NOTE(review): the datagram socket is not closed here; presumably vertx releases it when
    // the verticle is undeployed — confirm.
    LOG.info("Stopping Mock DNS Server");
  }

  // Answers one incoming DNS query: a TXT response when the name is known, NXDOMAIN otherwise.
  private void handleDatagramPacket(final DatagramSocket socket, final DatagramPacket packet) {
    LOG.debug("Packet Received");
    Buffer data = packet.data();
    final short queryId = getQueryId(data);
    final String queryName = extractQueryName(data.getBytes());
    final Buffer response;
    if (txtRecords.containsKey(queryName)) {
      LOG.debug("Query name found {}", queryName);
      response = createTXTResponse(queryId, queryName, txtRecords.get(queryName));
    } else {
      LOG.debug("Query name not found: {}", queryName);
      response = createErrorResponse(queryId, queryName);
    }
    // echo the answer back to the sender's address/port
    socket.send(response, packet.sender().port(), packet.sender().host());
  }

  // Decodes the QNAME from the question section: length-prefixed labels joined with dots,
  // terminated by a zero-length label. Does not handle DNS name compression (not needed for
  // a query's question section).
  private String extractQueryName(final byte[] buffer) {
    StringBuilder queryName = new StringBuilder();
    int index = 12; // Skip the DNS header
    while (index < buffer.length) {
      int labelLength = buffer[index] & 0xFF;
      if (labelLength == 0) {
        break;
      }
      index++;
      for (int i = 0; i < labelLength; i++) {
        char c = (char) (buffer[index + i] & 0xFF);
        queryName.append(c);
      }
      index += labelLength;
      // only add a separator when another label follows
      if (index < buffer.length && buffer[index] != 0) {
        queryName.append(".");
      }
    }
    return queryName.toString();
  }

  // Builds a well-formed DNS response with one TXT answer for the given query.
  private Buffer createTXTResponse(
      final short queryId, final String queryName, final String txtRecord) {
    final Buffer buffer = Buffer.buffer();
    // Write DNS header
    buffer.appendShort(queryId); // Query Identifier
    buffer.appendShort((short) 0x8180); // Flags (Standard query response, No error)
    buffer.appendShort((short) 1); // Questions count
    buffer.appendShort((short) 1); // Answers count
    buffer.appendShort((short) 0); // Authority RRs count
    buffer.appendShort((short) 0); // Additional RRs count
    // Write query name
    final Iterable<String> queryLabels = Splitter.on(".").split(queryName);
    for (String label : queryLabels) {
      buffer.appendByte((byte) label.length());
      buffer.appendString(label);
    }
    buffer.appendByte((byte) 0); // End of query name
    // Write query type and class
    buffer.appendShort((short) 16); // Type (TXT)
    buffer.appendShort((short) 1); // Class (IN)
    // Write answer (the answer name is written lowercased, unlike the echoed question)
    for (String label : queryLabels) {
      buffer.appendByte((byte) label.length());
      buffer.appendString(label.toLowerCase(Locale.ROOT));
    }
    buffer.appendByte((byte) 0); // End of answer name
    buffer.appendShort((short) 16); // TXT record type
    buffer.appendShort((short) 1); // Class (IN)
    buffer.appendInt(60); // TTL (60 seconds)
    // TXT RDATA is a single character-string: a one-byte length followed by the bytes.
    // NOTE(review): the length is written as one byte, so records over 255 bytes would be
    // truncated/corrupted — the fixture records are assumed to fit; confirm.
    int txtRecordsLength = txtRecord.getBytes(UTF_8).length;
    buffer.appendShort((short) (txtRecordsLength + 1)); // Data length
    buffer.appendByte((byte) txtRecordsLength); // TXT record length
    buffer.appendString(txtRecord);
    return buffer;
  }

  // Builds an NXDOMAIN response echoing the question back to the client.
  private Buffer createErrorResponse(final short queryId, final String queryName) {
    Buffer buffer = Buffer.buffer();
    // Write DNS header
    buffer.appendShort(queryId); // Query Identifier
    buffer.appendShort((short) 0x8183); // Flags (Standard query response, NXDOMAIN error)
    buffer.appendShort((short) 1); // Questions count
    buffer.appendShort((short) 0); // Answers count
    buffer.appendShort((short) 0); // Authority RRs count
    buffer.appendShort((short) 0); // Additional RRs count
    // Write query name
    for (String label : Splitter.on(".").split(queryName)) {
      buffer.appendByte((byte) label.length());
      buffer.appendString(label);
    }
    buffer.appendByte((byte) 0); // End of query name
    // Write query type and class
    buffer.appendShort((short) 16); // Type (TXT)
    buffer.appendShort((short) 1); // Class (IN)
    return buffer;
  }

  // Reads the 16-bit query id from the first two bytes of the request.
  private short getQueryId(final Buffer queryData) {
    return (short) ((queryData.getByte(0) & 0xff) << 8 | (queryData.getByte(1) & 0xff));
  }

  /**
   * Mock server local port
   *
   * @return server port
   */
  public int port() {
    return dnsPort;
  }
}

@ -57,9 +57,7 @@ import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Stream;
import io.vertx.core.Context;
import io.vertx.core.Vertx;
import io.vertx.core.dns.DnsClient;
import org.apache.tuweni.bytes.Bytes32;
import org.apache.tuweni.crypto.SECP256K1;
import org.assertj.core.api.Assertions;
@ -335,16 +333,21 @@ public final class DefaultP2PNetworkTest {
final NetworkingConfiguration dnsConfig =
when(spy(config).getDiscovery()).thenReturn(disco).getMock();
Vertx vertx = mock(Vertx.class);
when(vertx.createDnsClient(any())).thenReturn(mock(DnsClient.class));
when(vertx.getOrCreateContext()).thenReturn(mock(Context.class));
final Vertx vertx = Vertx.vertx(); // use real instance
// spy on DefaultP2PNetwork
final DefaultP2PNetwork testClass =
(DefaultP2PNetwork) builder().vertx(vertx).config(dnsConfig).build();
testClass.start();
assertThat(testClass.getDnsDaemon()).isPresent();
try {
// the actual lookup won't work because of mock discovery url, however, a valid DNSDaemon
// should be created.
assertThat(testClass.getDnsDaemon()).isPresent();
} finally {
testClass.stop();
vertx.close();
}
}
@Test
@ -358,17 +361,19 @@ public final class DefaultP2PNetworkTest {
doReturn(disco).when(dnsConfig).getDiscovery();
doReturn(Optional.of("localhost")).when(dnsConfig).getDnsDiscoveryServerOverride();
Vertx vertx = mock(Vertx.class);
when(vertx.createDnsClient(any())).thenReturn(mock(DnsClient.class));
when(vertx.getOrCreateContext()).thenReturn(mock(Context.class));
Vertx vertx = Vertx.vertx(); // use real instance
final DefaultP2PNetwork testClass =
(DefaultP2PNetwork) builder().config(dnsConfig).vertx(vertx).build();
testClass.start();
// ensure we used the dns server override config when building DNSDaemon:
assertThat(testClass.getDnsDaemon()).isPresent();
verify(dnsConfig, times(2)).getDnsDiscoveryServerOverride();
try {
assertThat(testClass.getDnsDaemon()).isPresent();
verify(dnsConfig, times(2)).getDnsDiscoveryServerOverride();
} finally {
testClass.stop();
vertx.close();
}
}
private DefaultP2PNetwork network() {

@ -0,0 +1,137 @@
{
"all.holesky.ethdisco.net" : "enrtree-root:v1 e=QXOF2GWVHBKMKW57Y2KSKWYNFQ l=FDXN3SN67NA5DKA4J2GOK7BVQI seq=932 sig=DuA35BkYo9-FBwJ6MPxdNnYfcGMSGunAKUyfNN2gYQhYDBCPFZkr_cfe40Wspl2Vl76w6Ccs-B8ZrXpI_YymrAA",
"I56MJYJBMXTZZEPBQR6HWNAH7A.all.holesky.ethdisco.net" : "enr:-KO4QCTGqfxu7eEfe6M83e3bQLUgkKWGCdv6Ib5D0gCuQkkvbq1D0CN8UmHgpsddZNKHa2iBl3EIiGhaLBs_NKTxGQuGAYranLv7g2V0aMfGhJsZKtCAgmlkgnY0gmlwhCKH3xKJc2VjcDI1NmsxoQMHuEvotUinJNm_8Vz7412P1yKZ6r5iz9EcYj-9v3weRYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"IPSUCB6CICZIW6SERKZL7FFTIM.all.holesky.ethdisco.net" : "enr:-KO4QD47Vx9bnFcvE6JNAhrLz6GGgPk9GaJtP95ogjSOFHxlcZjnrT5TQzW9Jyx99XO9rzhK6WSvgdFoA9_54QPQcUaGAYxBy9opg2V0aMfGhJsZKtCAgmlkgnY0gmlwhIe1TLKJc2VjcDI1NmsxoQKMO6bMfiH5DnJNFZgSqUEi6uHNnH2_bMh7hwVXRWTMl4RzbmFwwIN0Y3CCdqmDdWRwgnap",
"QJM2RVJT7USPDSI5VVFGGDW6JM.all.holesky.ethdisco.net" : "enr:-Ji4QHWntDnLOi4S9GKmF4HY7WrnJuJGaIBrnM3Y75G-8EUpZExnUkaZJtlmzOeJs6WAcYsissKSybUG5rSEiad0s86ByoNldGjHxoSbGSrQgIJpZIJ2NIJpcISLY0UEiXNlY3AyNTZrMaED5hLZweFA_nvWEc-Q7Vxqf2lIYxScF-F4ORxs-rVyHZKDdGNwgtn5g3VkcILZ-Q",
"C7SDD5OPASV7B2XUOXF5NLBBKU.all.holesky.ethdisco.net" : "enr:-KO4QLo95TIKo_axZA9xafYgl9jQ2ZTDFCQ29znjUwp-6zPUe6o8L9NO3X3n3uMZH8bFGnRmoIhyHLxNoOp2BU6O7xGGAYwoJEiug2V0aMfGhJsZKtCAgmlkgnY0gmlwhKEjElWJc2VjcDI1NmsxoQPIffRacW7k1QukSBfJ8leopO1M5wOv89G06m0SAQDB8oRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"AUQGHXRAP7J3AGI6TUOQVYTYLY.all.holesky.ethdisco.net" : "enr:-KO4QGuiAZHlxH7T5XPcPpRTovca50B0pzDLkgJ-zaxkjT_PO6zsAWQezB-b53yAnqx01Jdj5tQWsvWoaNVfiFw3iw6GAY1nSgXzg2V0aMfGhJsZKtCAgmlkgnY0gmlwhEp2jJuJc2VjcDI1NmsxoQMTiH7XteYOVAHiXAZoacUjo5JYLhQINeFMbPbEY9sA84RzbmFwwIN0Y3CCdl-DdWRwgnZf",
"WLME2JINTIQACAMD5WZUQ4PRJI.all.holesky.ethdisco.net" : "enr:-J24QIMYTLuDOO1F5w8AL_WGXU6CZmzi05W35Rz8PuVT8b6vUqmUatifQtzuL2Q7REuRY5kYNurUbLHscGx3j1zjTmmGAYrbbtBig2V0aMfGhJsZKtCAgmlkgnY0gmlwhETp6VKJc2VjcDI1NmsxoQKrwy6w-9JGp3dagVeXMZEd3iZGff-rEVTqd4gmkhBRBIN0Y3CCdl-DdWRwgnZf",
"5JAERI2BPJN45X7IK6SWZOZ2FI.all.holesky.ethdisco.net" : "enr:-KO4QEsCIXXCDLUsTbUOT3ILg3rpga0gZXaG9_YT9_kFk97iRwu_Pr4RlhDvdkg4Y4bl9SBVzP9bFtL7H9AU7QxE7OeGAY1lbo59g2V0aMfGhJsZKtCAgmlkgnY0gmlwhA3Xl1-Jc2VjcDI1NmsxoQPsZi79eOcjTI3Xa2GCBiXRCgtm7wFo19cVlmhQSZ-i64RzbmFwwIN0Y3CCdl-DdWRwgnZf",
"J7ZIDKH7CKVIYY244QVU6RMPDM.all.holesky.ethdisco.net" : "enr:-KO4QN942TWYsHn5c2-vZhoeXwELvI2zVg2eHk6nzoyTN8Cwcb8n-4_cwQPIU5p7DsuOehtu-raiGxYm4vE_6WUhT6yGAY1ClkGgg2V0aMfGhJsZKtCAgmlkgnY0gmlwhJBbb1yJc2VjcDI1NmsxoQKemKJgEuvyzZofyTm82rbw716cyTA2hf6zSJ73kzl_ZIRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"FFM67SMD4N3SK4ZQTY2HOMU4F4.all.holesky.ethdisco.net" : "enr:-KO4QG4O1MBSzBHtShtqoXXeAycBqcDpuBK2BBwIuK_Qppz5OpJC6Ga3-qDEQJQOtbEYm0yXMhenb50Get_H6Bu9SbGGAY16lghqg2V0aMfGhJsZKtCAgmlkgnY0gmlwhA_rV76Jc2VjcDI1NmsxoQJ1vrJ4fwXiDAhSqIJwbELp2ktd9A0zRLws1li91-aBmoRzbmFwwIN0Y3CCdmCDdWRwgnZg",
"GZVG4EONSAY6M7SMLJXDCFUF5Q.all.holesky.ethdisco.net" : "enr:-KO4QErAUNKF003oZAYn8UUocPIuq2MTpxHV3gHfrNkThYPbZ366m-mlvXezUX5JDA5iJE-LTWWEPh8JqjEDah5kVimGAYxA2nMcg2V0aMfGhJsZKtCAgmlkgnY0gmlwhEFtWv2Jc2VjcDI1NmsxoQMCgMWZQXkPyMKdwnzdBxQe6rcs25guZtULvfzDjhE71oRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"3AVV65MYCJ7AGFUCKXT2UX2DSQ.all.holesky.ethdisco.net" : "enr:-KO4QEaesvQ1kySNS_mnQfiBr4DZQmkhHmqKEGRQvgyd5K3lNDm3vjs9cMMVTY9FgENpP1o0UGBsapNNfvXIN2gaudGGAY3ulUKIg2V0aMfGhJsZKtCAgmlkgnY0gmlwhDIjTkmJc2VjcDI1NmsxoQNVbUX0SvjLX7d7psFtTTNkKT7x_zHl3f4bSVQrkbK1YoRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"QDCV4SQTQAGHHA3DRA2TR4LNMY.all.holesky.ethdisco.net" : "enr:-KO4QFVXTftQWqwggnKZlVzFmnQz-U5jzIx37kQSltdvU5kuYQHT1mboaEBh92lS0BUvn-aBzLmHu7wX6hhs2xdGw-6GAYrk72-Hg2V0aMfGhJsZKtCAgmlkgnY0gmlwhCJSHduJc2VjcDI1NmsxoQNGPGIxN3h_R3TiUe0Ud2WazTigmsK028DXXuVLna9rtoRzbmFwwIN0Y3CCdTWDdWRwgnU1",
"366Y3UIIKK2G5CUS7CHNPZMPEM.all.holesky.ethdisco.net" : "enr:-KO4QK1ecw-CGrDDZ4YwFrhgqctD0tWMHKJhUVxsS4um3aUFe3yBHRtVL9uYKk16DurN1IdSKTOB1zNCvjBybjZ_KAqGAYtJ5U8wg2V0aMfGhJsZKtCAgmlkgnY0gmlwhA_MtDmJc2VjcDI1NmsxoQNXD7fj3sscyOKBiHYy14igj1vJYWdKYZH7n3T8qRpIcYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"LHFO3FXNRHB5B3YSTKUALGMEAI.all.holesky.ethdisco.net" : "enr:-KO4QEuoZibAJnKlhcAENpJiju6nVH5J2or_xnVnX_eputCuTHBVtNJmoRKgDBU0idG4kbxFPPKaWRQ3TdYeIbsAAV-GAYrax-PMg2V0aMfGhJsZKtCAgmlkgnY0gmlwhMIhKIOJc2VjcDI1NmsxoQNCYX5kXJqI2deaVOtsTmUQka7dIocapHSAfnNn_CTL2YRzbmFwwIN0Y3CCdmCDdWRwgnZg",
"3PMAED3BC2HUS7AMEPGJDKENQY.all.holesky.ethdisco.net" : "enr:-KO4QFMsKiZk64UQsyLVHrxTpTpmdZYzuI-Xgy0gut5NgLulRvbrgQDmdrAT2PQuxK8K9AiplRzJ_i8CtseBD_d2bwGGAYx9HQirg2V0aMfGhJsZKtCAgmlkgnY0gmlwhLBn3miJc2VjcDI1NmsxoQJSdj6d-7J6YsK6vZwRl_zzMSCKauLadYxL8zfjjzOQZYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"4PFOQY7RKTNLRPXQXKKXD3K6LY.all.holesky.ethdisco.net" : "enr:-KO4QJ8BuQCt7gBXU_26FNx8waHuCzCkG54mErQoBAJrYaw_bdUJT0yQA0vG8gr-dUoS77OX0D4tD-R6e30832ncLMaGAYrbEjQ3g2V0aMfGhJsZKtCAgmlkgnY0gmlwhEEV4tiJc2VjcDI1NmsxoQJHEsWJ3CoSi-1JgkeSogpyRqYvBaJI77-soOcbmB_nyYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"GUBLA7OPRPBMYMQ76EXDMB7BHY.all.holesky.ethdisco.net" : "enr:-KO4QIFA0MiYPPyzvlUqi5j1dL1RGz6MdFhhLN30iXeNc1JfCS1QqTJciby7ZhlQYwFdVMuk_ptgu530WxiQR-UmZpWGAY2ia8cWg2V0aMfGhJsZKtCAgmlkgnY0gmlwhEt3l4OJc2VjcDI1NmsxoQJRcQCiMhxUex-gtsxf2IWJ6nGita_F8BaPZxTwE310WIRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"VN7EWW4RMUZM6KUOQ43GJ33DWM.all.holesky.ethdisco.net" : "enr:-KO4QI3OPqietyANJtQ9_RUOU6ELbwDgTn39MufjLJ9BEzkwTxEgDcgYekeisrchGWzrzTC8T_2zowbuQhZlTkqNXB2GAY39ItF5g2V0aMfGhJsZKtCAgmlkgnY0gmlwhEFsBbiJc2VjcDI1NmsxoQLa2k_Jy-sjrZ-0305yX_F1ZxDNNfCKIImoL5tyDN3cEYRzbmFwwIN0Y3CCdl6DdWRwgnZe",
"FDXN3SN67NA5DKA4J2GOK7BVQI.all.holesky.ethdisco.net" : "enrtree-branch:",
"QDMF4HIR2UEXRJVOX4UJPJTPKA.all.holesky.ethdisco.net" : "enr:-KO4QCeMBK9Ur5AHbkdov7TdALRsHzc48RXNtvR861fcmcxPYH59PthmWiT_pQHfVOH3x9-HK2bG8_h2jm46sZyJ6UeGAY2ZCasag2V0aMfGhJsZKtCAgmlkgnY0gmlwhEFs7nOJc2VjcDI1NmsxoQLT22lM9abRweZb93jeMd4jvuRsXnjQFT2oY5a0rb0cvoRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"3JSDZJCCWKIEBGPH5EI5VZF4RQ.all.holesky.ethdisco.net" : "enrtree-branch:4PFOQY7RKTNLRPXQXKKXD3K6LY,3AVV65MYCJ7AGFUCKXT2UX2DSQ,5JAERI2BPJN45X7IK6SWZOZ2FI,YUZZDZD57PTNEIB5QL3FEXK2ZA,TMY2W2YBNCXUUNA3Y7QXWVQLRE,MJB42632KKOPGJ2KNY3GPKP66I,QXRV67JYJNOYJDPC5CTKUVG76M,QJM2RVJT7USPDSI5VVFGGDW6JM,I33RZWBACJHNDEXG2RRXMX",
"A63OP4WTCB3HGDZE4NGDEID6Z4.all.holesky.ethdisco.net" : "enrtree-branch:QXKEJG4XZEQSXNUY7JJYCATPEI,HIDVATDVB36L2MASAWA7SBJAII,CHFGCI2RQS3XFN2MKFP6G2ZM4U,GSONRYZILMGUJEN3PYQXYD6GYQ,OSHAABXYJSRWW35VOMYHZUKJXU,RUJF27NEYDBNQAFMI6K6SKDVRY,TXAFU343ACQLSVIMT5LYG3W2AE,R6EQA5KQEQM77JJXB4BHHTDF6Y,CF25LCQ452FBTR24CZU4C2",
"N7HAL5M6HNZBGTWM3LWFDRX4WU.all.holesky.ethdisco.net" : "enr:-Je4QI2f2MAXFFi0Q3--GhXVZwNP-jI-G6XwmiDXxT-iobTTKOBJCyWUPk5WSoTtRS2ABqJmTAK8jflXBWLHuY7AueRFg2V0aMfGhP1PAWuAgmlkgnY0gmlwhDmAXJmJc2VjcDI1NmsxoQIc4YD6HsNIoj2HVJJDzr3cSfNtcEBRLtk5kgwXNH0q8YN0Y3CCXoeDdWRwgl6H",
"QXOF2GWVHBKMKW57Y2KSKWYNFQ.all.holesky.ethdisco.net" : "enrtree-branch:A63OP4WTCB3HGDZE4NGDEID6Z4,PL27G47LAASBPPAMXUDIJ3OCRQ",
"23B5UFB3JWETLIKE5Q6B2B5MWA.all.holesky.ethdisco.net" : "enr:-KO4QGjsmd6RDlGYHJu2JnI-Lf4TZ8s1yJtLDvvtp_Kw6xIwNn7_Ti_FmEHfWbaSyy0Icl4jlErvH-LrEmi9PCzB27uGAY0MEI03g2V0aMfGhJsZKtCAgmlkgnY0gmlwhNXvxn6Jc2VjcDI1NmsxoQOzxdn7S7O58IiaWGk4n3MsiB816GGv_bAd_PM9FkWeZIRzbmFwwIN0Y3CCVTyDdWRwglU8",
"TXAFU343ACQLSVIMT5LYG3W2AE.all.holesky.ethdisco.net" : "enrtree-branch:3EMD3PTQYRFVNFBQ73BYFL4AEQ,PMLO73ISEK4Z6IRPOSWIUUKF64,5NHWEAJHKSAO5DIIYAZ7MQJVSE,LNO6EN3Y2LJA5YOJMEMODAJFWM,23B5UFB3JWETLIKE5Q6B2B5MWA,HQ5S2EHH762PSKVTHWNQ6346NI,O25DXMPE4UEZ6SFMQRBGL2E2I4,2WHIXLN5QX3JEK3SSH7MXKTJMU,WQDLDQ7U74Y2HM5LHVCOCY",
"PMLO73ISEK4Z6IRPOSWIUUKF64.all.holesky.ethdisco.net" : "enr:-KO4QBxZ1JFNQEopzO-wMumFIw4fGHkuPZkuCLvgcz0X8L8yKWUkQ-UiS7-KHBCjGOby7yjR6m7m9dqoQvlHejBncSWGAYv2B0KZg2V0aMfGhJsZKtCAgmlkgnY0gmlwhA_ML-SJc2VjcDI1NmsxoQMEi6SKsKUVTnxDWKc1Go6ZG8o5nEapNJBaWAd_YC26R4RzbmFwwIN0Y3CCdl-DdWRwgnZf",
"CDXRYZFKH6OUWTA2IJ2MYIMYPM.all.holesky.ethdisco.net" : "enr:-KO4QExy2s5Uh2eTEJtsODBEOcPZZN-CYY0WEr_8nA_uK43QV3iqmFzfXJ-29zootm5F-E-DrjzTObp2tw_klTXPRu-GAY2ZtbV8g2V0aMfGhJsZKtCAgmlkgnY0gmlwhBJ2VQmJc2VjcDI1NmsxoQJuk1X-4JUVQT-mguV7dSGSUC-RoxdnTyOmD-VrfRFQyYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"CTDMY3ALJA7FRKVS4MCTAJRRH4.all.holesky.ethdisco.net" : "enrtree-branch:UIGSNQMHERWJIZCP2OLUXSZ3KM,I56MJYJBMXTZZEPBQR6HWNAH7A,IZNO6JEFTYHQWWF3LJ2M66P7II",
"GSONRYZILMGUJEN3PYQXYD6GYQ.all.holesky.ethdisco.net" : "enrtree-branch:V4A7I7I2SFVUDFFH7AGPFXACBA,67FFSS5IYEYRGUYFEWGIJDIMII,NXU6DU7HEM2L4DN6G2VHHFT72I,3T4CNEKYCJP2PJ3EQVJSFKHH44,KYTZHQX2PSOGY6RQTCVTUKTS7E,GUBLA7OPRPBMYMQ76EXDMB7BHY,HGROZTZI7YPDW5F37QVUPQEBG4,AB27AIPX5Q5J6UAXQ6C2QFTYZA,7UIVPNORDS3RPPTLSNGYAN",
"IPU725BUGL4HIOTSOIH3KZG5ZA.all.holesky.ethdisco.net" : "enr:-KO4QLf3R97a5p3pRgY1AeN1DisWZUTnHbIMt2aPh0zvq8QIZguS9EiujNa71YXqJgxw7ztz3YmwJMWuPuhfWqSkxqWGAYrayxVYg2V0aMfGhJsZKtCAgmlkgnY0gmlwhMIhKIOJc2VjcDI1NmsxoQIHu5vOuFNLO8814yybwdh03yUQzfPTTyeK_X44Gz5jx4RzbmFwwIN0Y3CCdn6DdWRwgnZ-",
"V5LTO36DCXU2JCCSSJ76E35OFU.all.holesky.ethdisco.net" : "enr:-KO4QMLA20nV5gEPR5ugJWkqHSkU89wLsjSVlpdbjuXGueHRY_VJ7gfCBuPbnyxN3Y0rYJThJU7b7IAA1yBfcRx6Uo-GAYrbDOlPg2V0aMfGhJsZKtCAgmlkgnY0gmlwhMIhKGuJc2VjcDI1NmsxoQNZKgK-Lr-g81vUAH_bV24PDyOwNQaNPgAOBEA3qV3JxYRzbmFwwIN0Y3CCdnSDdWRwgnZ0",
"2SA643JTHJ75ZVPFHAKLXI2WXQ.all.holesky.ethdisco.net" : "enr:-KO4QEPFY3aDBt5F3VQU5qcGZED8Xr-J9v1Fi4CcGpx7q_fydrJM8h0RqAXSdcJbr03b7ysPaOR4mNUTRou4kjPsu7yGAY4QKOX4g2V0aMfGhJsZKtCAgmlkgnY0gmlwhCO4xmuJc2VjcDI1NmsxoQKaOskToHERfimE_ei_VPRsh3fimBlYooVb80PVt0k-aIRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"V52J3WKNVGG56JFRKYDLSZREJY.all.holesky.ethdisco.net" : "enr:-KO4QMC1Y7Fl44QyBr8KDFTyd1IQ_h0w67Igms87RDJeZJuXbc74aIVNkOeK0t3_zEJAT2spttSVu8tKDeV-cdiPE42GAY3KEqdCg2V0aMfGhJsZKtCAgmlkgnY0gmlwhEFsGDWJc2VjcDI1NmsxoQN24zvy5pQRtLA5_iKLgnTwXjI3T1KldFTOyp7Dk6dCRoRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"3PYSNKXWWSRBWV377EFNYINHSA.all.holesky.ethdisco.net" : "enr:-KO4QODvXk9lfhLNmdyttodPJQOIYQM36Rl9OZsFBZ4vMjaJcoJUXxTQCMqGLNTWmFh-1oEy0XoYKgPy4tywLZZcB8CGAYsolPILg2V0aMfGhJsZKtCAgmlkgnY0gmlwhIe1OV6Jc2VjcDI1NmsxoQPj1f0OOW-g_vRGcihewV9-kcsQZdwXBHy4r8vwinurzoRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"ZV5CYUMJPF4XSTFHOG65622KEE.all.holesky.ethdisco.net" : "enr:-KO4QDd2Ia2HHxIk_v5zyXOs6LIkR7a3wVUmipLZnQTJJMZtTNVY5PnRonxr_GAQ3nqG5R8f-eiyoYprxN1XjsfRlnyGAY2jAkZPg2V0aMfGhJsZKtCAgmlkgnY0gmlwhJ7cbM2Jc2VjcDI1NmsxoQLx82Y08wCtO72z_znVjHxPl3hBz1YBlPokUpsLFKRHJ4RzbmFwwIN0Y3CCdl-DdWRwgnZf",
"DQYYUE7KVEXGEYBH4V2QHI6P7M.all.holesky.ethdisco.net" : "enr:-KO4QBhk9j1k2tfVrwV4yVRS9jn2zwS9KKADlhqvMsZsqdXFZiz0s-vCAnqtkDfszY9peGDBiWulLdNe6KFuvqgxgy6GAYtHL5rAg2V0aMfGhJsZKtCAgmlkgnY0gmlwhDmBAQqJc2VjcDI1NmsxoQLXrL6FtBz248tpC2_DxJs2HsvtHdSoNsZIBwth_MMu94RzbmFwwIN0Y3CCdmqDdWRwgnZq",
"MZGQKIQKIOFO3W2GJURJUXVYMQ.all.holesky.ethdisco.net" : "enr:-KO4QI7ESDG0rx7lEU_FFFFkALgNr9roSDrKDOzmEdxTHwsxKj5ozskjEjwFPrShowZbzTTYCp-NRhhw3uhcmKCYy0GGAY1lomwig2V0aMfGhJsZKtCAgmlkgnY0gmlwhCUbQPGJc2VjcDI1NmsxoQMV_yyKYF25xvgoJARC1swzb0S0llZJEmMLRHoINSR81YRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"MJB42632KKOPGJ2KNY3GPKP66I.all.holesky.ethdisco.net" : "enr:-KO4QEBK_p981nqXnme4a2sYvHW2FOz_OwLwstllVRvE8w-dVnDBrHxVZg7XSQwseErDQpfUgYMnBPMzpsx_bDUui5eGAY09OSoRg2V0aMfGhJsZKtCAgmlkgnY0gmlwhLzWgw-Jc2VjcDI1NmsxoQM2fE-8xAAaBUMu7dwviv_y8osp0rYMaKtaGBnbUba_J4RzbmFwwIN0Y3CCdl2DdWRwgnZd",
"QY2XP6FTX7QKFNP6TNZ2MOHZKQ.all.holesky.ethdisco.net" : "enr:-KO4QFsIg8OnqK5HJhhCaEuUTJ7RkLcTAXnUOOcaK63Ra2Kya2HsYSRgM1jRyr9utt3ib83q4fsU_IOg7kuR1jf2u2SGAYray1Tfg2V0aMfGhJsZKtCAgmlkgnY0gmlwhMIhKI2Jc2VjcDI1NmsxoQJiD81UDxWESi3paPaopKxnoF5vf0GAxlroaPr6bSIykIRzbmFwwIN0Y3CCdn6DdWRwgnZ-",
"3A6T36TYZJBFLOHMJOJGL5SPXA.all.holesky.ethdisco.net" : "enr:-KO4QEVn1fwLQuOHSsRVS6bBIv7Nhr39Ze03wqUhwdgEn0IUeEB9TZAx6d9E-C0tBkQdEnbwld1YxEsB9gRY0Od13nGGAYzxMuKBg2V0aMfGhJsZKtCAgmlkgnY0gmlwhAW9up-Jc2VjcDI1NmsxoQKNkgm76tKKtumISB2-1oGEHv_mZtMVh7JGrSmvlX5qqYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"WRRS22MCKTZ4YS3ZLPUJEUAHUI.all.holesky.ethdisco.net" : "enr:-KO4QG6_DN_yg3WbcE7jc_tGXK9dLJPplfz0cbzCRwT8v5o6BBk_e_ER73cttVRyYkkvBDyw-akwTHYxjCe2Jp7OWuiGAYrbEiVVg2V0aMfGhJsZKtCAgmlkgnY0gmlwhEEV49OJc2VjcDI1NmsxoQKYHwvu1PazCFKpleCUgYvxialsHK6_iLwC_R2i3J70fYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"BAY6SKB2RTQCM7RTSHAFQ6TTBQ.all.holesky.ethdisco.net" : "enr:-KO4QIo--Wic2Dyk2II8x2uTPNOFY3CRZ8wmABP5qOhmZv4LXvAfbdISPp7HDrJwrejs032LHH5rhUY5l8bj9-M-_u2GAY3GDGQWg2V0aMfGhJsZKtCAgmlkgnY0gmlwhI6E2M-Jc2VjcDI1NmsxoQM46NJn0fjSRwgGkB0G1n7QQ9VyBDwiPXPX_xgM25tDOIRzbmFwwIN0Y3CCfEyDdWRwgnxM",
"5AOSND63QKPVT6EWNMALKAFC4I.all.holesky.ethdisco.net" : "enr:-KO4QKn6DlOQ0ybfLAfyPlyPssuWtP0Zu5FEUEgr3015XppiCP4yr9SeMgnpN90AVHJA5C61F3677GiO0N-JIXGfN2uGAYra2k7ag2V0aMfGhJsZKtCAgmlkgnY0gmlwhMIhKJOJc2VjcDI1NmsxoQKgKJovbRS4hL3ugMVrevOCUGDS-ixgByq_tbh4T9oihIRzbmFwwIN0Y3CCdmCDdWRwgnZg",
"PL27G47LAASBPPAMXUDIJ3OCRQ.all.holesky.ethdisco.net" : "enrtree-branch:3JSDZJCCWKIEBGPH5EI5VZF4RQ,XTJ3PFTPBB3ATDDWAA6W7RRKMY,FTTHOFPRPIHMJNZA3VZUAW5TIM,DAVKEW6RQ5YTYHKCJ2A2J5GVCI,TGKURK5IPVCPXS4QPS2KZJCP3Q,RWRV55FT3DKDVGZK7AEU2DR77Y,CTDMY3ALJA7FRKVS4MCTAJRRH4",
"HQ5S2EHH762PSKVTHWNQ6346NI.all.holesky.ethdisco.net" : "enr:-KO4QK883RN_CIppuOvhzi0O3Qg-XzHUZsdKIEqLdvZMLN5BCV5QTqqxRzJjtXa7NZjBi22UQJUUlsrQ_3g3Uavv92aGAY1BRdWag2V0aMfGhJsZKtCAgmlkgnY0gmlwhFJkOnSJc2VjcDI1NmsxoQNAyMtRruAQcNn10ZReZiiZtPnk35o_oJEhabuBKi1gLoRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"XPTWTOASNO4WEBXH74WCQ5EYTQ.all.holesky.ethdisco.net" : "enr:-KO4QEk3I50HU8omZLCYgSwXs1caXS-q06lC_GnJT30gbzlIEDWBldaoC83sg9aWTQ9DdkQStosOuYEb13gxTwp4tSKGAY5DOiDfg2V0aMfGhJsZKtCAgmlkgnY0gmlwhEEVX7SJc2VjcDI1NmsxoQPCijWe7yQtKeDfuj6WXiJZ6CZ88FGXKj6DGSWdq0E0qYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"HOXANHAMPVCFA2QR54VEMWCWPE.all.holesky.ethdisco.net" : "enr:-KO4QELWztCXUz9MCxD8zbh65aa6LwP3wubhdXJVdlyrl7QzFJkUXosDXndbQqtAT2qD5nXAFpBnz5MeUSZf_GfjkI-GAY16vTc2g2V0aMfGhJsZKtCAgmlkgnY0gmlwhCZhyOSJc2VjcDI1NmsxoQMGwfOevbfZOGp3bL_2AH1SBdx3zn2_l56uG_4CWDf66YRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"BBNHIHYBPXVQTDZQ4JXK4QYDMM.all.holesky.ethdisco.net" : "enr:-KO4QCMGWCueZIoaBJyNAqOo_1wJ-QmbQ-qlTQx_rfz0gzKjF8VqFc99Q3ZMcfpK2qdLtWZHoRoiCkstH9_rWAU49DaGAYrbEkgdg2V0aMfGhJsZKtCAgmlkgnY0gmlwhEEVzHeJc2VjcDI1NmsxoQIy6rguhP2KemBUH2HJyW-GPo8prt6Ay9gjX3GugxLuGIRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"LHFTDS37XFOEXQKTWJGKWRLFWM.all.holesky.ethdisco.net" : "enr:-KO4QB0RN-qZSymbzZA-AlKT8213I60xJjVVwN-AWwjW9W9aLk-2aq8jW_jbgKnJY3c_wU26oBzYFBpOVcYKIV_nd16GAYwlWhtPg2V0aMfGhJsZKtCAgmlkgnY0gmlwhMPJ9bKJc2VjcDI1NmsxoQKlc1OTSAanfHEsPtrKjZvkgQUuHFZon2GtnCRHlDRV7oRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"67FFSS5IYEYRGUYFEWGIJDIMII.all.holesky.ethdisco.net" : "enr:-KO4QHY48cNs9Lauv6W3XCDcuBvhJbmL-0MAASBah_V-jG6iODMGVIZ9xv4K3MHSqQQht3Fc39CjU5zoYROe6D8uOq2GAY2UXNmdg2V0aMfGhJsZKtCAgmlkgnY0gmlwhMb0yKiJc2VjcDI1NmsxoQNjc7k9S4kjTIwltOf7EH2ZmK3vZiEBpxxNqondpU3qV4RzbmFwwIN0Y3CCdl-DdWRwgnZf",
"MOBGCDYGQA4CNBQZDPN52JC5E4.all.holesky.ethdisco.net" : "enr:-KO4QDuar8SHEX1o_kUIKB0IajdUqcRb3ZkZdr5_MIxT1BUkLVk8AelLTv-_ioVGRAwNIJTzT4m8nyzV1VbsjjpeZ0yGAYrXy6Pag2V0aMfGhJsZKtCAgmlkgnY0gmlwhMIhKEeJc2VjcDI1NmsxoQLy0OM9Ze8JjVHZhLQg7XejbI5iedlSDZNRmtQdTy_X8oRzbmFwwIN0Y3CCdn6DdWRwgnZ-",
"ERETBCE2BAPJRSWIEUQV3QPP7M.all.holesky.ethdisco.net" : "enr:-KO4QJY9GUtSCCXanmC9p7LYAfqk7jOUF-zxl7zd2ce5lD_aSNk9KfzBbh-Hii3qMzv-2x4a0Xal7RQMzQv9BqCkUMeGAYw1cGzgg2V0aMfGhJsZKtCAgmlkgnY0gmlwhEEVR7SJc2VjcDI1NmsxoQPQjk4qnGis3WKuSQDD20H-9n9xKFTbnLsB1DhAteyYk4RzbmFwwIN0Y3CCdl-DdWRwgnZf",
"5AHKPN5NN5IHEH365X6OYZDHDE.all.holesky.ethdisco.net" : "enr:-KO4QM-W42wfRYcTwaegJy3SfjlCkZsQU8LhGcluTdxtFjqXOwsEE5kUewkPMe7qIQpwb2MAo5xTeRpnijbdYTaG37-GAYtxMJdhg2V0aMfGhJsZKtCAgmlkgnY0gmlwhKh3IxmJc2VjcDI1NmsxoQNDrMpeyQI2aO_yoNP41RAf_BHtckptN1l2QIWxgDKOC4RzbmFwwIN0Y3CCdl-DdWRwgnZi",
"FEORTRPBZJSNRB3XCLK7KCMACQ.all.holesky.ethdisco.net" : "enr:-KO4QDO4oH5cjhncisJSk1SyGmwJ5VFjNetXw4OSSqDKc0NMDGXYYl6wUjbhcvzADYDpE1Br3lRG-1xV6iA1OAyXSAqGAY1W3CQQg2V0aMfGhJsZKtCAgmlkgnY0gmlwhEEVfkOJc2VjcDI1NmsxoQKkjEwDL0wuG0AH3RWw1wRrYHdPa8OOL1Ko4DYcZeQjQoRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"AB27AIPX5Q5J6UAXQ6C2QFTYZA.all.holesky.ethdisco.net" : "enr:-KO4QNZrQF9iyry46odExoUh3kkHjWWHS0iEEjc8n2R4Orq8Rov4Ar3ozsyWt9Z-dmBtpTfmAo3UYspLFcvtHNzwjPmGAYwWbywRg2V0aMfGhJsZKtCAgmlkgnY0gmlwhEFtbbyJc2VjcDI1NmsxoQLD-zctKWrecek1fu3o2IKHUY9rkd0BYeQHrzpqElk_1oRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"NHLNHXPRKOALUKH4MZ7CIVDONU.all.holesky.ethdisco.net" : "enr:-Je4QBgb-GKZRRjU2TAE_9KyBHC8ImLYDPG8KShRKx8gbKF3F-gpoHiyeprH42weMNSg6i8GeG-n0SxwWJRe6zyXj88og2V0aMfGhJsZKtCAgmlkgnY0gmlwhCV4sKqJc2VjcDI1NmsxoQNuVgNhx7pgtJotEBq56F9YFom-6szZ97gCkea35_lDkoN0Y3CCdl-DdWRwgnZf",
"XPX2T64BHDYLVGT5UC4GVIQIVY.all.holesky.ethdisco.net" : "enr:-KO4QMlMC7rvmM9tvHXvwo2Vj5x5FePlyvH_6lBSCgZHN8HcWqtppk1peQlK9Ge79ma4j3gx_zX0hgJvprIy1ehjXXCGAY3Mer4jg2V0aMfGhJsZKtCAgmlkgnY0gmlwhIe1FCCJc2VjcDI1NmsxoQL3IIF3eENfSsr5tih2ebcm3dWL-otLdZYqUvfJ-5_9U4RzbmFwwIN0Y3CCdl-DdWRwgnZf",
"NZCRGKUE7ZXSIS4MIQ4WUS76BA.all.holesky.ethdisco.net" : "enr:-Je4QDqwqlm7UBywmVlR5UkkrQrg5B3UpkFexP7ucg4RAsdlMiJ4J1S5jT8jp6je6igMZ3OOnggdpd6l7QtQeNBICycDg2V0aMfGhJsZKtCAgmlkgnY0gmlwhEFs6LuJc2VjcDI1NmsxoQMgMreR2XspGJphg3fToxGKcMwWPE3e0gyxcqiQNLQXNIN0Y3CCdTuDdWRwgnU7",
"OF3GRGFZUAJTH3T2R2EXSYAZOU.all.holesky.ethdisco.net" : "enr:-KO4QDPbolwwOMTwV02zqyVgJFxko-HI8UWOT_K9sDcWgHG9fRDWkbSbTw_l3bcIA2-q3054lrJ9cbkHABlMZbQskUKGAY1y5VBWg2V0aMfGhJsZKtCAgmlkgnY0gmlwhKI3BbaJc2VjcDI1NmsxoQP9bM-ylBjwT9ujG09A__0to5F3Qw4QSvY4_vgyQ0A7j4RzbmFwwIN0Y3CCVTyDdWRwglU8",
"7QEPEVJFOCL5A63GZLZ4IZNMOI.all.holesky.ethdisco.net" : "enr:-KO4QNGh--C3ckyCMlUP4u86lt292CLHOQ3YIwk5Jqzz5x3OID_7BfRn5qN-ZBoBY80XOhFyHuo4eKDCIowQgYMvVY6GAY5S1aoSg2V0aMfGhJsZKtCAgmlkgnY0gmlwhFhjBqqJc2VjcDI1NmsxoQKZpCZSQgSun9pLzGrdfLboslJbD-JxtTsR4JF860eZYYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"Z5DOK3IB5X3IBOVVIK5MQCDYNA.all.holesky.ethdisco.net" : "enr:-KO4QJjEv14HV0BgdPwIa0iO-xrrwoi9k9LJRw3utIUVib5dF6s_b3y8z0NP2VyUGsJDFOwuVQq6OC6hb5_hFUqnf1yGAY12fbSog2V0aMfGhJsZKtCAgmlkgnY0gmlwhCU8-mOJc2VjcDI1NmsxoQIT2maaVlQBw4oD4loM1Q7gygn42pTQn1Sydo74LtZAZYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"CHFGCI2RQS3XFN2MKFP6G2ZM4U.all.holesky.ethdisco.net" : "enrtree-branch:XIOWDM2BTMANILG2M4LCMO7W3M,4LPHENNNENCUT7P3MWTB5IFRTY,5NQ4MKTYBPBMTGUT5IKUAXEGBE,CVK4DBIVPD2JPJOPQJQX77ZAWU,WLME2JINTIQACAMD5WZUQ4PRJI,DQYYUE7KVEXGEYBH4V2QHI6P7M,45V6KFI4NL43JFTJHL77KP6MQA,QDMF4HIR2UEXRJVOX4UJPJTPKA,BMVEIV6PNSN6RK5XEU4R4K",
"3EMD3PTQYRFVNFBQ73BYFL4AEQ.all.holesky.ethdisco.net" : "enr:-Je4QFiHIu4B3YbbG75mlr58JRaqpGUQij1n1pN3zaHoWbGrShq8aYqipbkPLaINPTX02YBfchyACgLHgH9cMarIUapQg2V0aMfGhJsZKtCAgmlkgnY0gmlwhDNRao6Jc2VjcDI1NmsxoQJHJlekJODLxBDPwpVLBtTMizLq5o9JtXkD8MQpqAIUYoN0Y3CC2fmDdWRwgtn5",
"IZNO6JEFTYHQWWF3LJ2M66P7II.all.holesky.ethdisco.net" : "enr:-KO4QLmjLLs27xB9n9N2TNC32EhbxjdiR32rroemDty1dQJsBSZQOawi3-HGqWDRifXce49pv2WewtuzF5j54iiUfbaGAYrbEiX_g2V0aMfGhJsZKtCAgmlkgnY0gmlwhEEV5riJc2VjcDI1NmsxoQO4pbsV79dSOLkBaOX5vcv6Amy9rH2xg1wW3OsKxVA7PoRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"OA4ZFFGZNFUC3LIYCUDJOPVR6U.all.holesky.ethdisco.net" : "enr:-KO4QFuTcCcZkhGlEZ9YjHVURcwzRDKDu_o8RBID9eF5lmG9fb9ZHBHnz0E-tXO4TcPfwxEtefRAaAVPj9Ycdi3OeXuGAYraylBgg2V0aMfGhJsZKtCAgmlkgnY0gmlwhMIhKFKJc2VjcDI1NmsxoQJRTkcLLW6c3J6kd1b1siWyumgXACvQK7ViQjsgMFx0c4RzbmFwwIN0Y3CCdn6DdWRwgnZ-",
"ZCAW4EMBOL5EH4MSMW7FY6JMEE.all.holesky.ethdisco.net" : "enr:-KO4QHc4CzkY6KkiTGR6hTovOeJ8VJuUtjkq2-UMXwUijiKvAZOFVS_Nn_GHMVZ8Ppsu31rSLYAEFRu0tMJ-LIDdSjOGAY2sYroCg2V0aMfGhJsZKtCAgmlkgnY0gmlwhIrJyaeJc2VjcDI1NmsxoQN7HHah7WnGluJb0mqKelBJ8rpvKlBb9t7848BwJ4qau4RzbmFwwIN0Y3CCIB-DdWRwgiAf",
"CVK4DBIVPD2JPJOPQJQX77ZAWU.all.holesky.ethdisco.net" : "enr:-KO4QH3RGFoO4KTuq8gxJC_mjnPBqNaDvoX_xsuYbDOqvJA4CPJs6nifZuqE2sTB3O-kHvEI0dbFJ87FoWkKhgmXYuOGAYrbEhVBg2V0aMfGhJsZKtCAgmlkgnY0gmlwhEEV7HyJc2VjcDI1NmsxoQM1AMXary_Hiw-yesOw24q7GwMH_DmNHHitFPU46foJ24RzbmFwwIN0Y3CCdl-DdWRwgnZf",
"O4SSPZG3DFA7PHZ5L42DVOPWDM.all.holesky.ethdisco.net" : "enr:-KO4QMrNy68H-dYJbPM7snJ1UrV-TPXA012sdna6WktkfksMED9fE3g-kzp7gBgOFEdFi1KIiEv5MXXAoOt1KOMVc1eGAYw66STEg2V0aMfGhJsZKtCAgmlkgnY0gmlwhF_ZxOCJc2VjcDI1NmsxoQJUhK-aKtl-Gc3ZnfF2k7oLFXmpGCHEi1_fBxAEfc1P54RzbmFwwIN0Y3CCdl-DdWRwgnZf",
"TGKURK5IPVCPXS4QPS2KZJCP3Q.all.holesky.ethdisco.net" : "enrtree-branch:E4GGBOZ5L2UFPAKJIPLNBA2MKA,OF3GRGFZUAJTH3T2R2EXSYAZOU,D4GT5QUSIVDVPURCUOLE4WITZI,O6EQKGUJOO2BQGMADYZHOSZXLA,WRRS22MCKTZ4YS3ZLPUJEUAHUI,3A6T36TYZJBFLOHMJOJGL5SPXA,XPTWTOASNO4WEBXH74WCQ5EYTQ,2XMYGJOCRIHHMHAFVRH3OES5QY,FYJOHHN6LBARRNNLI6SK42",
"UIGSNQMHERWJIZCP2OLUXSZ3KM.all.holesky.ethdisco.net" : "enr:-KO4QEvn7KsE7UDJ5F3HwtSmGNSy0JSYDLfp9uJRtq5WfSsuOdkfq3V7GVSDo7IfCTXYAtQfRwT9q88II0sajv-m_CKGAYulV6JLg2V0aMfGhJsZKtCAgmlkgnY0gmlwhMwQ9ISJc2VjcDI1NmsxoQNVsiSzpuxjNDgNToi4u48-5kCgvsjuoUsvlG9VrnEjhoRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"3T4CNEKYCJP2PJ3EQVJSFKHH44.all.holesky.ethdisco.net" : "enr:-KO4QKNzTVC-wa7whLxFFss7NzEFDdVuAjgEebNGGK9xTWKjHTjqmMNfY4_h90iqlSYM_LoGyH2FFXhbHT05zKyALMSGAYuqLQ8Sg2V0aMfGhJsZKtCAgmlkgnY0gmlwhA360gaJc2VjcDI1NmsxoQPSUi99W8lkrquAhLsmslwXU4-dxxVfied-w_yqYEXpU4RzbmFwwIN0Y3CCdl-DdWRwgnZf",
"5ZWIGTGKMFAFF4LAODABANSBOQ.all.holesky.ethdisco.net" : "enr:-KO4QFP5g9U1YtBLeJi56xoBll8E4eUJeAAYMoEg1l0xPXRybh0OYNZfteBjkHMwbucccNdGB0amzNR4XtxmAfjoFWKGAYrbEjd8g2V0aMfGhJsZKtCAgmlkgnY0gmlwhF_YY_eJc2VjcDI1NmsxoQJb-JLk8MFQQPMz3QI2ya4FtZdHqZ1Nm0xD4MtOYc9ifYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"7J7D5R4SH6LVDCFZ5ELOP46IP4.all.holesky.ethdisco.net" : "enr:-KO4QArnrdy9hfW8KHAwokQf-x0LbdoDQ4WoK0H2dXqhP4D-TflLF_Ywf2lh4ybXao8jjNSxC53MzmFPUI9A8k06XDqGAY2KrTRtg2V0aMfGhJsZKtCAgmlkgnY0gmlwhLWkyzGJc2VjcDI1NmsxoQKSAHhJOTtkBlasBECB1mrukuL6jqOEtxWkpH-PfDp8h4RzbmFwwIN0Y3CCequDdWRwgnqr",
"5NHWEAJHKSAO5DIIYAZ7MQJVSE.all.holesky.ethdisco.net" : "enr:-KO4QMiOfa0JolrO470F-mRKslpq85BBGRz_JjoAOOnLuUdxBObP7579igrB-YGSdaUtb3Ih9QXBHRAl0PWYmE5NW32GAYtIvzSsg2V0aMfGhJsZKtCAgmlkgnY0gmlwhMF6nk2Jc2VjcDI1NmsxoQI57ZbbGPYfl0DtrsmjeKDosRG14eV2qLI8AE6FsP2oy4RzbmFwwIN0Y3CCdl-DdWRwgnZf",
"OGKXEQEVGNJ574WCU7KFNTXNQI.all.holesky.ethdisco.net" : "enr:-KO4QH_WNLhz4qmL-EOMCQFfhE3QOOvyG5uyX0RR77gYrsdcJnUxS5OBQWyMqhRQLO2P9htXeBxJ4kR9ez13qQRiBp6GAY3m5RB8g2V0aMfGhJsZKtCAgmlkgnY0gmlwhIj0RveJc2VjcDI1NmsxoQJFRRKo2aP4qXB8sZnA6_rJqKgo8FZGT2i3ipEzwXCayYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"D4GT5QUSIVDVPURCUOLE4WITZI.all.holesky.ethdisco.net" : "enr:-J24QAgjCyFyNsDJHJwpo6uy0l7yqJD5r6WF58M1T7_PqsyKC0t6y1iTsQX4Crz6iv5Ijc8LqPZIC6vhA_F5GqkdkgSGAYtfbIIbg2V0aMfGhJsZKtCAgmlkgnY0gmlwhCvIu-OJc2VjcDI1NmsxoQKq0Xs_18RfgehL1rawMx2B3VMaH9d-YNCY5kmHuaCnqYN0Y3CCdl-DdWRwgnZf",
"OOQAXEUVKR5EJ722HM2UANT3R4.all.holesky.ethdisco.net" : "enr:-KO4QBU5Jlmco2sEPOXNEctSfBJfxdgANrWXVLS-BVDQYrU3N_oaeceDQIH82zZ6BSi_F-FQpSKhEVYtPDxI1BA4OW-GAY5bg91Qg2V0aMfGhJsZKtCAgmlkgnY0gmlwhIrJH32Jc2VjcDI1NmsxoQMXPocPKx0wRocEEzASwUfzDLKd1f-Jifv2pyIu2M8vaIRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"ZJOONJUDYAQ53XOJU4KG5W4ATM.all.holesky.ethdisco.net" : "enr:-KO4QBGQGqvIoSv3C7d-bCiMst5GGEvIQaQ5pAHs5On0MeNfcVjcZgd72l2UUoqf13HoKHUnVG37cmHw-11H4Y59F6iGAYrbEhVng2V0aMfGhJsZKtCAgmlkgnY0gmlwhEFtO36Jc2VjcDI1NmsxoQJ3QRuoMLUn6djLfYCFTKSV8kUWRQTOethOOhLDW6ezdYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"INA5QYTPU6WIOBT7IC7U3WGDIY.all.holesky.ethdisco.net" : "enr:-KO4QDpTzc6voIJiaQO1T8cbJvPej2OvifSuuDkQVrAcC3imJXT9J9zt5DelQaMAeu_uk11kFFgYJHBAE88J2cvxYGqGAY1olkLcg2V0aMfGhJsZKtCAgmlkgnY0gmlwhLnRsD6Jc2VjcDI1NmsxoQKfdroTKrg5QqtuIyfF7LivDR4GjeBnA8xQrAg5ma4pzYRzbmFwwIN0Y3CCy_KDdWRwgsvy",
"HTMBFBAIZBACDG3EZZ4JVANCAY.all.holesky.ethdisco.net" : "enr:-KO4QHHstlNCTI8xMnZ5mcIRvd8lfo9YoVeyv4pqvhtkxGI5PhYqDDqu1W5XqHWCMcTt8fpVgu9KojZHcyi7fhQhPdyGAY2_zUcvg2V0aMfGhJsZKtCAgmlkgnY0gmlwhL5czy-Jc2VjcDI1NmsxoQJpsTl5abUECWCfSno785tWBNPiVCJ7GG4eQTf2Cf1qBIRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"KEC4722NVHE3KX3IYNZC34C7NY.all.holesky.ethdisco.net" : "enr:-KO4QMjtE87kVQpcuRPXV31w7fsvyit4Fw995wyZ_h6uK8atWGOhXpsi3xXsBQLgJFBWmNHgTw-V4JvZNFZkFHLU_8mGAY5hRoXMg2V0aMfGhJsZKtCAgmlkgnY0gmlwhC1Mp4CJc2VjcDI1NmsxoQPETNdXN5Cshfr8-rIIMev6E_MGqD-jzqcXW2yfzT3C54RzbmFwwIN0Y3CCdl-DdWRwgnZf",
"LNO6EN3Y2LJA5YOJMEMODAJFWM.all.holesky.ethdisco.net" : "enr:-KO4QMIX_--Ar7-XnFcUVlJpBBUfZdfNOftELgvHhCGvbLGTMT_Yqs2M15BCG0qoHsYkVBP5b4wd2wqNyNIfFaTtQ7qGAYrXxOTEg2V0aMfGhJsZKtCAgmlkgnY0gmlwhMIhKEeJc2VjcDI1NmsxoQKG05NXftvaats582r82_zpOi2C54WjSeYwxKfSNeSJaoRzbmFwwIN0Y3CCdmqDdWRwgnZq",
"R6EQA5KQEQM77JJXB4BHHTDF6Y.all.holesky.ethdisco.net" : "enrtree-branch:5AOSND63QKPVT6EWNMALKAFC4I,WAU46WR54TX2VBK4QM7NFTA73Y,BBNHIHYBPXVQTDZQ4JXK4QYDMM,OA4ZFFGZNFUC3LIYCUDJOPVR6U,KMY5HFBGCUDWN2ZXE6QNUQHGYY,OGKXEQEVGNJ574WCU7KFNTXNQI,CDXRYZFKH6OUWTA2IJ2MYIMYPM,IPU725BUGL4HIOTSOIH3KZG5ZA,G4MENE2MCGXFPYEBF7SQEU",
"45V6KFI4NL43JFTJHL77KP6MQA.all.holesky.ethdisco.net" : "enr:-Je4QEedukMKHefwNNtNam8wvx8_0GMFHLC9nA-TWWPMTZ9FD4C1CrjqFxLNruZPGL4T1ApKPtes_ApKbwwOEc4dNxIDg2V0aMfGhJsZKtCAgmlkgnY0gmlwhIrJ_vOJc2VjcDI1NmsxoQIKnge3StO67CequDTr_QwD_V_qhS7TnGO-tgE472AFroN0Y3CCdl-DdWRwgnZf",
"5RYAVZN3SSZZQ2KISUO7XDBPE4.all.holesky.ethdisco.net" : "enr:-KO4QGMFa1U36mwISUTSlJN8q2EKJSw4WHa6PAMrHkhL9-naVdAWmGuwfpel1E5_NBSnPWRsG8FzmmUH61iq8fodXcKGAYrbEkT3g2V0aMfGhJsZKtCAgmlkgnY0gmlwhEFtb_iJc2VjcDI1NmsxoQJ62Txw9OKK7DTE_CZRC6tX_223dks-FcinnSL1uXUbTYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"KMY5HFBGCUDWN2ZXE6QNUQHGYY.all.holesky.ethdisco.net" : "enr:-KO4QLBXgetH_VHZgzalnkfF-iibF4km3OCLzY1TK27U-yG0ZA7Z4C8DL-da5x65khpgvOBa8c2CXffZBdTtD2BZQCqGAYrg47Ahg2V0aMfGhJsZKtCAgmlkgnY0gmlwhJT7RHyJc2VjcDI1NmsxoQKeRdcysv01DZ_Mzk9t1jcWW-YTjP1fPIrHVpw9hc_Q_oRzbmFwwIN0Y3CCdn-DdWRwgnZ_",
"YUZZDZD57PTNEIB5QL3FEXK2ZA.all.holesky.ethdisco.net" : "enr:-KO4QKuFVKbKbtK1djMy6q3TLuAWYDAjoC20cYPdhcOk7PhUdA67TXr9vTGfZZ9AO7ivmQZyRW4w9TFk-29_xP72bkKGAY2EAMvIg2V0aMfGhJsZKtCAgmlkgnY0gmlwhF_ZKGKJc2VjcDI1NmsxoQN4FBojS_W4gviNwaTDsIXiBEGXaQMtUzxQHwKWDmMR74RzbmFwwIN0Y3CCequDdWRwgnqr",
"2XMYGJOCRIHHMHAFVRH3OES5QY.all.holesky.ethdisco.net" : "enr:-KO4QAMrUvclx-EHZrO6x4K6fWTwaKh2zS5oWJzVU6oZuzZ2GG5xdQky8tIrpUUSKGB6XGvr8TvmiS7Dimvxx9Hb1OOGAYy06xQvg2V0aMfGhP1PAWuAgmlkgnY0gmlwhEFsSLGJc2VjcDI1NmsxoQJiz0q4ywyhHvmXBrWbFykzflq_J6cmLFIbepO9PLBwhoRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"5NQ4MKTYBPBMTGUT5IKUAXEGBE.all.holesky.ethdisco.net" : "enr:-Ke4QLrsveUYt2tacm5EZETFc1F3EyvNYfRRkRhljyeLMIzccRlPI1kKmBWuELQs5iAIRZgv92P3Fxx_zJ3xyUbN3muGAY0x7p4Ng2V0aMvKhMYaYJiEZRbqwIJpZIJ2NIJpcIRU94PXiXNlY3AyNTZrMaEDBxF5W6guB9qZoR-c_zuDUE1UHyaH0FMjJKHC9Jq4ji6Ec25hcMCDdGNwgnZfg3VkcIJ2Xw",
"6WAXAZL7FYKEJBZOPWIHCI4F6E.all.holesky.ethdisco.net" : "enr:-KO4QCagHy1Q7UED3AJfgVHxFKOn_DRS8UTUg0okE7fW8VTcDCW7wlzWufdWCtdUwHpnaf3EZpRn2YRuo9u4LS-Oh3qGAYrayS-wg2V0aMfGhJsZKtCAgmlkgnY0gmlwhMIhKI2Jc2VjcDI1NmsxoQJYC74L06jkGeWkmh1aSjeBYVvzDCWEvtbd111Vu8WiG4RzbmFwwIN0Y3CCdmqDdWRwgnZq",
"ZRVWX74UFHUWOPCC7LOPLYGIZA.all.holesky.ethdisco.net" : "enr:-KO4QBfm8BEr7OIRv6UT1t5mhE2szefodQuIIBKMFnyvbky_K-wff-l6pqLwfLyUblqFNxqM6Xtcnari4ItzO9236DuGAY08Swa9g2V0aMfGhJsZKtCAgmlkgnY0gmlwhC7r5ZiJc2VjcDI1NmsxoQIH5RAmLOvYi50mYcMnglYJlz-EC0E2YsetPVUzvbs5BYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"2WHIXLN5QX3JEK3SSH7MXKTJMU.all.holesky.ethdisco.net" : "enr:-KO4QF2wWLadNPZ_qwOIZkLxPEJjj3N23892JKdtSOeeC-X-V0bZsjQ3QqpidJviGT0WrBakFANcbSa9IqFSOslJLV6GAYrayMEMg2V0aMfGhJsZKtCAgmlkgnY0gmlwhMIhKIGJc2VjcDI1NmsxoQKQ_STV3aCaM5we4KxMPMpdG63xPZieKw8A36OR2tuouIRzbmFwwIN0Y3CCdmqDdWRwgnZq",
"I2Q5U7BDYSM7O3O2IKIRL3KTZY.all.holesky.ethdisco.net" : "enr:-KO4QIvQh_txoF6gwXTRsMzzvHPdwQrkCHQYt4egwTWqUNQiMQh1yJ71U8UXs8b9pP9R1mUDTaiQ7-e0eB2Zbvouyb-GAYvSz8f0g2V0aMfGhP1PAWuAgmlkgnY0gmlwhJVmk6SJc2VjcDI1NmsxoQIuT88b-jLJBI40hV4VY3YeJnmYT65v2QPjyzuNG5dyF4RzbmFwwIN0Y3CCdl-DdWRwgnZf",
"NQHD4WNRRGE5JHHX6IDKGIF7KU.all.holesky.ethdisco.net" : "enr:-KO4QMTq7zJT6fLS-LY7n3hWmcwJqxRJraGJcM60U9Cej2lRRE-u_KhOd9r712IBEOI39uiVQEXGdBZvc-dJIESUcOOGAY2dUsseg2V0aMfGhJsZKtCAgmlkgnY0gmlwhFD5eBSJc2VjcDI1NmsxoQO-UyE5ad7DaM-f7fZi4QEwjfxIR6HS-l-lTSlv4ZLdB4RzbmFwwIN0Y3CCdl-DdWRwgnZf",
"PX6K4ZETH5UX56IKHKL5TNOOMU.all.holesky.ethdisco.net" : "enr:-KO4QJqslvj0RYuX2CI61yCK7VuyY9Ik3c5EHtpkKehriXyqchhCDMx4sRuewcCAiWO_frsUevV9GXMPnsO2nzCrgFGGAY25poLHg2V0aMfGhJsZKtCAgmlkgnY0gmlwhKfrsZeJc2VjcDI1NmsxoQOe9l9K5UW0RAuQTH1q6CwG_UzbJyKLGSt4lUt5_tVmyIRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"CK3HQLGRQATDVF5CEGHX4DVQRE.all.holesky.ethdisco.net" : "enr:-KO4QJy9FHDJZfMqgGGIVGn-j0rHI6JDjTWEo7sN1mBkzO68ZId8jkRQfgZ5dJkv4GGm2rsvrWk6OJYF0td7n0Ve9GqGAYu0QPj0g2V0aMfGhJsZKtCAgmlkgnY0gmlwhHTK4wuJc2VjcDI1NmsxoQIYyd-9-wdwfPPu6uRcnTKkQaCekdhR7488-2pxl36AZ4RzbmFwwIN0Y3CCdl-DdWRwgnZf",
"IDXHFSATDDENJYOYJC2TUSUYOY.all.holesky.ethdisco.net" : "enr:-KO4QNWpVQT5pw-oUuoa7JDOVs1KheGaPRAR_fIarAflnc1yHawwEpptaB5JWkRXgh-wJtk2KlHE5zGICXefHD23Z4qGAY2-EYHHg2V0aMfGhJsZKtCAgmlkgnY0gmlwhK35GlyJc2VjcDI1NmsxoQPV0hDwmz1HaZ4pLm1hzee38NMugxA_7j3v6hbxP_LRCIRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"UYESJQUB2BHP2MSJNUSDRGXQ34.all.holesky.ethdisco.net" : "enr:-KO4QB9gKzGbKvqoPJZymq3AiwZvWSHOup-36D8ec3CwNZDFRoEmp64UmWaP0St3OXDIV4Q2Rm0MnelRnAjvn3HRfyyGAY5EPB9Jg2V0aMfGhJsZKtCAgmlkgnY0gmlwhIrJ3_qJc2VjcDI1NmsxoQKHt6N7ehDnwT8_lFh-C4KgqUplYwDvYy6rOP6DFRO8lYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"76OAILAEE52CRVMYFBIBMSEUXE.all.holesky.ethdisco.net" : "enr:-KO4QDRyM88zIGJ2P0Ya1W8fucbp8kRTbVzV8k_2eawB0W3ZHMQ3o0HlbXVw3j1rMgN167LmsaMt1iO015HMVKGT2GqGAY0rjdqhg2V0aMfGhP1PAWuAgmlkgnY0gmlwhKFhcJuJc2VjcDI1NmsxoQJ2emahe6-2fq_hQqxm99rgYi4TSzQ1ky4utO3Tcpe-yYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"NLYAF5XVTTQUOMK4LYUF3FXNPE.all.holesky.ethdisco.net" : "enr:-KO4QMiia-4vyLiHQQhxlbmCKdOTRqOiZin-BdAdi18LxNqqHEiuIQQ-F1dZu8sKi63vEk9zwr5gfMnxXQZCThEaiDSGAY2UmHG4g2V0aMfGhJsZKtCAgmlkgnY0gmlwhMb0_ESJc2VjcDI1NmsxoQLb1ZwjPQw7AjCvlHQpt9bmePVD85rbHbnFZ0naOB1RNYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"QXRV67JYJNOYJDPC5CTKUVG76M.all.holesky.ethdisco.net" : "enr:-KO4QN1MRyJdifSbU3Mg_GUI0_ApfQQ_BcPgDV0FXjVvk4iEWX6EgMjXkI62H1S0DFnyWeHnvr-caabqiKtlvbIBICyGAY5Cyclbg2V0aMfGhJsZKtCAgmlkgnY0gmlwhK9jhAaJc2VjcDI1NmsxoQL8tsMGtv3EuJfUdI8AzGBxYynVj2HlNoz4wi3COC4TTYRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"4LPHENNNENCUT7P3MWTB5IFRTY.all.holesky.ethdisco.net" : "enr:-KO4QF8nbKqJkOeYlF-akOVBYzusVwSjI_ra_t7TmM52mEisQLCwNAjujSSc9-6ECErhss-gVTgBDzhY5QmVcF0PuZOGAY3Li2SUg2V0aMfGhJsZKtCAgmlkgnY0gmlwhCL__BOJc2VjcDI1NmsxoQLHimHBtxJxch5bc20e_O8OdeN3UQx6TQQWqaYfyi66OYRzbmFwwIN0Y3CCdW-DdWRwgnVv",
"5FNWKKKVUPIHRKWZY5P33A7E74.all.holesky.ethdisco.net" : "enr:-KO4QMD5jLnGVTSO75tQPB1JOCFKmtSPRi5pmE7wMwjDRfVKdwpZJvr8qfv1WmfRnJFzH5qV2sfC187nT9mp-xE_-gGGAYupHwgxg2V0aMfGhJsZKtCAgmlkgnY0gmlwhE6KPgeJc2VjcDI1NmsxoQP9YVLocwkml9Z9YNDRpmzVceRY-Ts45qiPOBC1OsxuUIRzbmFwwIN0Y3CCdmmDdWRwgnZp",
"HIDVATDVB36L2MASAWA7SBJAII.all.holesky.ethdisco.net" : "enrtree-branch:N7HAL5M6HNZBGTWM3LWFDRX4WU,PX6K4ZETH5UX56IKHKL5TNOOMU,I2Q5U7BDYSM7O3O2IKIRL3KTZY,O4SSPZG3DFA7PHZ5L42DVOPWDM,Z5DOK3IB5X3IBOVVIK5MQCDYNA,ZJOONJUDYAQ53XOJU4KG5W4ATM,NLYAF5XVTTQUOMK4LYUF3FXNPE,XPX2T64BHDYLVGT5UC4GVIQIVY,AHB67R6KGAKWRSBL7CRAO3",
"FTTHOFPRPIHMJNZA3VZUAW5TIM.all.holesky.ethdisco.net" : "enrtree-branch:V52J3WKNVGG56JFRKYDLSZREJY,ZCAW4EMBOL5EH4MSMW7FY6JMEE,2SA643JTHJ75ZVPFHAKLXI2WXQ,5RYAVZN3SSZZQ2KISUO7XDBPE4,366Y3UIIKK2G5CUS7CHNPZMPEM,7J7D5R4SH6LVDCFZ5ELOP46IP4,7QEPEVJFOCL5A63GZLZ4IZNMOI,OOWRWZXNDJVJUCDDOJFOY4YB2Y,H6XMM4W6QZEQDAPVXGHJSU",
"O6EQKGUJOO2BQGMADYZHOSZXLA.all.holesky.ethdisco.net" : "enr:-KO4QAg_or5YgVU8ScSgcgvNmLMISW0LA4L5GtRLxmyVUlGRR4GNuEZ9q_tKtZdAbLH5B-FN2ie8hp0U6P90d39xtyWGAY3d9Tplg2V0aMfGhJsZKtCAgmlkgnY0gmlwhAMRsEKJc2VjcDI1NmsxoQJ7STjsgZvt1OOj5krr2l2iAwlY5AGl-dgTziPZsCd9qIRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"XIOWDM2BTMANILG2M4LCMO7W3M.all.holesky.ethdisco.net" : "enr:-Je4QCvStdCVA-jcYsFoXmDXjXlp-Cd_9sSMdi9Y1LM3tGH2C3k49yrWy-Y2Jg93ikASYljvD9qVUES0T9q5htcbL60ag2V0aMfGhJsZKtCAgmlkgnY0gmlwhLkIa-2Jc2VjcDI1NmsxoQKcjBIYl8vFVYVBs5trEl-Zn3IE7u1ZiiV986p0QCrAL4N0Y3CCdl2DdWRwgnZd",
"OL6KYVYI7SGS7U34FW3RFBA76A.all.holesky.ethdisco.net" : "enr:-KO4QIlf7XLihA1hw9d44SB0ENJ40RT1RmF2KrT9a2kRZfZoWPdO1jRi3GNnNSCLDxLo-aFc6uaGi3zxhCD8lg1aZgSGAYuoht2-g2V0aMfGhJsZKtCAgmlkgnY0gmlwhDNR0MWJc2VjcDI1NmsxoQKFvOHK4c-F1tHwdgsUa2yru5RrWyiVP9kRLnrw1kXcd4RzbmFwwIN0Y3CCdl-DdWRwgnZf",
"NXU6DU7HEM2L4DN6G2VHHFT72I.all.holesky.ethdisco.net" : "enr:-KO4QLbrxgLUdY0NhpBPVfilpwn4d3gPFIEqoq62P1piOkVmGwh5qpbKtor5FfE4FM_eezlzMdeCM7bzau8ciKZU1GiGAY0-AhsZg2V0aMfGhJsZKtCAgmlkgnY0gmlwhEp2iPuJc2VjcDI1NmsxoQOGZRfYj6RgBYWulaw90MVPDt_9F-oM3PzevNe-RS6KyoRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"RUQLNXP5XBWPAOEKRUEBAQXXXU.all.holesky.ethdisco.net" : "enr:-KO4QPpoyIf1DfiiOW8Tt34tBuA68Qd55cyWcqGOCFJtcHKtcqYr373NJPyQVd8ktsuqZU8L_ERSRAfKesEwdBARg3OGAY45u3jEg2V0aMfGhJsZKtCAgmlkgnY0gmlwhJRxo0WJc2VjcDI1NmsxoQKhMTQ31q7EAIHkisiA02KVyFZZLq2Q9w4_jgmiYCsF_oRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"RUJF27NEYDBNQAFMI6K6SKDVRY.all.holesky.ethdisco.net" : "enrtree-branch:OOQAXEUVKR5EJ722HM2UANT3R4,HOXANHAMPVCFA2QR54VEMWCWPE,INA5QYTPU6WIOBT7IC7U3WGDIY,3PMAED3BC2HUS7AMEPGJDKENQY,VJJPHI6PWJ7NFBTW2XJH3JMMNY,5ZWIGTGKMFAFF4LAODABANSBOQ,UYESJQUB2BHP2MSJNUSDRGXQ34,CK3HQLGRQATDVF5CEGHX4DVQRE,AYB76YY6WPJVIBLRB5U7L5",
"ZM7CMNKKBVIXY7WGMB7GHYUXMA.all.holesky.ethdisco.net" : "enr:-KO4QNhl-I-sZux_gBOVLLbMdw8kMK9fSEBBNYukE2r95hDLJ9rOuhZgJO2dvNU_vCcuiqJNJHj8N3olsc7srxryuuCGAY0cB7Qsg2V0aMfGhP1PAWuAgmlkgnY0gmlwhJ7caOWJc2VjcDI1NmsxoQIyp37cpS7gvrKk0f3VxW4D9Jx7mOvSAQ6vOaQppsYp2oRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"FKSM7XWAT4KW3TDLRHWSXZPMKQ.all.holesky.ethdisco.net" : "enr:-Ke4QNDn6wmnaZU1IYGb9lX8zb2QFUpa_yen8vFTnuXpW3_iah53AKPZX1D05HE23bw-UtR6zXOpCquB9XDdxvGhZrGGAY4oYEAkg2V0aMvKhMYaYJiEZRbqwIJpZIJ2NIJpcIQlPPlxiXNlY3AyNTZrMaEDiVRaEbnUMSPCtpxluUoBgOy3Loaa6SaVW9QK8BGTFJiEc25hcMCDdGNwgnZ9g3VkcIJ2fQ",
"QXKEJG4XZEQSXNUY7JJYCATPEI.all.holesky.ethdisco.net" : "enrtree-branch:FEORTRPBZJSNRB3XCLK7KCMACQ,2M2GRZYAFKAVHHU5HTLUHPYHCM,KEC4722NVHE3KX3IYNZC34C7NY,NZCRGKUE7ZXSIS4MIQ4WUS76BA,3PYSNKXWWSRBWV377EFNYINHSA,C7SDD5OPASV7B2XUOXF5NLBBKU,QY2XP6FTX7QKFNP6TNZ2MOHZKQ,76OAILAEE52CRVMYFBIBMSEUXE,2QHFEUHLG6Q35B5LUS5BF3",
"OSHAABXYJSRWW35VOMYHZUKJXU.all.holesky.ethdisco.net" : "enrtree-branch:LHFO3FXNRHB5B3YSTKUALGMEAI,5FNWKKKVUPIHRKWZY5P33A7E74,NHLNHXPRKOALUKH4MZ7CIVDONU,BAY6SKB2RTQCM7RTSHAFQ6TTBQ,ERETBCE2BAPJRSWIEUQV3QPP7M,MFPFKTJREYMESD7STJJOFYDVV4,MOBGCDYGQA4CNBQZDPN52JC5E4,RUQLNXP5XBWPAOEKRUEBAQXXXU,NJCWGLUAGBNZQYPU752O5L",
"WAU46WR54TX2VBK4QM7NFTA73Y.all.holesky.ethdisco.net" : "enr:-KO4QE70KmVYr_jd9JOMBFTIJomf3oliyhugCm4kFnzYD7dcJk3y7dVYyakJdlWHBuk1t4hDjW05BzeVf2f-V0LQ-r6GAY0-Aab1g2V0aMfGhJsZKtCAgmlkgnY0gmlwhCUbP0KJc2VjcDI1NmsxoQMQLfyyzC-YY6A53fEu5a8tP-JWCUM3vkX-c7nRyu28d4RzbmFwwIN0Y3CCequDdWRwgnqr",
"CZ7ESLY5ZH4ULXKQ54XIFDCOUI.all.holesky.ethdisco.net" : "enr:-KO4QOQjO7c9J74CpllRngkrEXHLw-W8VqtJXxkLWPRIXC1CFL4TCjEM4ZfRW6MFlqXERaY3h9mztZXLAo1lf1DAS1qGAY2D0c0_g2V0aMfGhJsZKtCAgmlkgnY0gmlwhC06cBWJc2VjcDI1NmsxoQN8oyz6RCujPq6Hj9N8dBn3EeJ4A69HYeYnyLRiXLZ1UIRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"5HSHHSS4QV3FKCNDETO7CEXMZM.all.holesky.ethdisco.net" : "enr:-KO4QAOJNWWAbN0bM51WFaFZimloFw1eBc7Pi3x7uMejmIToDZxBoAVtvdwTg3p6T0Mwmvuot6uT7PG2OjGxkk3P-iGGAY5cIO3Hg2V0aMfGhJsZKtCAgmlkgnY0gmlwhLAJA5uJc2VjcDI1NmsxoQPdRE5vPW9Wj-HRwzubnKAA-1n0p6CAOGLP-E-GzE9EnIRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"DAVKEW6RQ5YTYHKCJ2A2J5GVCI.all.holesky.ethdisco.net" : "enrtree-branch:6WAXAZL7FYKEJBZOPWIHCI4F6E,OL6KYVYI7SGS7U34FW3RFBA76A,FKSM7XWAT4KW3TDLRHWSXZPMKQ,5HSHHSS4QV3FKCNDETO7CEXMZM,CZ7ESLY5ZH4ULXKQ54XIFDCOUI,ZRVWX74UFHUWOPCC7LOPLYGIZA,FFM67SMD4N3SK4ZQTY2HOMU4F4,IPSUCB6CICZIW6SERKZL7FFTIM,7XE2DGFKCGOST6BJ46JPNK",
"ELYMU6HPEMGLIYO2UO4YNUI7CQ.all.holesky.ethdisco.net" : "enr:-KO4QOWLzzuDHlwrEZhdOmeHEuFi87mX6iwJ_V820RsBL0mCLngiBaLbhumS1Q8zIc6YPzCYN07nigSMr-OXf19z3WSGAYra1TQYg2V0aMfGhJsZKtCAgmlkgnY0gmlwhMIhKFGJc2VjcDI1NmsxoQO4QASxyNUTDVIRkvEDQejetqJ4roUtGDD6U8CoI9wm-YRzbmFwwIN0Y3CCdnSDdWRwgnZ0",
"VNVQZHCW3CS7ELWR3ANLO7RFBA.all.holesky.ethdisco.net" : "enr:-Ji4QBY7W0c9-rsM9r3yeBngIOe0LFJx8wew3ZatidckbRSzAbXbvc5ahmadpUcrPG2oDm9ziwxw7maQtzEs4J2AnWeBr4NldGjHxoSbGSrQgIJpZIJ2NIJpcISU--s8iXNlY3AyNTZrMaECwwCgmDrePUxVSsOZqgS0USzkXwzkqF0-QK7a59EsnrCDdGNwgnX7g3VkcIJ1-w",
"MFPFKTJREYMESD7STJJOFYDVV4.all.holesky.ethdisco.net" : "enr:-KO4QJfXFSQOpwMtEgz0BSB0gJ0zXQZ6yMzoBfXYCK3ATxulM_mkkQhMWUHsrNyNH9cQYfxRbN_hEGfP3id3IfLiv7KGAY5iEk-hg2V0aMfGhJsZKtCAgmlkgnY0gmlwhGQmekqJc2VjcDI1NmsxoQOoFPqpQmRhcIfVINZ0knq8hxe2tUYnTSLEC-6Xoly7jIRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"HGROZTZI7YPDW5F37QVUPQEBG4.all.holesky.ethdisco.net" : "enr:-KO4QI3-xQxY2u-misXaxFtpfezZr0Fk0jiL6U7ZAbwV4yCRcWxgNVOxLifoUOm9uMUqH4wlGxIYL7onmzlavqRIjDGGAYs5av4dg2V0aMfGhJsZKtCAgmlkgnY0gmlwhJRxqGeJc2VjcDI1NmsxoQPc4dhahamqQVxv9D89fNMO9DOP_YaPJRQ3EmT2XantU4RzbmFwwIN0Y3CCdl-DdWRwgnZf",
"VJJPHI6PWJ7NFBTW2XJH3JMMNY.all.holesky.ethdisco.net" : "enr:-KO4QHCu9ZPe68fKUXeUzje0Jwv0D8UCrAQ_K5W9KHrIpTdUYWIGwbzyl8heZ1cxijlyUhN1WB0HZpEBThUmrdm-uLiGAYra3Fk3g2V0aMfGhJsZKtCAgmlkgnY0gmlwhMIhKJOJc2VjcDI1NmsxoQP0AAgBs1kDG4XBe55HY0J4F7GjHiwQVUY-xFKYXOB594RzbmFwwIN0Y3CCdnSDdWRwgnZ0",
"2M2GRZYAFKAVHHU5HTLUHPYHCM.all.holesky.ethdisco.net" : "enr:-KO4QIPWnAyt-tyaS1cnMwEJaFdvDd-MpwEcVv26cfaZ3Ffkc4WCpHqzPmd_SifEsxQf7ontOz9a1EPjbxlsu_V1XZqGAY1UHtm_g2V0aMfGhJsZKtCAgmlkgnY0gmlwhCU85MWJc2VjcDI1NmsxoQNyzc5NiamRt8y5TQ9Qj-EZAYeCb7ZaclF5sOdPrErpbIRzbmFwwIN0Y3CCdl-DdWRwgnZf",
"OOWRWZXNDJVJUCDDOJFOY4YB2Y.all.holesky.ethdisco.net" : "enr:-KO4QBKCkXrhG-hN-IY-O03b5A4JHaweb5xuuSZP4OdJ4uSSZ7E1-yioWl_0WSF8ShOfNU_7wgSCIwBPg0d4RD4w4sOGAY4oBl8og2V0aMfGhJsZKtCAgmlkgnY0gmlwhFJB7YiJc2VjcDI1NmsxoQL-RfOTNeu0SmlCfWAVQaQBUXF0WDEUCyGXv1LWC2_xqoRzbmFwwIN0Y3CCequDdWRwgnqr",
"E4GGBOZ5L2UFPAKJIPLNBA2MKA.all.holesky.ethdisco.net" : "enr:-Ke4QD_Gz-YGWvzpAe-a1l2KHz2pxG_nPUYTcHQDN1Wuvkk7TrEDq-HKIXtA_mbPukz9qLLW5sfkJH5mYda9eDssvkSGAY1Mq2nIg2V0aMvKhMYaYJiEZRbqwIJpZIJ2NIJpcIQf3GvbiXNlY3AyNTZrMaECyosSeovqxsDXTua8c47z7EK--WogcD_FCwDX6tHvVQSEc25hcMCDdGNwgnZfg3VkcIJ2Xw",
"KYTZHQX2PSOGY6RQTCVTUKTS7E.all.holesky.ethdisco.net" : "enr:-KO4QJn2ahKcTeE2karHSHOiJgE9b2BJWo54NS3nzjjCDnsvZ1JjDl8yulvzn8qQA48zCtHkiVMEVNHtdf_LM_N0Cs6GAYxElMbzg2V0aMfGhJsZKtCAgmlkgnY0gmlwhJ3m3GOJc2VjcDI1NmsxoQJzs6KencrR0_ylBVNGuZaknO_zQUYOrq4rjj_FmTlHN4RzbmFwwIN0Y3CCdl-DdWRwgnZf",
"O25DXMPE4UEZ6SFMQRBGL2E2I4.all.holesky.ethdisco.net" : "enr:-KO4QLC4qJt9OIKLkxROeQMNcj4ZuH_QZsVcS1v-NhzBGAf8B-5xF3GYqtMp40n6nwDg8o1yUs7xEx0kgf6wBJF-AuqGAYrbCvwJg2V0aMfGhJsZKtCAgmlkgnY0gmlwhMIhKGuJc2VjcDI1NmsxoQPogWsyNFpt9VYFYrsdUS4dxK2BMeL4FY41ZzgBjeEGRoRzbmFwwIN0Y3CCdmCDdWRwgnZg",
"RWRV55FT3DKDVGZK7AEU2DR77Y.all.holesky.ethdisco.net" : "enrtree-branch:ZV5CYUMJPF4XSTFHOG65622KEE,QDCV4SQTQAGHHA3DRA2TR4LNMY,MZGQKIQKIOFO3W2GJURJUXVYMQ,V5LTO36DCXU2JCCSSJ76E35OFU,NQHD4WNRRGE5JHHX6IDKGIF7KU,ELYMU6HPEMGLIYO2UO4YNUI7CQ,VN7EWW4RMUZM6KUOQ43GJ33DWM,J7ZIDKH7CKVIYY244QVU6RMPDM,QSK67XVHDPEQH7Q54HACQS",
"TMY2W2YBNCXUUNA3Y7QXWVQLRE.all.holesky.ethdisco.net" : "enr:-Ke4QEsOrFCJqnbVXiuSh3DKjvS3RlUoOrrLRlKp2mAqmCuJQBVLFREqABNWRvZfLOxDPEtvcRzDtDW2juSa1vd2xj6GAY025-JNg2V0aMvKhP1PAWuEZcNqwIJpZIJ2NIJpcIRU961viXNlY3AyNTZrMaECoqR94LV1BQMpEDbmfJdd8adEPWHV7qP09nePPVm9nLqEc25hcMCDdGNwgnZfg3VkcIJ2Xw",
"XTJ3PFTPBB3ATDDWAA6W7RRKMY.all.holesky.ethdisco.net" : "enrtree-branch:5AHKPN5NN5IHEH365X6OYZDHDE,AUQGHXRAP7J3AGI6TUOQVYTYLY,IDXHFSATDDENJYOYJC2TUSUYOY,GZVG4EONSAY6M7SMLJXDCFUF5Q,LHFTDS37XFOEXQKTWJGKWRLFWM,HTMBFBAIZBACDG3EZZ4JVANCAY,ZM7CMNKKBVIXY7WGMB7GHYUXMA,VNVQZHCW3CS7ELWR3ANLO7RFBA,PA4IN34FHBJOUK2MS7YRA3",
"V4A7I7I2SFVUDFFH7AGPFXACBA.all.holesky.ethdisco.net" : "enr:-KO4QMIHR0XgkMyX4E9Xh8Fw3rFr-QWyH49lpFmrEhx1Cc_WMxm6atHPm0g-bXmzradBORfb0S9_6dyi-GnQbtlm7GOGAYr8tSZLg2V0aMfGhJsZKtCAgmlkgnY0gmlwhA_MQUKJc2VjcDI1NmsxoQMrgP3au96duBa9PQzAFPhlQ7ikkA96YWAkrItcm5zCxIRzbmFwwIN0Y3CCdl-DdWRwgnZf"
}

@ -84,7 +84,13 @@ public class AccountLocalConfigPermissioningController implements TransactionPer
/**
 * Loads the configured account allowlist into this controller at construction/reload time.
 *
 * <p>No-op when {@code configuration} is null, account allowlisting is disabled, or the
 * configured allowlist is empty. Otherwise the accounts are submitted to {@code addAccounts}
 * exactly once; any validation failure (e.g. a duplicate or malformed address) aborts with an
 * {@link IllegalStateException} so a bad permissions file cannot be silently accepted.
 *
 * @param configuration the local permissioning configuration; may be null
 * @throws IllegalStateException if the configured allowlist fails validation
 */
private void readAccountsFromConfig(final LocalPermissioningConfiguration configuration) {
  if (configuration != null && configuration.isAccountAllowlistEnabled()) {
    if (!configuration.getAccountAllowlist().isEmpty()) {
      // Single call whose result is checked: the merge residue in this hunk contained a
      // leftover unconditional addAccounts(...) line before this one, which would have
      // inserted every configured account twice.
      final AllowlistOperationResult result = addAccounts(configuration.getAccountAllowlist());
      if (result != AllowlistOperationResult.SUCCESS) {
        throw new IllegalStateException(
            String.format(
                "Error reloading permissions file. Invalid accounts allowlist, validation failed due to \"%s\"",
                result));
      }
    }
  }
}

@ -17,6 +17,7 @@ package org.hyperledger.besu.ethereum.permissioning;
import static java.util.Collections.singletonList;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.catchThrowable;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
@ -108,6 +109,40 @@ public class AccountLocalConfigPermissioningControllerTest {
.containsExactly("0xfe3b557e8fb62b89f4916b721be55ceb828dbd73");
}
@Test
public void whenLoadingDuplicateAccountsFromConfigShouldThrowError() {
  // Allowlist is enabled and the configured list contains the same account twice
  // (first and last entries are identical).
  final List<String> allowlistWithDuplicate =
      List.of(
          "0xcb88953e60948e3a76fa658d65b7c2d5043c6409",
          "0xdd76406b124f9e3ae9fbeb47e4d8dc0ab143902d",
          "0x432132e8561785c33afe931762cf8eeb9c80e3ad",
          "0xcb88953e60948e3a76fa658d65b7c2d5043c6409");
  when(permissioningConfig.isAccountAllowlistEnabled()).thenReturn(true);
  when(permissioningConfig.getAccountAllowlist()).thenReturn(allowlistWithDuplicate);

  // Construction loads the config, so the duplicate must surface as IllegalStateException.
  assertThrows(
      IllegalStateException.class,
      () ->
          controller =
              new AccountLocalConfigPermissioningController(
                  permissioningConfig, allowlistPersistor, metricsSystem));
}
@Test
public void whenLoadingInvalidAccountsFromConfigShouldThrowError() {
  // Allowlist is enabled but the configured entries are not valid account addresses.
  final List<String> malformedAccounts = List.of("0x0", "0xzxy");
  when(permissioningConfig.isAccountAllowlistEnabled()).thenReturn(true);
  when(permissioningConfig.getAccountAllowlist()).thenReturn(malformedAccounts);

  // Construction loads the config, so the invalid entries must surface as IllegalStateException.
  assertThrows(
      IllegalStateException.class,
      () ->
          controller =
              new AccountLocalConfigPermissioningController(
                  permissioningConfig, allowlistPersistor, metricsSystem));
}
@Test
public void whenPermConfigContainsEmptyListOfAccountsContainsShouldReturnFalse() {
when(permissioningConfig.isAccountAllowlistEnabled()).thenReturn(true);

@ -50,8 +50,7 @@ import org.apache.tuweni.units.bigints.UInt64;
* word, a method like {@link #readLongScalar()} does not expect an encoded value of exactly 8 bytes
* (by opposition to {@link #readLong}), but rather one that is "up to" 8 bytes.
*
* @see BytesValueRLPInput for a {@link RLPInput} that decode an RLP encoded value stored in a
* {@link Bytes}.
* @see BytesValueRLPInput
*/
public interface RLPInput {

@ -1371,6 +1371,9 @@ public class MessageFrame {
private Optional<List<VersionedHash>> versionedHashes = Optional.empty();
/** Instantiates a new Builder. */
public Builder() {}
/**
* The "parent" message frame. When present some fields will be populated from the parent and
* ignored if passed in via builder

@ -4746,41 +4746,41 @@
<sha256 value="f1c00e59fb8e446f0710bdc1719e61952f27c1614ae478452c2f0011035322ae" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.jacoco" name="org.jacoco.agent" version="0.8.8">
<artifact name="org.jacoco.agent-0.8.8.jar">
<sha256 value="072ecbd496896623899a696fff12c01c1615f737616d2792e6d0e10cdf8a610d" origin="Generated by Gradle"/>
<component group="org.jacoco" name="org.jacoco.agent" version="0.8.11">
<artifact name="org.jacoco.agent-0.8.11.jar">
<sha256 value="d3ed85dea78a9ed55846a7738e3a0ca15c702c661ee4bc8cbfe02a8b9f4a99c0" origin="Generated by Gradle"/>
</artifact>
<artifact name="org.jacoco.agent-0.8.8.pom">
<sha256 value="7dd13c80aff315032983357c650a887d6fdb4c8a8870b207bb4802c49809e7b4" origin="Generated by Gradle"/>
<artifact name="org.jacoco.agent-0.8.11.pom">
<sha256 value="16e05e9f49621b87c53e69350140f3c46d42d966c67a933bdf4b063a2b1c8fc5" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.jacoco" name="org.jacoco.ant" version="0.8.8">
<artifact name="org.jacoco.ant-0.8.8.jar">
<sha256 value="02e33bd2c48dc0be67c2fea84d43beececfd400da6797c58153253d4c30aca15" origin="Generated by Gradle"/>
<component group="org.jacoco" name="org.jacoco.ant" version="0.8.11">
<artifact name="org.jacoco.ant-0.8.11.jar">
<sha256 value="81d7eb8890d9be30a939612c295603541063529cdd03a53265aba74474b70b7c" origin="Generated by Gradle"/>
</artifact>
<artifact name="org.jacoco.ant-0.8.8.pom">
<sha256 value="fafff75819609030f4626509313c0861428c2c26c8d36e9a8938334a04478909" origin="Generated by Gradle"/>
<artifact name="org.jacoco.ant-0.8.11.pom">
<sha256 value="7ed103d959d0cee7babfb1307fa6e451b1696ffd3527061553b550de55201d85" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.jacoco" name="org.jacoco.build" version="0.8.8">
<artifact name="org.jacoco.build-0.8.8.pom">
<sha256 value="f4ce0b1285fd24fc6c772f42857298315904dde8fd5677267a0fad5ff9ce2aef" origin="Generated by Gradle"/>
<component group="org.jacoco" name="org.jacoco.build" version="0.8.11">
<artifact name="org.jacoco.build-0.8.11.pom">
<sha256 value="5b84b15cf2eef3e59eb91bc22784833100b09df9911e3319030c3bc648bd8b0b" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.jacoco" name="org.jacoco.core" version="0.8.8">
<artifact name="org.jacoco.core-0.8.8.jar">
<sha256 value="474c782f809d88924713dfdbf0acb79d330f904be576484803463d0465611643" origin="Generated by Gradle"/>
<component group="org.jacoco" name="org.jacoco.core" version="0.8.11">
<artifact name="org.jacoco.core-0.8.11.jar">
<sha256 value="fcd188c688473fc8dcc0c6caaf355e7b389502243527c33b9597a3ec28791f47" origin="Generated by Gradle"/>
</artifact>
<artifact name="org.jacoco.core-0.8.8.pom">
<sha256 value="f5fab5a48df823b83c0ea35026032368cc9b81800efb257cc7a5928298fee225" origin="Generated by Gradle"/>
<artifact name="org.jacoco.core-0.8.11.pom">
<sha256 value="bb6135f10a36349cb84a5600fd8cf73fc1296a135b2f14adcd83de8cf24cabb1" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.jacoco" name="org.jacoco.report" version="0.8.8">
<artifact name="org.jacoco.report-0.8.8.jar">
<sha256 value="2c129110f3e3fcaa1f8179578ea3894586199cb0826be5c7790278084c9622a9" origin="Generated by Gradle"/>
<component group="org.jacoco" name="org.jacoco.report" version="0.8.11">
<artifact name="org.jacoco.report-0.8.11.jar">
<sha256 value="8393295ae24680ed10cad8333907040f928b871332491581ca5bc784e2cb4fbe" origin="Generated by Gradle"/>
</artifact>
<artifact name="org.jacoco.report-0.8.8.pom">
<sha256 value="5213af2916bb2690be871917d82a0f9c2ba1e88b796a59343fc03df8ae138716" origin="Generated by Gradle"/>
<artifact name="org.jacoco.report-0.8.11.pom">
<sha256 value="8e3b734779d5e3fd683ec015413d52f961984c50cb9045fda2b23bff5eb42381" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.java-websocket" name="Java-WebSocket" version="1.5.3">
@ -5637,6 +5637,14 @@
<sha256 value="02824e839f2a2f0e72959fdd30b4897240f05afc43de42d7ba0b18437601c070" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.ow2.asm" name="asm-commons" version="9.6">
<artifact name="asm-commons-9.6.jar">
<sha256 value="7aefd0d5c0901701c69f7513feda765fb6be33af2ce7aa17c5781fc87657c511" origin="Generated by Gradle"/>
</artifact>
<artifact name="asm-commons-9.6.pom">
<sha256 value="a98ae4895334baf8ff86bd66516210dbd9a03f1a6e15e47dda82afcf6b53d77c" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.ow2.asm" name="asm-tree" version="9.2">
<artifact name="asm-tree-9.2.jar">
<sha256 value="aabf9bd23091a4ebfc109c1f3ee7cf3e4b89f6ba2d3f51c5243f16b3cffae011" origin="Generated by Gradle"/>
@ -5645,6 +5653,14 @@
<sha256 value="f61f3ebea5520ddf19f452b03c426c7231bdd8a81d7ac28765cb5271225ac378" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.ow2.asm" name="asm-tree" version="9.6">
<artifact name="asm-tree-9.6.jar">
<sha256 value="c43ecf17b539c777e15da7b5b86553b377e2d39a683de6285567d5283888e7ef" origin="Generated by Gradle"/>
</artifact>
<artifact name="asm-tree-9.6.pom">
<sha256 value="1bcb481d7fc16b955bb60ca07c8cfa2424bcee78bdc405bba31c7d6f5dc2d113" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.ow2.asm" name="asm-util" version="9.2">
<artifact name="asm-util-9.2.jar">
<sha256 value="ff5b3cd331ae8a9a804768280da98f50f424fef23dd3c788bb320e08c94ee598" origin="Generated by Gradle"/>

@ -108,7 +108,6 @@ dependencyManagement {
entry 'tuweni-concurrent'
entry 'tuweni-crypto'
entry 'tuweni-devp2p'
entry 'tuweni-dns-discovery'
entry 'tuweni-io'
entry 'tuweni-net'
entry 'tuweni-rlp'

@ -32,6 +32,9 @@ public class InMemoryTaskQueue<T> implements TaskCollection<T> {
private final Set<InMemoryTask<T>> unfinishedOutstandingTasks = new HashSet<>();
private final AtomicBoolean closed = new AtomicBoolean(false);
/** Default constructor. */
public InMemoryTaskQueue() {}
@Override
public synchronized void add(final T taskData) {
assertNotClosed();

@ -18,6 +18,7 @@ package org.hyperledger.besu.util;
public class EndianUtils {
// next two methods adopted from:
// https://github.com/bcgit/bc-java/blob/master/core/src/main/java/org/bouncycastle/util/Pack.java
private EndianUtils() {}
/**
* Long to big endian.

@ -25,6 +25,8 @@ import java.util.function.Supplier;
/** The Future utils. */
public class FutureUtils {
private FutureUtils() {}
/**
* Returns a new CompletionStage that, when the provided stage completes exceptionally, is
* executed with the provided stage's exception as the argument to the supplied function.

@ -28,6 +28,8 @@ public class LogUtil {
static final String BESU_NAMESPACE = "org.hyperledger.besu";
static final int MAX_SUMMARY_DEPTH = 20;
private LogUtil() {}
/**
* Throttles logging to a given logger.
*

@ -43,6 +43,8 @@ public class PlatformDetector {
private static String _glibc;
private static String _jemalloc;
private PlatformDetector() {}
/**
* Gets OS type.
*

Loading…
Cancel
Save