Signed-off-by: Karim Taam <karim.t2am@gmail.com>
pull/6209/head
Karim Taam 10 months ago
commit 36ee9397c5
  Changed files in this commit (per-file change count in parentheses):
  1. .circleci/config.yml (2)
  2. CHANGELOG.md (27)
  3. besu/src/main/java/org/hyperledger/besu/cli/BesuCommand.java (273)
  4. besu/src/main/java/org/hyperledger/besu/cli/ConfigurationOverviewBuilder.java (54)
  5. besu/src/main/java/org/hyperledger/besu/cli/DefaultCommandValues.java (4)
  6. besu/src/main/java/org/hyperledger/besu/cli/config/ProfileName.java (41)
  7. besu/src/main/java/org/hyperledger/besu/cli/options/MiningOptions.java (60)
  8. besu/src/main/java/org/hyperledger/besu/cli/options/stable/DataStorageOptions.java (78)
  9. besu/src/main/java/org/hyperledger/besu/cli/options/stable/RpcWebsocketOptions.java (266)
  10. besu/src/main/java/org/hyperledger/besu/cli/options/unstable/NetworkingOptions.java (6)
  11. besu/src/main/java/org/hyperledger/besu/cli/subcommands/ValidateConfigSubCommand.java (5)
  12. besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogHelper.java (229)
  13. besu/src/main/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogSubCommand.java (123)
  14. besu/src/main/java/org/hyperledger/besu/cli/util/AbstractConfigurationFinder.java (125)
  15. besu/src/main/java/org/hyperledger/besu/cli/util/CascadingDefaultProvider.java (6)
  16. besu/src/main/java/org/hyperledger/besu/cli/util/ConfigFileFinder.java (100)
  17. besu/src/main/java/org/hyperledger/besu/cli/util/ConfigOptionSearchAndRunHandler.java (67)
  18. besu/src/main/java/org/hyperledger/besu/cli/util/ProfileFinder.java (76)
  19. besu/src/main/java/org/hyperledger/besu/cli/util/TomlConfigurationDefaultProvider.java (97)
  20. besu/src/main/java/org/hyperledger/besu/controller/BesuControllerBuilder.java (49)
  21. besu/src/main/resources/log4j2.xml (3)
  22. besu/src/test/java/org/hyperledger/besu/cli/BesuCommandTest.java (404)
  23. besu/src/test/java/org/hyperledger/besu/cli/CascadingDefaultProviderTest.java (306)
  24. besu/src/test/java/org/hyperledger/besu/cli/CommandLineUtilsTest.java (4)
  25. besu/src/test/java/org/hyperledger/besu/cli/CommandTestAbstract.java (61)
  26. besu/src/test/java/org/hyperledger/besu/cli/ConfigurationOverviewBuilderTest.java (32)
  27. besu/src/test/java/org/hyperledger/besu/cli/TomlConfigurationDefaultProviderTest.java (41)
  28. besu/src/test/java/org/hyperledger/besu/cli/options/AbstractCLIOptionsTest.java (44)
  29. besu/src/test/java/org/hyperledger/besu/cli/options/MiningOptionsTest.java (27)
  30. besu/src/test/java/org/hyperledger/besu/cli/options/NetworkingOptionsTest.java (2)
  31. besu/src/test/java/org/hyperledger/besu/cli/options/RpcWebsocketOptionsTest.java (249)
  32. besu/src/test/java/org/hyperledger/besu/cli/options/SynchronizerOptionsTest.java (4)
  33. besu/src/test/java/org/hyperledger/besu/cli/options/stable/DataStorageOptionsTest.java (64)
  34. besu/src/test/java/org/hyperledger/besu/cli/subcommands/storage/TrieLogHelperTest.java (433)
  35. besu/src/test/java/org/hyperledger/besu/cli/util/ConfigOptionSearchAndRunHandlerTest.java (5)
  36. besu/src/test/java/org/hyperledger/besu/services/TraceServiceImplTest.java (2)
  37. besu/src/test/resources/everything_config.toml (3)
  38. besu/src/test/resources/partial_config.toml (3)
  39. config/src/main/resources/profiles/dev.toml (1)
  40. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/ApiConfiguration.java (5)
  41. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/TraceFilter.java (18)
  42. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/methods/TraceJsonRpcMethods.java (6)
  43. ethereum/api/src/test/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/TraceFilterTest.java (79)
  44. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/mainnet/MainnetTransactionProcessor.java (2)
  45. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/storage/keyvalue/KeyValueSegmentIdentifier.java (32)
  46. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/bonsai/BonsaiValue.java (15)
  47. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/bonsai/BonsaiWorldStateProvider.java (7)
  48. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/bonsai/trielog/NoOpTrieLogManager.java (2)
  49. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/bonsai/trielog/TrieLogManager.java (7)
  50. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/bonsai/trielog/TrieLogPruner.java (47)
  51. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/bonsai/worldview/BonsaiWorldState.java (6)
  52. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/trie/bonsai/worldview/BonsaiWorldStateUpdateAccumulator.java (8)
  53. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/DataStorageConfiguration.java (20)
  54. ethereum/core/src/test-support/java/org/hyperledger/besu/ethereum/core/InMemoryKeyValueStorageProvider.java (4)
  55. ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/bonsai/AbstractIsolationTests.java (4)
  56. ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/bonsai/BonsaiWorldStateProviderTest.java (4)
  57. ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/bonsai/trielog/TrieLogManagerTests.java (5)
  58. ethereum/core/src/test/java/org/hyperledger/besu/ethereum/trie/bonsai/trielog/TrieLogPrunerTest.java (68)
  59. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/manager/EthPeers.java (1)
  60. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/manager/EthProtocolManager.java (124)
  61. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/manager/EthScheduler.java (4)
  62. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/AbstractSyncTargetManager.java (6)
  63. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/PipelineChainDownloader.java (4)
  64. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/backwardsync/BackwardSyncContext.java (26)
  65. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/checkpointsync/CheckpointSyncChainDownloader.java (6)
  66. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncChainDownloader.java (4)
  67. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncDownloadPipelineFactory.java (4)
  68. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncDownloader.java (14)
  69. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/ImportBlocksStep.java (6)
  70. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/PivotBlockRetriever.java (3)
  71. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/SyncError.java (2)
  72. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/SyncException.java (14)
  73. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/SyncTargetManager.java (8)
  74. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fullsync/FullSyncTargetManager.java (4)
  75. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapSyncDownloader.java (5)
  76. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapSyncMetricsManager.java (8)
  77. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldDownloadState.java (6)
  78. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldStateDownloader.java (6)
  79. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/AccountRangeDataRequest.java (2)
  80. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/heal/AccountFlatDatabaseHealingRangeRequest.java (2)
  81. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/PipelineChainDownloaderTest.java (2)
  82. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/backwardsync/BackwardSyncContextTest.java (4)
  83. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/backwardsync/ForwardSyncStepTest.java (4)
  84. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastImportBlocksPercentageCalculationTest.java (12)
  85. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncDownloaderTest.java (18)
  86. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/ImportBlocksStepTest.java (6)
  87. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/PivotBlockRetrieverTest.java (12)
  88. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/PersistDataStepTest.java (2)
  89. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/SnapWorldDownloadStateTest.java (2)
  90. ethereum/eth/src/test/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/request/heal/AccountFlatDatabaseHealingRangeRequestTest.java (4)
  91. ethereum/evmtool/src/main/java/org/hyperledger/besu/evmtool/T8nExecutor.java (1)
  92. ethereum/p2p/src/main/java/org/hyperledger/besu/ethereum/p2p/config/DiscoveryConfiguration.java (2)
  93. ethereum/p2p/src/main/java/org/hyperledger/besu/ethereum/p2p/config/NetworkingConfiguration.java (1)
  94. ethereum/p2p/src/main/java/org/hyperledger/besu/ethereum/p2p/discovery/PeerDiscoveryAgent.java (64)
  95. ethereum/p2p/src/main/java/org/hyperledger/besu/ethereum/p2p/discovery/VertxPeerDiscoveryAgent.java (7)
  96. ethereum/p2p/src/main/java/org/hyperledger/besu/ethereum/p2p/discovery/internal/PeerDiscoveryController.java (55)
  97. ethereum/p2p/src/main/java/org/hyperledger/besu/ethereum/p2p/discovery/internal/PeerTable.java (14)
  98. ethereum/p2p/src/main/java/org/hyperledger/besu/ethereum/p2p/network/DefaultP2PNetwork.java (12)
  99. ethereum/p2p/src/main/java/org/hyperledger/besu/ethereum/p2p/rlpx/RlpxAgent.java (20)
  100. ethereum/p2p/src/main/java/org/hyperledger/besu/ethereum/p2p/rlpx/connections/netty/AbstractHandshakeHandler.java (85)
  Some files were not shown because too many files have changed in this diff.

@ -204,7 +204,7 @@ jobs:
at: ~/project
- run:
name: ReferenceTests
no_output_timeout: 30m
no_output_timeout: 40m
command: |
git submodule update --init --recursive
./gradlew --no-daemon referenceTest

@ -1,11 +1,31 @@
# Changelog
## 24.1.1-SNAPSHOT
## 24.1.2-SNAPSHOT
### Breaking Changes
- The `trace_filter` method in the JSON-RPC API now has a default block range limit of 1000, adjustable with `--rpc-max-trace-filter-range` [#6446](https://github.com/hyperledger/besu/pull/6446)
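As an editorial illustration of what this limit means in practice (this is not Besu's actual TraceFilter code; the class and variable names below are made up for the sketch), the option amounts to rejecting trace_filter requests whose block range exceeds the configured maximum:

public class TraceFilterRangeCheckSketch {
  public static void main(final String[] args) {
    final long maxTraceFilterRange = 1_000L; // new default; 0 means no limit
    final long fromBlock = 5_000_000L;
    final long toBlock = 5_002_000L; // a 2000-block request

    // Reject the request when a limit is configured and the range exceeds it.
    if (maxTraceFilterRange > 0 && toBlock - fromBlock > maxTraceFilterRange) {
      System.out.println("trace_filter rejected: block range exceeds --rpc-max-trace-filter-range");
    }
  }
}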
### Deprecations
### Additions and Improvements
- Add `OperationTracer.tracePrepareTransaction`, where the sender account has not yet been altered [#6453](https://github.com/hyperledger/besu/pull/6453)
- Improve the high spec flag by limiting it to a few column families [#6354](https://github.com/hyperledger/besu/pull/6354)
### Bug fixes
- Fix the way an advertised host configured with `--p2p-host` is treated when communicating with the originator of a PING packet [#6225](https://github.com/hyperledger/besu/pull/6225)
- Fix `poa-block-txs-selection-max-time` option that was inadvertently reset to its default after being configured [#6444](https://github.com/hyperledger/besu/pull/6444)
### Download Links
## 24.1.1
### Breaking Changes
- New `EXECUTION_HALTED` error returned if there is an error executing or simulating a transaction, with the reason for execution being halted. Replaces the generic `INTERNAL_ERROR` return code in certain cases which some applications may be checking for [#6343](https://github.com/hyperledger/besu/pull/6343)
- The Besu Docker images with `openjdk-latest` tags since 23.10.3 were incorrectly using UID 1001 instead of 1000 for the container's `besu` user. The user now uses 1000 again. Containers created from or migrated to images using UID 1001 will need to chown their persistent database files to UID 1000 [#6360](https://github.com/hyperledger/besu/pull/6360)
- The deprecated `--privacy-onchain-groups-enabled` option has now been removed. Use the `--privacy-flexible-groups-enabled` option instead. [#6411](https://github.com/hyperledger/besu/pull/6411)
- Requesting the Ethereum Node Record (ENR) to acquire the fork id from bonded peers is now enabled by default, so the following change has been made [#5628](https://github.com/hyperledger/besu/pull/5628):
  - `--Xfilter-on-enr-fork-id` has been removed. To disable the feature use `--filter-on-enr-fork-id=false`.
- The time that can be spent selecting transactions during block creation is now capped at 5 seconds for PoS and PoW networks, and at 75% of the block period specified in the genesis for PoA networks, to prevent a possible DoS when a single transaction takes too long to execute and to keep a stable block production rate. This could be a breaking change for an existing network whose transactions used to take longer to execute than the newly introduced limit; if such a network must keep processing these long-running transactions, the default value of `block-txs-selection-max-time` or `poa-block-txs-selection-max-time` needs to be tuned accordingly.
### Deprecations
@ -20,6 +40,8 @@
- Upgrade Mockito [#6397](https://github.com/hyperledger/besu/pull/6397)
- Upgrade `tech.pegasys.discovery:discovery` [#6414](https://github.com/hyperledger/besu/pull/6414)
- Options to tune the max allowed time that can be spent selecting transactions during block creation are now stable [#6423](https://github.com/hyperledger/besu/pull/6423)
- Introduce `--Xbonsai-limit-trie-logs-enabled` experimental feature which by default will only retain the latest 512 trie logs, saving about 3GB per week in database growth [#5390](https://github.com/hyperledger/besu/issues/5390)
- Introduce `besu storage x-trie-log prune` experimental offline subcommand which will prune all redundant trie logs except the latest 512 [#6303](https://github.com/hyperledger/besu/pull/6303)
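As a rough editorial sketch of the retention idea behind these two entries (this is not Besu's TrieLogPruner or the `x-trie-log` subcommand; the map-based storage and names are assumptions made purely for illustration), keeping only the latest 512 trie logs boils down to dropping everything older than the chain head minus the retention limit:

import java.util.NavigableMap;
import java.util.TreeMap;

public class TrieLogRetentionSketch {
  static void prune(final NavigableMap<Long, byte[]> trieLogsByBlock,
                    final long headBlockNumber,
                    final long retentionLimit) {
    // Remove every trie log strictly older than (head - retentionLimit).
    trieLogsByBlock.headMap(headBlockNumber - retentionLimit, false).clear();
  }

  public static void main(final String[] args) {
    final NavigableMap<Long, byte[]> logs = new TreeMap<>();
    for (long block = 0; block < 1_000; block++) {
      logs.put(block, new byte[0]);
    }
    prune(logs, 999, 512);
    System.out.println(logs.size()); // 513: blocks 487..999 are kept
  }
}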
### Bug fixes
- INTERNAL_ERROR from `eth_estimateGas` JSON/RPC calls [#6344](https://github.com/hyperledger/besu/issues/6344)
@ -30,7 +52,6 @@
### Download Links
## 24.1.0
### Breaking Changes
@ -44,6 +65,7 @@
- Set Ethereum Classic mainnet activation block for Spiral network upgrade [#6267](https://github.com/hyperledger/besu/pull/6267)
- Add custom genesis file name to config overview if specified [#6297](https://github.com/hyperledger/besu/pull/6297)
- Update Gradle plugins and replace unmaintained License Gradle Plugin with the actively maintained Gradle License Report [#6275](https://github.com/hyperledger/besu/pull/6275)
- Optimize RocksDB WAL files, allowing faster restarts and more linear disk space utilization [#6328](https://github.com/hyperledger/besu/pull/6328)
### Bug fixes
- Hotfix for selfdestruct preimages on bonsai [#6359](https://github.com/hyperledger/besu/pull/6359)
@ -87,7 +109,6 @@ https://hyperledger.jfrog.io/artifactory/besu-binaries/besu/23.10.3-hotfix/besu-
### Bug fixes
- Fix Docker image name clash between Besu and evmtool [#6194](https://github.com/hyperledger/besu/pull/6194)
- Fix `logIndex` in `eth_getTransactionReceipt` JSON RPC method [#6206](https://github.com/hyperledger/besu/pull/6206)
- Fix the way an advertised host configured with `--p2p-host` is treated when communicating with the originator of a PING packet [#6225](https://github.com/hyperledger/besu/pull/6225)
### Download Links
https://hyperledger.jfrog.io/artifactory/besu-binaries/besu/23.10.3/besu-23.10.3.zip / sha256 da7ef8a6ceb88d3e327cacddcdb32218d1750b464c14165a74068f6dc6e0871a

@ -31,7 +31,6 @@ import static org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration.DEF
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.DEFAULT_RPC_APIS;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.VALID_APIS;
import static org.hyperledger.besu.ethereum.api.jsonrpc.authentication.EngineAuthService.EPHEMERAL_JWT_FILE;
import static org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration.DEFAULT_WEBSOCKET_PORT;
import static org.hyperledger.besu.metrics.BesuMetricCategory.DEFAULT_METRIC_CATEGORIES;
import static org.hyperledger.besu.metrics.MetricsProtocol.PROMETHEUS;
import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PORT;
@ -46,6 +45,7 @@ import org.hyperledger.besu.chainimport.JsonBlockImporter;
import org.hyperledger.besu.chainimport.RlpBlockImporter;
import org.hyperledger.besu.cli.config.EthNetworkConfig;
import org.hyperledger.besu.cli.config.NetworkName;
import org.hyperledger.besu.cli.config.ProfileName;
import org.hyperledger.besu.cli.converter.MetricCategoryConverter;
import org.hyperledger.besu.cli.converter.PercentageConverter;
import org.hyperledger.besu.cli.custom.CorsAllowedOriginsProperty;
@ -60,6 +60,7 @@ import org.hyperledger.besu.cli.options.stable.EthstatsOptions;
import org.hyperledger.besu.cli.options.stable.LoggingLevelOption;
import org.hyperledger.besu.cli.options.stable.NodePrivateKeyFileOption;
import org.hyperledger.besu.cli.options.stable.P2PTLSConfigOptions;
import org.hyperledger.besu.cli.options.stable.RpcWebsocketOptions;
import org.hyperledger.besu.cli.options.unstable.ChainPruningOptions;
import org.hyperledger.besu.cli.options.unstable.DnsOptions;
import org.hyperledger.besu.cli.options.unstable.EthProtocolOptions;
@ -123,7 +124,6 @@ import org.hyperledger.besu.ethereum.api.tls.FileBasedPasswordProvider;
import org.hyperledger.besu.ethereum.api.tls.TlsClientAuthConfiguration;
import org.hyperledger.besu.ethereum.api.tls.TlsConfiguration;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.ImmutableMiningParameters;
import org.hyperledger.besu.ethereum.core.MiningParameters;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.eth.sync.SyncMode;
@ -224,6 +224,7 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.OptionalInt;
import java.util.Set;
import java.util.TreeMap;
import java.util.function.Function;
@ -536,6 +537,13 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
+ " (default: ${DEFAULT-VALUE})")
private final NetworkName network = null;
@Option(
names = {PROFILE_OPTION_NAME},
paramLabel = PROFILE_FORMAT_HELP,
description =
"Overwrite default settings. Possible values are ${COMPLETION-CANDIDATES}. (default: none)")
private final ProfileName profile = null;
@Option(
names = {"--nat-method"},
description =
@ -798,93 +806,7 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
// JSON-RPC Websocket Options
@CommandLine.ArgGroup(validate = false, heading = "@|bold JSON-RPC Websocket Options|@%n")
JsonRPCWebsocketOptionGroup jsonRPCWebsocketOptionGroup = new JsonRPCWebsocketOptionGroup();
static class JsonRPCWebsocketOptionGroup {
@Option(
names = {"--rpc-ws-authentication-jwt-algorithm"},
description =
"Encryption algorithm used for Websockets JWT public key. Possible values are ${COMPLETION-CANDIDATES}"
+ " (default: ${DEFAULT-VALUE})",
arity = "1")
private final JwtAlgorithm rpcWebsocketsAuthenticationAlgorithm = DEFAULT_JWT_ALGORITHM;
@Option(
names = {"--rpc-ws-enabled"},
description = "Set to start the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcWsEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--rpc-ws-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description =
"Host for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcWsHost;
@Option(
names = {"--rpc-ws-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description =
"Port for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer rpcWsPort = DEFAULT_WEBSOCKET_PORT;
@Option(
names = {"--rpc-ws-max-frame-size"},
description =
"Maximum size in bytes for JSON-RPC WebSocket frames (default: ${DEFAULT-VALUE}). If this limit is exceeded, the websocket will be disconnected.",
arity = "1")
private final Integer rpcWsMaxFrameSize = DEFAULT_WS_MAX_FRAME_SIZE;
@Option(
names = {"--rpc-ws-max-active-connections"},
description =
"Maximum number of WebSocket connections allowed for JSON-RPC (default: ${DEFAULT-VALUE}). Once this limit is reached, incoming connections will be rejected.",
arity = "1")
private final Integer rpcWsMaxConnections = DEFAULT_WS_MAX_CONNECTIONS;
@Option(
names = {"--rpc-ws-api", "--rpc-ws-apis"},
paramLabel = "<api name>",
split = " {0,1}, {0,1}",
arity = "1..*",
description =
"Comma separated list of APIs to enable on JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final List<String> rpcWsApis = DEFAULT_RPC_APIS;
@Option(
names = {"--rpc-ws-api-methods-no-auth", "--rpc-ws-api-method-no-auth"},
paramLabel = "<api name>",
split = " {0,1}, {0,1}",
arity = "1..*",
description =
"Comma separated list of RPC methods to exclude from RPC authentication services, RPC WebSocket authentication must be enabled")
private final List<String> rpcWsApiMethodsNoAuth = new ArrayList<String>();
@Option(
names = {"--rpc-ws-authentication-enabled"},
description =
"Require authentication for the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcWsAuthenticationEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--rpc-ws-authentication-credentials-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Storage file for JSON-RPC WebSocket authentication credentials (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcWsAuthenticationCredentialsFile = null;
@CommandLine.Option(
names = {"--rpc-ws-authentication-jwt-public-key-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "JWT public key file for JSON-RPC WebSocket authentication",
arity = "1")
private final File rpcWsAuthenticationPublicKeyFile = null;
}
RpcWebsocketOptions rpcWebsocketOptions = new RpcWebsocketOptions();
// Privacy Options Group
@CommandLine.ArgGroup(validate = false, heading = "@|bold Privacy Options|@%n")
@ -1259,6 +1181,12 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
description = "Specifies the number of last blocks to cache (default: ${DEFAULT-VALUE})")
private final Integer numberOfblocksToCache = 0;
@Option(
names = {"--rpc-max-trace-filter-range"},
description =
"Specifies the maximum number of blocks for the trace_filter method. Must be >=0. 0 specifies no limit (default: $DEFAULT-VALUE)")
private final Long maxTraceFilterRange = 1000L;
@Mixin private P2PTLSConfigOptions p2pTLSConfigOptions;
@Mixin private PkiBlockCreationOptions pkiBlockCreationOptions;
@ -1806,6 +1734,7 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
validateDnsOptionsParams();
ensureValidPeerBoundParams();
validateRpcOptionsParams();
validateRpcWsOptions();
validateChainDataPruningParams();
validatePostMergeCheckpointBlockRequirements();
validateTransactionPoolOptions();
@ -1948,15 +1877,6 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
+ invalidHttpApis.toString());
}
if (!jsonRPCWebsocketOptionGroup.rpcWsApis.stream().allMatch(configuredApis)) {
final List<String> invalidWsApis =
new ArrayList<String>(jsonRPCWebsocketOptionGroup.rpcWsApis);
invalidWsApis.removeAll(VALID_APIS);
throw new ParameterException(
this.commandLine,
"Invalid value for option '--rpc-ws-api': invalid entries found " + invalidWsApis);
}
final boolean validHttpApiMethods =
jsonRPCHttpOptionGroup.rpcHttpApiMethodsNoAuth.stream()
.allMatch(RpcMethod::rpcMethodExists);
@ -1966,16 +1886,15 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
this.commandLine,
"Invalid value for option '--rpc-http-api-methods-no-auth', options must be valid RPC methods");
}
}
final boolean validWsApiMethods =
jsonRPCWebsocketOptionGroup.rpcWsApiMethodsNoAuth.stream()
.allMatch(RpcMethod::rpcMethodExists);
if (!validWsApiMethods) {
throw new ParameterException(
this.commandLine,
"Invalid value for option '--rpc-ws-api-methods-no-auth', options must be valid RPC methods");
}
private void validateRpcWsOptions() {
final Predicate<String> configuredApis =
apiName ->
Arrays.stream(RpcApis.values())
.anyMatch(builtInApi -> apiName.equals(builtInApi.name()))
|| rpcEndpointServiceImpl.hasNamespace(apiName);
rpcWebsocketOptions.validate(logger, commandLine, configuredApis);
}
private void validateChainDataPruningParams() {
@ -2078,10 +1997,10 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
p2pTLSConfiguration = p2pTLSConfigOptions.p2pTLSConfiguration(commandLine);
graphQLConfiguration = graphQLConfiguration();
webSocketConfiguration =
webSocketConfiguration(
jsonRPCWebsocketOptionGroup.rpcWsPort,
jsonRPCWebsocketOptionGroup.rpcWsApis,
hostsAllowlist);
rpcWebsocketOptions.webSocketConfiguration(
hostsAllowlist,
p2PDiscoveryOptionGroup.autoDiscoverDefaultIP().getHostAddress(),
unstableRPCOptions.getWsTimeoutSec());
jsonRpcIpcConfiguration =
jsonRpcIpcConfiguration(
unstableIpcOptions.isEnabled(),
@ -2450,71 +2369,6 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
return jsonRPCHttpOptionGroup.isRpcHttpEnabled && jsonRPCHttpOptionGroup.isRpcHttpTlsEnabled;
}
private WebSocketConfiguration webSocketConfiguration(
final Integer listenPort, final List<String> apiGroups, final List<String> allowCallsFrom) {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-ws-enabled",
!jsonRPCWebsocketOptionGroup.isRpcWsEnabled,
asList(
"--rpc-ws-api",
"--rpc-ws-apis",
"--rpc-ws-api-method-no-auth",
"--rpc-ws-api-methods-no-auth",
"--rpc-ws-host",
"--rpc-ws-port",
"--rpc-ws-max-frame-size",
"--rpc-ws-max-active-connections",
"--rpc-ws-authentication-enabled",
"--rpc-ws-authentication-credentials-file",
"--rpc-ws-authentication-public-key-file",
"--rpc-ws-authentication-jwt-algorithm"));
if (jsonRPCWebsocketOptionGroup.isRpcWsAuthenticationEnabled) {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-ws-authentication-public-key-file",
jsonRPCWebsocketOptionGroup.rpcWsAuthenticationPublicKeyFile == null,
asList("--rpc-ws-authentication-jwt-algorithm"));
}
if (jsonRPCWebsocketOptionGroup.isRpcWsAuthenticationEnabled
&& rpcWsAuthenticationCredentialsFile() == null
&& jsonRPCWebsocketOptionGroup.rpcWsAuthenticationPublicKeyFile == null) {
throw new ParameterException(
commandLine,
"Unable to authenticate JSON-RPC WebSocket endpoint without a supplied credentials file or authentication public key file");
}
final WebSocketConfiguration webSocketConfiguration = WebSocketConfiguration.createDefault();
webSocketConfiguration.setEnabled(jsonRPCWebsocketOptionGroup.isRpcWsEnabled);
webSocketConfiguration.setHost(
Strings.isNullOrEmpty(jsonRPCWebsocketOptionGroup.rpcWsHost)
? p2PDiscoveryOptionGroup.autoDiscoverDefaultIP().getHostAddress()
: jsonRPCWebsocketOptionGroup.rpcWsHost);
webSocketConfiguration.setPort(listenPort);
webSocketConfiguration.setMaxFrameSize(jsonRPCWebsocketOptionGroup.rpcWsMaxFrameSize);
webSocketConfiguration.setMaxActiveConnections(jsonRPCWebsocketOptionGroup.rpcWsMaxConnections);
webSocketConfiguration.setRpcApis(apiGroups);
webSocketConfiguration.setRpcApisNoAuth(
jsonRPCWebsocketOptionGroup.rpcWsApiMethodsNoAuth.stream()
.distinct()
.collect(Collectors.toList()));
webSocketConfiguration.setAuthenticationEnabled(
jsonRPCWebsocketOptionGroup.isRpcWsAuthenticationEnabled);
webSocketConfiguration.setAuthenticationCredentialsFile(rpcWsAuthenticationCredentialsFile());
webSocketConfiguration.setHostsAllowlist(allowCallsFrom);
webSocketConfiguration.setAuthenticationPublicKeyFile(
jsonRPCWebsocketOptionGroup.rpcWsAuthenticationPublicKeyFile);
webSocketConfiguration.setAuthenticationAlgorithm(
jsonRPCWebsocketOptionGroup.rpcWebsocketsAuthenticationAlgorithm);
webSocketConfiguration.setTimeoutSec(unstableRPCOptions.getWsTimeoutSec());
return webSocketConfiguration;
}
private ApiConfiguration apiConfiguration() {
checkApiOptionsDependencies();
var builder =
@ -2526,7 +2380,8 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
.gasPriceMax(apiGasPriceMax)
.maxLogsRange(rpcMaxLogsRange)
.gasCap(rpcGasCap)
.isGasAndPriorityFeeLimitingEnabled(apiGasAndPriorityFeeLimitingEnabled);
.isGasAndPriorityFeeLimitingEnabled(apiGasAndPriorityFeeLimitingEnabled)
.maxTraceFilterRange(maxTraceFilterRange);
if (apiGasAndPriorityFeeLimitingEnabled) {
if (apiGasAndPriorityFeeLowerBoundCoefficient > apiGasAndPriorityFeeUpperBoundCoefficient) {
throw new ParameterException(
@ -2596,7 +2451,7 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
private Optional<PermissioningConfiguration> permissioningConfiguration() throws Exception {
if (!(localPermissionsEnabled() || contractPermissionsEnabled())) {
if (jsonRPCHttpOptionGroup.rpcHttpApis.contains(RpcApis.PERM.name())
|| jsonRPCWebsocketOptionGroup.rpcWsApis.contains(RpcApis.PERM.name())) {
|| rpcWebsocketOptions.getRpcWsApis().contains(RpcApis.PERM.name())) {
logger.warn(
"Permissions are disabled. Cannot enable PERM APIs when not using Permissions.");
}
@ -2799,9 +2654,9 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
private boolean anyPrivacyApiEnabled() {
return jsonRPCHttpOptionGroup.rpcHttpApis.contains(RpcApis.EEA.name())
|| jsonRPCWebsocketOptionGroup.rpcWsApis.contains(RpcApis.EEA.name())
|| rpcWebsocketOptions.getRpcWsApis().contains(RpcApis.EEA.name())
|| jsonRPCHttpOptionGroup.rpcHttpApis.contains(RpcApis.PRIV.name())
|| jsonRPCWebsocketOptionGroup.rpcWsApis.contains(RpcApis.PRIV.name());
|| rpcWebsocketOptions.getRpcWsApis().contains(RpcApis.PRIV.name());
}
private PrivacyKeyValueStorageProvider privacyKeyStorageProvider(final String name) {
@ -2903,32 +2758,28 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
private MiningParameters getMiningParameters() {
if (miningParameters == null) {
final var miningParametersBuilder =
ImmutableMiningParameters.builder().from(miningOptions.toDomainObject());
final var actualGenesisOptions = getActualGenesisConfigOptions();
if (actualGenesisOptions.isPoa()) {
miningParametersBuilder.genesisBlockPeriodSeconds(
getGenesisBlockPeriodSeconds(actualGenesisOptions));
}
miningParameters = miningParametersBuilder.build();
miningOptions.setGenesisBlockPeriodSeconds(
getGenesisBlockPeriodSeconds(getActualGenesisConfigOptions()));
miningParameters = miningOptions.toDomainObject();
}
return miningParameters;
}
private int getGenesisBlockPeriodSeconds(final GenesisConfigOptions genesisConfigOptions) {
private OptionalInt getGenesisBlockPeriodSeconds(
final GenesisConfigOptions genesisConfigOptions) {
if (genesisConfigOptions.isClique()) {
return genesisConfigOptions.getCliqueConfigOptions().getBlockPeriodSeconds();
return OptionalInt.of(genesisConfigOptions.getCliqueConfigOptions().getBlockPeriodSeconds());
}
if (genesisConfigOptions.isIbft2()) {
return genesisConfigOptions.getBftConfigOptions().getBlockPeriodSeconds();
return OptionalInt.of(genesisConfigOptions.getBftConfigOptions().getBlockPeriodSeconds());
}
if (genesisConfigOptions.isQbft()) {
return genesisConfigOptions.getQbftConfigOptions().getBlockPeriodSeconds();
return OptionalInt.of(genesisConfigOptions.getQbftConfigOptions().getBlockPeriodSeconds());
}
throw new IllegalArgumentException("Should only be called for a PoA network");
return OptionalInt.empty();
}
private boolean isPruningEnabled() {
@ -3176,15 +3027,6 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
return filename;
}
private String rpcWsAuthenticationCredentialsFile() {
final String filename = jsonRPCWebsocketOptionGroup.rpcWsAuthenticationCredentialsFile;
if (filename != null) {
RpcAuthFileValidator.validate(commandLine, filename, "WS");
}
return filename;
}
private String getDefaultPermissioningFilePath() {
return dataDir()
+ System.getProperty("file.separator")
@ -3322,9 +3164,7 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
jsonRPCHttpOptionGroup.rpcHttpPort,
jsonRPCHttpOptionGroup.isRpcHttpEnabled);
addPortIfEnabled(
effectivePorts,
jsonRPCWebsocketOptionGroup.rpcWsPort,
jsonRPCWebsocketOptionGroup.isRpcWsEnabled);
effectivePorts, rpcWebsocketOptions.getRpcWsPort(), rpcWebsocketOptions.isRpcWsEnabled());
addPortIfEnabled(effectivePorts, engineRPCOptionGroup.engineRpcPort, isEngineApiEnabled());
addPortIfEnabled(
effectivePorts, metricsOptionGroup.metricsPort, metricsOptionGroup.isMetricsEnabled);
@ -3402,7 +3242,12 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
return genesisConfigOptions.getEcCurve();
}
private GenesisConfigOptions getActualGenesisConfigOptions() {
/**
* Return the genesis config options after applying any specified config overrides
*
* @return the genesis config options after applying any specified config overrides
*/
protected GenesisConfigOptions getActualGenesisConfigOptions() {
return Optional.ofNullable(genesisConfigOptions)
.orElseGet(
() ->
@ -3508,6 +3353,10 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
builder.setNetwork(network.normalize());
}
if (profile != null) {
builder.setProfile(profile.toString());
}
builder.setHasCustomGenesis(genesisFile != null);
if (genesisFile != null) {
builder.setCustomGenesis(genesisFile.getAbsolutePath());
@ -3543,12 +3392,12 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
builder.setHighSpecEnabled();
}
if (dataStorageOptions.toDomainObject().getUnstable().getBonsaiTrieLogPruningEnabled()) {
builder.setTrieLogPruningEnabled();
builder.setTrieLogRetentionThreshold(
dataStorageOptions.toDomainObject().getUnstable().getBonsaiTrieLogRetentionThreshold());
builder.setTrieLogPruningLimit(
dataStorageOptions.toDomainObject().getUnstable().getBonsaiTrieLogPruningLimit());
if (dataStorageOptions.toDomainObject().getUnstable().getBonsaiLimitTrieLogsEnabled()) {
builder.setLimitTrieLogsEnabled();
builder.setTrieLogRetentionLimit(
dataStorageOptions.toDomainObject().getBonsaiMaxLayersToLoad());
builder.setTrieLogsPruningWindowSize(
dataStorageOptions.toDomainObject().getUnstable().getBonsaiTrieLogPruningWindowSize());
}
builder.setTxPoolImplementation(buildTransactionPoolConfiguration().getTxPoolImplementation());

@ -41,6 +41,7 @@ public class ConfigurationOverviewBuilder {
private String network;
private BigInteger networkId;
private String profile;
private boolean hasCustomGenesis;
private String customGenesisFileName;
private String dataStorage;
@ -51,9 +52,9 @@ public class ConfigurationOverviewBuilder {
private Collection<String> engineApis;
private String engineJwtFilePath;
private boolean isHighSpec = false;
private boolean isTrieLogPruningEnabled = false;
private long trieLogRetentionThreshold = 0;
private Integer trieLogPruningLimit = null;
private boolean isBonsaiLimitTrieLogsEnabled = false;
private long trieLogRetentionLimit = 0;
private Integer trieLogsPruningWindowSize = null;
private TransactionPoolConfiguration.Implementation txPoolImplementation;
private EvmConfiguration.WorldUpdaterMode worldStateUpdateMode;
private Map<String, String> environment;
@ -88,6 +89,17 @@ public class ConfigurationOverviewBuilder {
return this;
}
/**
* Sets profile.
*
* @param profile the profile
* @return the profile
*/
public ConfigurationOverviewBuilder setProfile(final String profile) {
this.profile = profile;
return this;
}
/**
* Sets whether a custom genesis has been specified.
*
@ -187,34 +199,34 @@ public class ConfigurationOverviewBuilder {
}
/**
* Sets trie log pruning enabled
* Sets limit trie logs enabled
*
* @return the builder
*/
public ConfigurationOverviewBuilder setTrieLogPruningEnabled() {
isTrieLogPruningEnabled = true;
public ConfigurationOverviewBuilder setLimitTrieLogsEnabled() {
isBonsaiLimitTrieLogsEnabled = true;
return this;
}
/**
* Sets trie log retention threshold
* Sets trie log retention limit
*
* @param threshold the number of blocks to retain trie logs for
* @param limit the number of blocks to retain trie logs for
* @return the builder
*/
public ConfigurationOverviewBuilder setTrieLogRetentionThreshold(final long threshold) {
trieLogRetentionThreshold = threshold;
public ConfigurationOverviewBuilder setTrieLogRetentionLimit(final long limit) {
trieLogRetentionLimit = limit;
return this;
}
/**
* Sets trie log pruning limit
* Sets trie logs pruning window size
*
* @param limit the max number of blocks to load and prune trie logs for at startup
* @param size the max number of blocks to load and prune trie logs for at startup
* @return the builder
*/
public ConfigurationOverviewBuilder setTrieLogPruningLimit(final int limit) {
trieLogPruningLimit = limit;
public ConfigurationOverviewBuilder setTrieLogsPruningWindowSize(final int size) {
trieLogsPruningWindowSize = size;
return this;
}
@ -290,6 +302,10 @@ public class ConfigurationOverviewBuilder {
lines.add("Network Id: " + networkId);
}
if (profile != null) {
lines.add("Profile: " + profile);
}
if (dataStorage != null) {
lines.add("Data storage: " + dataStorage);
}
@ -323,13 +339,13 @@ public class ConfigurationOverviewBuilder {
lines.add("Using " + worldStateUpdateMode + " worldstate update mode");
if (isTrieLogPruningEnabled) {
if (isBonsaiLimitTrieLogsEnabled) {
final StringBuilder trieLogPruningString = new StringBuilder();
trieLogPruningString
.append("Trie log pruning enabled: retention: ")
.append(trieLogRetentionThreshold);
if (trieLogPruningLimit != null) {
trieLogPruningString.append("; prune limit: ").append(trieLogPruningLimit);
.append("Limit trie logs enabled: retention: ")
.append(trieLogRetentionLimit);
if (trieLogsPruningWindowSize != null) {
trieLogPruningString.append("; prune window: ").append(trieLogsPruningWindowSize);
}
lines.add(trieLogPruningString.toString());
}

@ -54,6 +54,10 @@ public interface DefaultCommandValues {
String MANDATORY_MODE_FORMAT_HELP = "<MODE>";
/** The constant MANDATORY_NETWORK_FORMAT_HELP. */
String MANDATORY_NETWORK_FORMAT_HELP = "<NETWORK>";
/** The constant PROFILE_OPTION_NAME. */
String PROFILE_OPTION_NAME = "--profile";
/** The constant PROFILE_FORMAT_HELP. */
String PROFILE_FORMAT_HELP = "<PROFILE>";
/** The constant MANDATORY_NODE_ID_FORMAT_HELP. */
String MANDATORY_NODE_ID_FORMAT_HELP = "<NODEID>";
/** The constant PERMISSIONING_CONFIG_LOCATION. */

@ -0,0 +1,41 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli.config;
/** Enum for profile names. Each profile corresponds to a configuration file. */
public enum ProfileName {
/** The 'DEV' profile. Corresponds to the 'profiles/dev.toml' configuration file. */
DEV("profiles/dev.toml");
private final String configFile;
/**
* Constructs a new ProfileName.
*
* @param configFile the configuration file corresponding to the profile
*/
ProfileName(final String configFile) {
this.configFile = configFile;
}
/**
* Gets the configuration file corresponding to the profile.
*
* @return the configuration file
*/
public String getConfigFile() {
return configFile;
}
}
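A hypothetical usage sketch of the new enum (this is not Besu's ProfileFinder; the class and flow below are illustrative only): the profile selected with the new `--profile` option resolves to a TOML resource bundled on the classpath, which the configuration default provider can read before user-supplied options override it.

import java.io.InputStream;
import org.hyperledger.besu.cli.config.ProfileName;

public class ProfileLookupSketch {
  public static void main(final String[] args) throws Exception {
    final ProfileName profile = ProfileName.DEV; // e.g. chosen via the new --profile option
    // Load the bundled defaults, here "profiles/dev.toml", from the classpath.
    try (InputStream defaults =
        ProfileName.class.getClassLoader().getResourceAsStream(profile.getConfigFile())) {
      System.out.println(profile.getConfigFile() + " found: " + (defaults != null));
    }
  }
}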

@ -40,6 +40,7 @@ import org.hyperledger.besu.ethereum.core.MiningParameters;
import org.hyperledger.besu.util.number.PositiveNumber;
import java.util.List;
import java.util.OptionalInt;
import org.apache.tuweni.bytes.Bytes;
import org.slf4j.Logger;
@ -188,6 +189,8 @@ public class MiningOptions implements CLIOptions<MiningParameters> {
DEFAULT_POS_BLOCK_CREATION_REPETITION_MIN_DURATION;
}
private OptionalInt maybeGenesisBlockPeriodSeconds;
private MiningOptions() {}
/**
@ -199,6 +202,16 @@ public class MiningOptions implements CLIOptions<MiningParameters> {
return new MiningOptions();
}
/**
* Set the optional genesis block period per seconds
*
* @param genesisBlockPeriodSeconds if the network is PoA then the block period in seconds
* specified in the genesis file, otherwise empty.
*/
public void setGenesisBlockPeriodSeconds(final OptionalInt genesisBlockPeriodSeconds) {
maybeGenesisBlockPeriodSeconds = genesisBlockPeriodSeconds;
}
/**
* Validate that there are no inconsistencies in the specified options. For example that the
* options are valid for the selected implementation.
@ -285,6 +298,7 @@ public class MiningOptions implements CLIOptions<MiningParameters> {
static MiningOptions fromConfig(final MiningParameters miningParameters) {
final MiningOptions miningOptions = MiningOptions.create();
miningOptions.setGenesisBlockPeriodSeconds(miningParameters.getGenesisBlockPeriodSeconds());
miningOptions.isMiningEnabled = miningParameters.isMiningEnabled();
miningOptions.iStratumMiningEnabled = miningParameters.isStratumMiningEnabled();
miningOptions.stratumNetworkInterface = miningParameters.getStratumNetworkInterface();
@ -319,6 +333,11 @@ public class MiningOptions implements CLIOptions<MiningParameters> {
@Override
public MiningParameters toDomainObject() {
if (maybeGenesisBlockPeriodSeconds == null) {
throw new IllegalStateException(
"genesisBlockPeriodSeconds must be set before using this object");
}
final var updatableInitValuesBuilder =
MutableInitValues.builder()
.isMiningEnabled(isMiningEnabled)
@ -334,27 +353,26 @@ public class MiningOptions implements CLIOptions<MiningParameters> {
updatableInitValuesBuilder.coinbase(coinbase);
}
final var miningParametersBuilder =
ImmutableMiningParameters.builder()
.mutableInitValues(updatableInitValuesBuilder.build())
.isStratumMiningEnabled(iStratumMiningEnabled)
.stratumNetworkInterface(stratumNetworkInterface)
.stratumPort(stratumPort)
.nonPoaBlockTxsSelectionMaxTime(nonPoaBlockTxsSelectionMaxTime)
.poaBlockTxsSelectionMaxTime(poaBlockTxsSelectionMaxTime)
.unstable(
ImmutableMiningParameters.Unstable.builder()
.remoteSealersLimit(unstableOptions.remoteSealersLimit)
.remoteSealersTimeToLive(unstableOptions.remoteSealersTimeToLive)
.powJobTimeToLive(unstableOptions.powJobTimeToLive)
.maxOmmerDepth(unstableOptions.maxOmmersDepth)
.stratumExtranonce(unstableOptions.stratumExtranonce)
.posBlockCreationMaxTime(unstableOptions.posBlockCreationMaxTime)
.posBlockCreationRepetitionMinDuration(
unstableOptions.posBlockCreationRepetitionMinDuration)
.build());
return miningParametersBuilder.build();
return ImmutableMiningParameters.builder()
.genesisBlockPeriodSeconds(maybeGenesisBlockPeriodSeconds)
.mutableInitValues(updatableInitValuesBuilder.build())
.isStratumMiningEnabled(iStratumMiningEnabled)
.stratumNetworkInterface(stratumNetworkInterface)
.stratumPort(stratumPort)
.nonPoaBlockTxsSelectionMaxTime(nonPoaBlockTxsSelectionMaxTime)
.poaBlockTxsSelectionMaxTime(poaBlockTxsSelectionMaxTime)
.unstable(
ImmutableMiningParameters.Unstable.builder()
.remoteSealersLimit(unstableOptions.remoteSealersLimit)
.remoteSealersTimeToLive(unstableOptions.remoteSealersTimeToLive)
.powJobTimeToLive(unstableOptions.powJobTimeToLive)
.maxOmmerDepth(unstableOptions.maxOmmersDepth)
.stratumExtranonce(unstableOptions.stratumExtranonce)
.posBlockCreationMaxTime(unstableOptions.posBlockCreationMaxTime)
.posBlockCreationRepetitionMinDuration(
unstableOptions.posBlockCreationRepetitionMinDuration)
.build())
.build();
}
@Override
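A brief usage sketch of the call order the refactored MiningOptions now expects (the types are the Besu classes shown in this diff and would need them on the classpath; the concrete values are examples only): toDomainObject() must be preceded by setGenesisBlockPeriodSeconds, which carries an empty OptionalInt for non-PoA networks.

// assumes: import java.util.OptionalInt; plus the Besu MiningOptions/MiningParameters types above
final MiningOptions miningOptions = MiningOptions.create();
miningOptions.setGenesisBlockPeriodSeconds(OptionalInt.of(5)); // e.g. a 5-second PoA block period
final MiningParameters miningParameters = miningOptions.toDomainObject(); // throws IllegalStateException if the period was never set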

@ -17,10 +17,9 @@
package org.hyperledger.besu.cli.options.stable;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_TRIE_LOG_PRUNING_ENABLED;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_TRIE_LOG_PRUNING_LIMIT;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_TRIE_LOG_RETENTION_THRESHOLD;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.MINIMUM_BONSAI_TRIE_LOG_RETENTION_THRESHOLD;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_LIMIT_TRIE_LOGS_ENABLED;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT;
import org.hyperledger.besu.cli.options.CLIOptions;
import org.hyperledger.besu.cli.util.CommandLineUtils;
@ -39,7 +38,8 @@ public class DataStorageOptions implements CLIOptions<DataStorageConfiguration>
private static final String DATA_STORAGE_FORMAT = "--data-storage-format";
private static final String BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD =
/** The maximum number of historical layers to load. */
public static final String BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD =
"--bonsai-historical-block-limit";
// Use Bonsai DB
@ -54,34 +54,37 @@ public class DataStorageOptions implements CLIOptions<DataStorageConfiguration>
names = {BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD, "--bonsai-maximum-back-layers-to-load"},
paramLabel = "<LONG>",
description =
"Limit of historical layers that can be loaded with BONSAI (default: ${DEFAULT-VALUE}).",
"Limit of historical layers that can be loaded with BONSAI (default: ${DEFAULT-VALUE}). When using "
+ Unstable.BONSAI_LIMIT_TRIE_LOGS_ENABLED
+ " it will also be used as the number of layers of trie logs to retain.",
arity = "1")
private Long bonsaiMaxLayersToLoad = DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD;
@CommandLine.ArgGroup(validate = false)
private final DataStorageOptions.Unstable unstableOptions = new Unstable();
static class Unstable {
/** The unstable options for data storage. */
public static class Unstable {
private static final String BONSAI_LIMIT_TRIE_LOGS_ENABLED =
"--Xbonsai-limit-trie-logs-enabled";
@CommandLine.Option(
hidden = true,
names = {"--Xbonsai-trie-log-pruning-enabled"},
description = "Enable trie log pruning. (default: ${DEFAULT-VALUE})")
private boolean bonsaiTrieLogPruningEnabled = DEFAULT_BONSAI_TRIE_LOG_PRUNING_ENABLED;
/** The bonsai trie logs pruning window size. */
public static final String BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE =
"--Xbonsai-trie-logs-pruning-window-size";
@CommandLine.Option(
hidden = true,
names = {"--Xbonsai-trie-log-retention-threshold"},
names = {BONSAI_LIMIT_TRIE_LOGS_ENABLED},
description =
"The number of blocks for which to retain trie logs. (default: ${DEFAULT-VALUE})")
private long bonsaiTrieLogRetentionThreshold = DEFAULT_BONSAI_TRIE_LOG_RETENTION_THRESHOLD;
"Limit the number of trie logs that are retained. (default: ${DEFAULT-VALUE})")
private boolean bonsaiLimitTrieLogsEnabled = DEFAULT_BONSAI_LIMIT_TRIE_LOGS_ENABLED;
@CommandLine.Option(
hidden = true,
names = {"--Xbonsai-trie-log-pruning-limit"},
names = {BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE},
description =
"The max number of blocks to load and prune trie logs for at startup. (default: ${DEFAULT-VALUE})")
private int bonsaiTrieLogPruningLimit = DEFAULT_BONSAI_TRIE_LOG_PRUNING_LIMIT;
private int bonsaiTrieLogPruningWindowSize = DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
}
/**
* Create data storage options.
@ -98,21 +101,31 @@ public class DataStorageOptions implements CLIOptions<DataStorageConfiguration>
* @param commandLine the full commandLine to check all the options specified by the user
*/
public void validate(final CommandLine commandLine) {
if (unstableOptions.bonsaiTrieLogPruningEnabled) {
if (unstableOptions.bonsaiTrieLogRetentionThreshold
< MINIMUM_BONSAI_TRIE_LOG_RETENTION_THRESHOLD) {
if (unstableOptions.bonsaiLimitTrieLogsEnabled) {
if (bonsaiMaxLayersToLoad < MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT) {
throw new CommandLine.ParameterException(
commandLine,
String.format(
BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD + " minimum value is %d",
MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT));
}
if (unstableOptions.bonsaiTrieLogPruningWindowSize <= 0) {
throw new CommandLine.ParameterException(
commandLine,
String.format(
"--Xbonsai-trie-log-retention-threshold minimum value is %d",
MINIMUM_BONSAI_TRIE_LOG_RETENTION_THRESHOLD));
Unstable.BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE + "=%d must be greater than 0",
unstableOptions.bonsaiTrieLogPruningWindowSize));
}
if (unstableOptions.bonsaiTrieLogPruningLimit <= 0) {
if (unstableOptions.bonsaiTrieLogPruningWindowSize <= bonsaiMaxLayersToLoad) {
throw new CommandLine.ParameterException(
commandLine,
String.format(
"--Xbonsai-trie-log-pruning-limit=%d must be greater than 0",
unstableOptions.bonsaiTrieLogPruningLimit));
Unstable.BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE
+ "=%d must be greater than "
+ BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD
+ "=%d",
unstableOptions.bonsaiTrieLogPruningWindowSize,
bonsaiMaxLayersToLoad));
}
}
}
@ -121,12 +134,10 @@ public class DataStorageOptions implements CLIOptions<DataStorageConfiguration>
final DataStorageOptions dataStorageOptions = DataStorageOptions.create();
dataStorageOptions.dataStorageFormat = domainObject.getDataStorageFormat();
dataStorageOptions.bonsaiMaxLayersToLoad = domainObject.getBonsaiMaxLayersToLoad();
dataStorageOptions.unstableOptions.bonsaiTrieLogPruningEnabled =
domainObject.getUnstable().getBonsaiTrieLogPruningEnabled();
dataStorageOptions.unstableOptions.bonsaiTrieLogRetentionThreshold =
domainObject.getUnstable().getBonsaiTrieLogRetentionThreshold();
dataStorageOptions.unstableOptions.bonsaiTrieLogPruningLimit =
domainObject.getUnstable().getBonsaiTrieLogPruningLimit();
dataStorageOptions.unstableOptions.bonsaiLimitTrieLogsEnabled =
domainObject.getUnstable().getBonsaiLimitTrieLogsEnabled();
dataStorageOptions.unstableOptions.bonsaiTrieLogPruningWindowSize =
domainObject.getUnstable().getBonsaiTrieLogPruningWindowSize();
return dataStorageOptions;
}
@ -138,9 +149,8 @@ public class DataStorageOptions implements CLIOptions<DataStorageConfiguration>
.bonsaiMaxLayersToLoad(bonsaiMaxLayersToLoad)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiTrieLogPruningEnabled(unstableOptions.bonsaiTrieLogPruningEnabled)
.bonsaiTrieLogRetentionThreshold(unstableOptions.bonsaiTrieLogRetentionThreshold)
.bonsaiTrieLogPruningLimit(unstableOptions.bonsaiTrieLogPruningLimit)
.bonsaiLimitTrieLogsEnabled(unstableOptions.bonsaiLimitTrieLogsEnabled)
.bonsaiTrieLogPruningWindowSize(unstableOptions.bonsaiTrieLogPruningWindowSize)
.build())
.build();
}
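Editorial examples of how the renamed flags interact with the validate() rules above (the concrete numbers are illustrative, and the 512 default for --bonsai-historical-block-limit is an assumption based on the changelog entry about retaining the latest 512 trie logs):

// --Xbonsai-limit-trie-logs-enabled --bonsai-historical-block-limit=1024 --Xbonsai-trie-logs-pruning-window-size=5000
//   accepted: 1024 is at least the minimum retention limit and the pruning window (5000) is greater than 1024
// --Xbonsai-limit-trie-logs-enabled --Xbonsai-trie-logs-pruning-window-size=512
//   rejected: assuming the default --bonsai-historical-block-limit of 512, the pruning window must be strictly greater than 512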

@ -0,0 +1,266 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli.options.stable;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.DEFAULT_RPC_APIS;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.VALID_APIS;
import static org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration.DEFAULT_WEBSOCKET_PORT;
import org.hyperledger.besu.cli.DefaultCommandValues;
import org.hyperledger.besu.cli.custom.RpcAuthFileValidator;
import org.hyperledger.besu.cli.util.CommandLineUtils;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.authentication.JwtAlgorithm;
import org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import com.google.common.base.Strings;
import org.slf4j.Logger;
import picocli.CommandLine;
/** This class represents the WebSocket options for the RPC. */
public class RpcWebsocketOptions {
@CommandLine.Option(
names = {"--rpc-ws-authentication-jwt-algorithm"},
description =
"Encryption algorithm used for Websockets JWT public key. Possible values are ${COMPLETION-CANDIDATES}"
+ " (default: ${DEFAULT-VALUE})",
arity = "1")
private final JwtAlgorithm rpcWebsocketsAuthenticationAlgorithm =
DefaultCommandValues.DEFAULT_JWT_ALGORITHM;
@CommandLine.Option(
names = {"--rpc-ws-enabled"},
description = "Set to start the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcWsEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--rpc-ws-host"},
paramLabel = DefaultCommandValues.MANDATORY_HOST_FORMAT_HELP,
description = "Host for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcWsHost;
@CommandLine.Option(
names = {"--rpc-ws-port"},
paramLabel = DefaultCommandValues.MANDATORY_PORT_FORMAT_HELP,
description = "Port for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer rpcWsPort = DEFAULT_WEBSOCKET_PORT;
@CommandLine.Option(
names = {"--rpc-ws-max-frame-size"},
description =
"Maximum size in bytes for JSON-RPC WebSocket frames (default: ${DEFAULT-VALUE}). If this limit is exceeded, the websocket will be disconnected.",
arity = "1")
private final Integer rpcWsMaxFrameSize = DefaultCommandValues.DEFAULT_WS_MAX_FRAME_SIZE;
@CommandLine.Option(
names = {"--rpc-ws-max-active-connections"},
description =
"Maximum number of WebSocket connections allowed for JSON-RPC (default: ${DEFAULT-VALUE}). Once this limit is reached, incoming connections will be rejected.",
arity = "1")
private final Integer rpcWsMaxConnections = DefaultCommandValues.DEFAULT_WS_MAX_CONNECTIONS;
@CommandLine.Option(
names = {"--rpc-ws-api", "--rpc-ws-apis"},
paramLabel = "<api name>",
split = " {0,1}, {0,1}",
arity = "1..*",
description =
"Comma separated list of APIs to enable on JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final List<String> rpcWsApis = DEFAULT_RPC_APIS;
@CommandLine.Option(
names = {"--rpc-ws-api-methods-no-auth", "--rpc-ws-api-method-no-auth"},
paramLabel = "<api name>",
split = " {0,1}, {0,1}",
arity = "1..*",
description =
"Comma separated list of RPC methods to exclude from RPC authentication services, RPC WebSocket authentication must be enabled")
private final List<String> rpcWsApiMethodsNoAuth = new ArrayList<String>();
@CommandLine.Option(
names = {"--rpc-ws-authentication-enabled"},
description =
"Require authentication for the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcWsAuthenticationEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--rpc-ws-authentication-credentials-file"},
paramLabel = DefaultCommandValues.MANDATORY_FILE_FORMAT_HELP,
description =
"Storage file for JSON-RPC WebSocket authentication credentials (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcWsAuthenticationCredentialsFile = null;
@CommandLine.Option(
names = {"--rpc-ws-authentication-jwt-public-key-file"},
paramLabel = DefaultCommandValues.MANDATORY_FILE_FORMAT_HELP,
description = "JWT public key file for JSON-RPC WebSocket authentication",
arity = "1")
private final File rpcWsAuthenticationPublicKeyFile = null;
/**
* Validates the WebSocket options.
*
* @param logger Logger instance
* @param commandLine CommandLine instance
* @param configuredApis Predicate for configured APIs
*/
public void validate(
final Logger logger, final CommandLine commandLine, final Predicate<String> configuredApis) {
checkOptionDependencies(logger, commandLine);
if (!rpcWsApis.stream().allMatch(configuredApis)) {
final List<String> invalidWsApis = new ArrayList<>(rpcWsApis);
invalidWsApis.removeAll(VALID_APIS);
throw new CommandLine.ParameterException(
commandLine,
"Invalid value for option '--rpc-ws-api': invalid entries found " + invalidWsApis);
}
final boolean validWsApiMethods =
rpcWsApiMethodsNoAuth.stream().allMatch(RpcMethod::rpcMethodExists);
if (!validWsApiMethods) {
throw new CommandLine.ParameterException(
commandLine,
"Invalid value for option '--rpc-ws-api-methods-no-auth', options must be valid RPC methods");
}
if (isRpcWsAuthenticationEnabled
&& rpcWsAuthenticationCredentialsFile(commandLine) == null
&& rpcWsAuthenticationPublicKeyFile == null) {
throw new CommandLine.ParameterException(
commandLine,
"Unable to authenticate JSON-RPC WebSocket endpoint without a supplied credentials file or authentication public key file");
}
}
/**
* Checks the dependencies of the WebSocket options.
*
* @param logger Logger instance
* @param commandLine CommandLine instance
*/
private void checkOptionDependencies(final Logger logger, final CommandLine commandLine) {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-ws-enabled",
!isRpcWsEnabled,
List.of(
"--rpc-ws-api",
"--rpc-ws-apis",
"--rpc-ws-api-method-no-auth",
"--rpc-ws-api-methods-no-auth",
"--rpc-ws-host",
"--rpc-ws-port",
"--rpc-ws-max-frame-size",
"--rpc-ws-max-active-connections",
"--rpc-ws-authentication-enabled",
"--rpc-ws-authentication-credentials-file",
"--rpc-ws-authentication-public-key-file",
"--rpc-ws-authentication-jwt-algorithm"));
if (isRpcWsAuthenticationEnabled) {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-ws-authentication-public-key-file",
rpcWsAuthenticationPublicKeyFile == null,
List.of("--rpc-ws-authentication-jwt-algorithm"));
}
}
/**
* Creates a WebSocket configuration based on the WebSocket options.
*
* @param hostsAllowlist List of allowed hosts
* @param defaultHostAddress Default host address
* @param wsTimeoutSec WebSocket timeout in seconds
* @return WebSocketConfiguration instance
*/
public WebSocketConfiguration webSocketConfiguration(
final List<String> hostsAllowlist, final String defaultHostAddress, final Long wsTimeoutSec) {
final WebSocketConfiguration webSocketConfiguration = WebSocketConfiguration.createDefault();
webSocketConfiguration.setEnabled(isRpcWsEnabled);
webSocketConfiguration.setHost(
Strings.isNullOrEmpty(rpcWsHost) ? defaultHostAddress : rpcWsHost);
webSocketConfiguration.setPort(rpcWsPort);
webSocketConfiguration.setMaxFrameSize(rpcWsMaxFrameSize);
webSocketConfiguration.setMaxActiveConnections(rpcWsMaxConnections);
webSocketConfiguration.setRpcApis(rpcWsApis);
webSocketConfiguration.setRpcApisNoAuth(
rpcWsApiMethodsNoAuth.stream().distinct().collect(Collectors.toList()));
webSocketConfiguration.setAuthenticationEnabled(isRpcWsAuthenticationEnabled);
webSocketConfiguration.setAuthenticationCredentialsFile(rpcWsAuthenticationCredentialsFile);
webSocketConfiguration.setHostsAllowlist(hostsAllowlist);
webSocketConfiguration.setAuthenticationPublicKeyFile(rpcWsAuthenticationPublicKeyFile);
webSocketConfiguration.setAuthenticationAlgorithm(rpcWebsocketsAuthenticationAlgorithm);
webSocketConfiguration.setTimeoutSec(wsTimeoutSec);
return webSocketConfiguration;
}
/**
* Validates the authentication credentials file for the WebSocket.
*
* @param commandLine CommandLine instance
* @return Filename of the authentication credentials file
*/
private String rpcWsAuthenticationCredentialsFile(final CommandLine commandLine) {
final String filename = rpcWsAuthenticationCredentialsFile;
if (filename != null) {
RpcAuthFileValidator.validate(commandLine, filename, "WS");
}
return filename;
}
/**
* Returns the list of APIs for the WebSocket.
*
* @return List of APIs
*/
public List<String> getRpcWsApis() {
return rpcWsApis;
}
/**
* Checks if the WebSocket service is enabled.
*
* @return Boolean indicating if the WebSocket service is enabled
*/
public Boolean isRpcWsEnabled() {
return isRpcWsEnabled;
}
/**
* Returns the port for the WebSocket service.
*
* @return Port number
*/
public Integer getRpcWsPort() {
return rpcWsPort;
}
}
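For reference, a minimal standalone sketch, not part of Besu and assuming only picocli on the classpath, showing how the split regex " {0,1}, {0,1}" used by the --rpc-ws-api and --rpc-ws-api-methods-no-auth options turns a comma separated value into a list:

import java.util.ArrayList;
import java.util.List;
import picocli.CommandLine;

public class WsApiSplitDemo {
  @CommandLine.Option(
      names = {"--rpc-ws-api"},
      split = " {0,1}, {0,1}",
      arity = "1..*")
  List<String> apis = new ArrayList<>();

  public static void main(String[] args) {
    final WsApiSplitDemo demo = new WsApiSplitDemo();
    // The regex tolerates an optional space on either side of each comma.
    new CommandLine(demo).parseArgs("--rpc-ws-api", "ETH, NET,WEB3");
    System.out.println(demo.apis); // [ETH, NET, WEB3]
  }
}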

@ -37,7 +37,7 @@ public class NetworkingOptions implements CLIOptions<NetworkingConfiguration> {
private final String DNS_DISCOVERY_SERVER_OVERRIDE_FLAG = "--Xp2p-dns-discovery-server";
private final String DISCOVERY_PROTOCOL_V5_ENABLED = "--Xv5-discovery-enabled";
/** The constant FILTER_ON_ENR_FORK_ID. */
public static final String FILTER_ON_ENR_FORK_ID = "--Xfilter-on-enr-fork-id";
public static final String FILTER_ON_ENR_FORK_ID = "--filter-on-enr-fork-id";
@CommandLine.Option(
names = INITIATE_CONNECTIONS_FREQUENCY_FLAG,
@ -76,9 +76,9 @@ public class NetworkingOptions implements CLIOptions<NetworkingConfiguration> {
@CommandLine.Option(
names = FILTER_ON_ENR_FORK_ID,
hidden = true,
defaultValue = "false",
defaultValue = "true",
description = "Whether to enable filtering of peers based on the ENR field ForkId)")
private final Boolean filterOnEnrForkId = false;
private final Boolean filterOnEnrForkId = NetworkingConfiguration.DEFAULT_FILTER_ON_ENR_FORK_ID;
@CommandLine.Option(
hidden = true,

@ -19,7 +19,7 @@ import static org.hyperledger.besu.cli.subcommands.ValidateConfigSubCommand.COMM
import org.hyperledger.besu.cli.BesuCommand;
import org.hyperledger.besu.cli.DefaultCommandValues;
import org.hyperledger.besu.cli.util.TomlConfigFileDefaultProvider;
import org.hyperledger.besu.cli.util.TomlConfigurationDefaultProvider;
import org.hyperledger.besu.cli.util.VersionProvider;
import java.io.PrintWriter;
@ -69,7 +69,8 @@ public class ValidateConfigSubCommand implements Runnable {
public void run() {
checkNotNull(parentCommand);
try {
new TomlConfigFileDefaultProvider(commandLine, dataPath.toFile()).loadConfigurationFromFile();
TomlConfigurationDefaultProvider.fromFile(commandLine, dataPath.toFile())
.loadConfigurationFromFile();
} catch (Exception e) {
this.out.println(e);
return;

@ -16,13 +16,20 @@
package org.hyperledger.besu.cli.subcommands.storage;
import static com.google.common.base.Preconditions.checkArgument;
import static org.hyperledger.besu.cli.options.stable.DataStorageOptions.BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD;
import static org.hyperledger.besu.controller.BesuController.DATABASE_PATH;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
import org.hyperledger.besu.cli.options.stable.DataStorageOptions;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.chain.MutableBlockchain;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.rlp.BytesValueRLPInput;
import org.hyperledger.besu.ethereum.rlp.RLP;
import org.hyperledger.besu.ethereum.trie.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.bonsai.trielog.TrieLogFactoryImpl;
import org.hyperledger.besu.ethereum.trie.bonsai.trielog.TrieLogLayer;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import java.io.File;
@ -32,6 +39,7 @@ import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.PrintWriter;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.IdentityHashMap;
@ -39,6 +47,8 @@ import java.util.List;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;
import com.google.common.annotations.VisibleForTesting;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -50,7 +60,7 @@ public class TrieLogHelper {
private static final int ROCKSDB_MAX_INSERTS_PER_TRANSACTION = 1000;
private static final Logger LOG = LoggerFactory.getLogger(TrieLogHelper.class);
static void prune(
void prune(
final DataStorageConfiguration config,
final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
final MutableBlockchain blockchain,
@ -60,17 +70,22 @@ public class TrieLogHelper {
validatePruneConfiguration(config);
final long layersToRetain = config.getUnstable().getBonsaiTrieLogRetentionThreshold();
final long layersToRetain = config.getBonsaiMaxLayersToLoad();
final long chainHeight = blockchain.getChainHeadBlockNumber();
final long lastBlockNumberToRetainTrieLogsFor = chainHeight - layersToRetain + 1;
if (!validPruneRequirements(blockchain, chainHeight, lastBlockNumberToRetainTrieLogsFor)) {
if (!validatePruneRequirements(
blockchain,
chainHeight,
lastBlockNumberToRetainTrieLogsFor,
rootWorldStateStorage,
layersToRetain)) {
return;
}
final long numberOfBatches = calculateNumberofBatches(layersToRetain);
final long numberOfBatches = calculateNumberOfBatches(layersToRetain);
processTrieLogBatches(
rootWorldStateStorage,
@ -80,15 +95,27 @@ public class TrieLogHelper {
numberOfBatches,
batchFileNameBase);
if (rootWorldStateStorage.streamTrieLogKeys(layersToRetain).count() == layersToRetain) {
deleteFiles(batchFileNameBase, numberOfBatches);
LOG.info("Prune ran successfully. Enjoy some disk space back! \uD83D\uDE80");
// There should only be layersToRetain left, but load extra just in case of an unforeseen bug
final long countAfterPrune =
rootWorldStateStorage
.streamTrieLogKeys(layersToRetain + DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE)
.count();
if (countAfterPrune == layersToRetain) {
if (deleteFiles(batchFileNameBase, numberOfBatches)) {
LOG.info("Prune ran successfully. Enjoy some disk space back! \uD83D\uDE80");
} else {
throw new IllegalStateException(
"There was an error deleting the trie log backup files. Please ensure besu is working before deleting them manually.");
}
} else {
LOG.error("Prune failed. Re-run the subcommand to load the trie logs from file.");
throw new IllegalStateException(
String.format(
"Remaining trie logs (%d) did not match %s (%d). Trie logs backup files have not been deleted, it is safe to rerun the subcommand.",
countAfterPrune, BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD, layersToRetain));
}
}
private static void processTrieLogBatches(
private void processTrieLogBatches(
final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
final MutableBlockchain blockchain,
final long chainHeight,
@ -97,16 +124,15 @@ public class TrieLogHelper {
final String batchFileNameBase) {
for (long batchNumber = 1; batchNumber <= numberOfBatches; batchNumber++) {
final String batchFileName = batchFileNameBase + "-" + batchNumber;
final long firstBlockOfBatch = chainHeight - ((batchNumber - 1) * BATCH_SIZE);
final long lastBlockOfBatch =
Math.max(chainHeight - (batchNumber * BATCH_SIZE), lastBlockNumberToRetainTrieLogsFor);
final List<Hash> trieLogKeys =
getTrieLogKeysForBlocks(blockchain, firstBlockOfBatch, lastBlockOfBatch);
saveTrieLogBatches(batchFileNameBase, rootWorldStateStorage, batchNumber, trieLogKeys);
LOG.info("Saving trie logs to retain in file (batch {})...", batchNumber);
saveTrieLogBatches(batchFileName, rootWorldStateStorage, trieLogKeys);
}
LOG.info("Clear trie logs...");
@ -117,23 +143,20 @@ public class TrieLogHelper {
}
}
private static void saveTrieLogBatches(
final String batchFileNameBase,
private void saveTrieLogBatches(
final String batchFileName,
final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
final long batchNumber,
final List<Hash> trieLogKeys) {
LOG.info("Saving trie logs to retain in file (batch {})...", batchNumber);
try {
saveTrieLogsInFile(trieLogKeys, rootWorldStateStorage, batchNumber, batchFileNameBase);
saveTrieLogsInFile(trieLogKeys, rootWorldStateStorage, batchFileName);
} catch (IOException e) {
LOG.error("Error saving trie logs to file: {}", e.getMessage());
throw new RuntimeException(e);
}
}
private static void restoreTrieLogBatches(
private void restoreTrieLogBatches(
final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
final long batchNumber,
final String batchFileNameBase) {
@ -147,19 +170,25 @@ public class TrieLogHelper {
}
}
private static void deleteFiles(final String batchFileNameBase, final long numberOfBatches) {
private boolean deleteFiles(final String batchFileNameBase, final long numberOfBatches) {
LOG.info("Deleting files...");
for (long batchNumber = 1; batchNumber <= numberOfBatches; batchNumber++) {
File file = new File(batchFileNameBase + "-" + batchNumber);
if (file.exists()) {
file.delete();
try {
for (long batchNumber = 1; batchNumber <= numberOfBatches; batchNumber++) {
File file = new File(batchFileNameBase + "-" + batchNumber);
if (file.exists()) {
file.delete();
}
}
return true;
} catch (Exception e) {
LOG.error("Error deleting files", e);
return false;
}
}
private static List<Hash> getTrieLogKeysForBlocks(
private List<Hash> getTrieLogKeysForBlocks(
final MutableBlockchain blockchain,
final long firstBlockOfBatch,
final long lastBlockOfBatch) {
@ -173,14 +202,17 @@ public class TrieLogHelper {
return trieLogKeys;
}
private static long calculateNumberofBatches(final long layersToRetain) {
private long calculateNumberOfBatches(final long layersToRetain) {
return layersToRetain / BATCH_SIZE + ((layersToRetain % BATCH_SIZE == 0) ? 0 : 1);
}
private static boolean validPruneRequirements(
private boolean validatePruneRequirements(
final MutableBlockchain blockchain,
final long chainHeight,
final long lastBlockNumberToRetainTrieLogsFor) {
final long lastBlockNumberToRetainTrieLogsFor,
final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
final long layersToRetain) {
if (lastBlockNumberToRetainTrieLogsFor < 0) {
throw new IllegalArgumentException(
"Trying to retain more trie logs than chain length ("
@ -188,6 +220,19 @@ public class TrieLogHelper {
+ "), skipping pruning");
}
// Need to ensure we're loading at least layersToRetain if they exist
// plus an extra threshold to account for forks and orphans
final long clampedCountBeforePruning =
rootWorldStateStorage
.streamTrieLogKeys(layersToRetain + DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE)
.count();
if (clampedCountBeforePruning < layersToRetain) {
throw new IllegalArgumentException(
String.format(
"Trie log count (%d) is less than retention limit (%d), skipping pruning",
clampedCountBeforePruning, layersToRetain));
}
final Optional<Hash> finalizedBlockHash = blockchain.getFinalized();
if (finalizedBlockHash.isEmpty()) {
@ -204,15 +249,14 @@ public class TrieLogHelper {
return true;
}
private static void recreateTrieLogs(
private void recreateTrieLogs(
final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
final long batchNumber,
final String batchFileNameBase)
throws IOException {
// process in chunk to avoid OOM
IdentityHashMap<byte[], byte[]> trieLogsToRetain =
readTrieLogsFromFile(batchFileNameBase, batchNumber);
final String batchFileName = batchFileNameBase + "-" + batchNumber;
IdentityHashMap<byte[], byte[]> trieLogsToRetain = readTrieLogsFromFile(batchFileName);
final int chunkSize = ROCKSDB_MAX_INSERTS_PER_TRANSACTION;
List<byte[]> keys = new ArrayList<>(trieLogsToRetain.keySet());
@ -221,7 +265,7 @@ public class TrieLogHelper {
}
}
private static void processTransactionChunk(
private void processTransactionChunk(
final int startIndex,
final int chunkSize,
final List<byte[]> keys,
@ -241,35 +285,39 @@ public class TrieLogHelper {
updater.getTrieLogStorageTransaction().commit();
}
private static void validatePruneConfiguration(final DataStorageConfiguration config) {
@VisibleForTesting
void validatePruneConfiguration(final DataStorageConfiguration config) {
checkArgument(
config.getUnstable().getBonsaiTrieLogRetentionThreshold()
>= config.getBonsaiMaxLayersToLoad(),
config.getBonsaiMaxLayersToLoad()
>= DataStorageConfiguration.Unstable.MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT,
String.format(
"--Xbonsai-trie-log-retention-threshold minimum value is %d",
config.getBonsaiMaxLayersToLoad()));
BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD + " minimum value is %d",
DataStorageConfiguration.Unstable.MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT));
checkArgument(
config.getUnstable().getBonsaiTrieLogPruningLimit() > 0,
config.getUnstable().getBonsaiTrieLogPruningWindowSize() > 0,
String.format(
"--Xbonsai-trie-log-pruning-limit=%d must be greater than 0",
config.getUnstable().getBonsaiTrieLogPruningLimit()));
DataStorageOptions.Unstable.BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE
+ "=%d must be greater than 0",
config.getUnstable().getBonsaiTrieLogPruningWindowSize()));
checkArgument(
config.getUnstable().getBonsaiTrieLogPruningLimit()
> config.getUnstable().getBonsaiTrieLogRetentionThreshold(),
config.getUnstable().getBonsaiTrieLogPruningWindowSize()
> config.getBonsaiMaxLayersToLoad(),
String.format(
"--Xbonsai-trie-log-pruning-limit=%d must greater than --Xbonsai-trie-log-retention-threshold=%d",
config.getUnstable().getBonsaiTrieLogPruningLimit(),
config.getUnstable().getBonsaiTrieLogRetentionThreshold()));
DataStorageOptions.Unstable.BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE
+ "=%d must be greater than "
+ BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD
+ "=%d",
config.getUnstable().getBonsaiTrieLogPruningWindowSize(),
config.getBonsaiMaxLayersToLoad()));
}
private static void saveTrieLogsInFile(
private void saveTrieLogsInFile(
final List<Hash> trieLogsKeys,
final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
final long batchNumber,
final String batchFileNameBase)
final String batchFileName)
throws IOException {
File file = new File(batchFileNameBase + "-" + batchNumber);
File file = new File(batchFileName);
if (file.exists()) {
LOG.error("File already exists, skipping file creation");
return;
@ -285,17 +333,14 @@ public class TrieLogHelper {
}
@SuppressWarnings("unchecked")
private static IdentityHashMap<byte[], byte[]> readTrieLogsFromFile(
final String batchFileNameBase, final long batchNumber) {
IdentityHashMap<byte[], byte[]> readTrieLogsFromFile(final String batchFileName) {
IdentityHashMap<byte[], byte[]> trieLogs;
try (FileInputStream fis = new FileInputStream(batchFileNameBase + "-" + batchNumber);
try (FileInputStream fis = new FileInputStream(batchFileName);
ObjectInputStream ois = new ObjectInputStream(fis)) {
trieLogs = (IdentityHashMap<byte[], byte[]>) ois.readObject();
} catch (IOException | ClassNotFoundException e) {
LOG.error(e.getMessage());
throw new RuntimeException(e);
}
@ -303,7 +348,53 @@ public class TrieLogHelper {
return trieLogs;
}
private static IdentityHashMap<byte[], byte[]> getTrieLogs(
private void saveTrieLogsAsRlpInFile(
final List<Hash> trieLogsKeys,
final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
final String batchFileName) {
File file = new File(batchFileName);
if (file.exists()) {
LOG.error("File already exists, skipping file creation");
return;
}
final IdentityHashMap<byte[], byte[]> trieLogs =
getTrieLogs(trieLogsKeys, rootWorldStateStorage);
final Bytes rlp =
RLP.encode(
o ->
o.writeList(
trieLogs.entrySet(), (val, out) -> out.writeRaw(Bytes.wrap(val.getValue()))));
try {
Files.write(file.toPath(), rlp.toArrayUnsafe());
} catch (IOException e) {
LOG.error(e.getMessage());
throw new RuntimeException(e);
}
}
IdentityHashMap<byte[], byte[]> readTrieLogsAsRlpFromFile(final String batchFileName) {
try {
final Bytes file = Bytes.wrap(Files.readAllBytes(Path.of(batchFileName)));
final BytesValueRLPInput input = new BytesValueRLPInput(file, false);
input.enterList();
final IdentityHashMap<byte[], byte[]> trieLogs = new IdentityHashMap<>();
while (!input.isEndOfCurrentList()) {
final Bytes trieLogBytes = input.currentListAsBytes();
TrieLogLayer trieLogLayer =
TrieLogFactoryImpl.readFrom(new BytesValueRLPInput(Bytes.wrap(trieLogBytes), false));
trieLogs.put(trieLogLayer.getBlockHash().toArrayUnsafe(), trieLogBytes.toArrayUnsafe());
}
input.leaveList();
return trieLogs;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private IdentityHashMap<byte[], byte[]> getTrieLogs(
final List<Hash> trieLogKeys, final BonsaiWorldStateKeyValueStorage rootWorldStateStorage) {
IdentityHashMap<byte[], byte[]> trieLogsToRetain = new IdentityHashMap<>();
@ -316,7 +407,7 @@ public class TrieLogHelper {
return trieLogsToRetain;
}
static TrieLogCount getCount(
TrieLogCount getCount(
final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
final int limit,
final Blockchain blockchain) {
@ -351,11 +442,31 @@ public class TrieLogHelper {
return new TrieLogCount(total.get(), canonicalCount.get(), forkCount.get(), orphanCount.get());
}
static void printCount(final PrintWriter out, final TrieLogCount count) {
void printCount(final PrintWriter out, final TrieLogCount count) {
out.printf(
"trieLog count: %s\n - canonical count: %s\n - fork count: %s\n - orphaned count: %s\n",
count.total, count.canonicalCount, count.forkCount, count.orphanCount);
}
void importTrieLog(
final BonsaiWorldStateKeyValueStorage rootWorldStateStorage, final Path trieLogFilePath) {
var trieLog = readTrieLogsAsRlpFromFile(trieLogFilePath.toString());
var updater = rootWorldStateStorage.updater();
trieLog.forEach((key, value) -> updater.getTrieLogStorageTransaction().put(key, value));
updater.getTrieLogStorageTransaction().commit();
}
void exportTrieLog(
final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
final List<Hash> trieLogHash,
final Path directoryPath)
throws IOException {
final String trieLogFile = directoryPath.toString();
saveTrieLogsAsRlpInFile(trieLogHash, rootWorldStateStorage, trieLogFile);
}
record TrieLogCount(int total, int canonicalCount, int forkCount, int orphanCount) {}
}
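A hedged sketch of the file framing used by exportTrieLog/importTrieLog above: the export file is a single RLP list whose items are the raw trie-log blobs. It uses only the RLP calls that appear in this class and assumes Besu's RLP utilities are on the classpath; the payloads below are dummy single-byte items rather than real trie logs (raw payloads must themselves be valid RLP items for the read side to recover them):

import java.util.List;
import org.apache.tuweni.bytes.Bytes;
import org.hyperledger.besu.ethereum.rlp.BytesValueRLPInput;
import org.hyperledger.besu.ethereum.rlp.RLP;

public class TrieLogFramingDemo {
  public static void main(String[] args) {
    // Stand-ins for already RLP-encoded trie-log layers.
    final List<Bytes> blobs = List.of(Bytes.fromHexString("0x01"), Bytes.fromHexString("0x02"));

    // Write side: one outer list, each payload written raw, as in saveTrieLogsAsRlpInFile.
    final Bytes rlp = RLP.encode(o -> o.writeList(blobs, (val, out) -> out.writeRaw(val)));

    // Read side: iterate the list and recover each payload, as in readTrieLogsAsRlpFromFile.
    final BytesValueRLPInput input = new BytesValueRLPInput(rlp, false);
    input.enterList();
    while (!input.isEndOfCurrentList()) {
      System.out.println(input.currentListAsBytes());
    }
    input.leaveList();
  }
}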

@ -19,6 +19,7 @@ import static com.google.common.base.Preconditions.checkNotNull;
import org.hyperledger.besu.cli.util.VersionProvider;
import org.hyperledger.besu.controller.BesuController;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.chain.MutableBlockchain;
import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.ethereum.trie.bonsai.storage.BonsaiWorldStateKeyValueStorage;
@ -26,9 +27,11 @@ import org.hyperledger.besu.ethereum.trie.bonsai.trielog.TrieLogPruner;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import java.io.IOException;
import java.io.PrintWriter;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;
@ -43,7 +46,12 @@ import picocli.CommandLine.ParentCommand;
description = "Manipulate trie logs",
mixinStandardHelpOptions = true,
versionProvider = VersionProvider.class,
subcommands = {TrieLogSubCommand.CountTrieLog.class, TrieLogSubCommand.PruneTrieLog.class})
subcommands = {
TrieLogSubCommand.CountTrieLog.class,
TrieLogSubCommand.PruneTrieLog.class,
TrieLogSubCommand.ExportTrieLog.class,
TrieLogSubCommand.ImportTrieLog.class
})
public class TrieLogSubCommand implements Runnable {
@SuppressWarnings("UnusedVariable")
@ -81,14 +89,15 @@ public class TrieLogSubCommand implements Runnable {
@Override
public void run() {
TrieLogContext context = getTrieLogContext();
final TrieLogContext context = getTrieLogContext();
final PrintWriter out = spec.commandLine().getOut();
out.println("Counting trie logs...");
TrieLogHelper.printCount(
final TrieLogHelper trieLogHelper = new TrieLogHelper();
trieLogHelper.printCount(
out,
TrieLogHelper.getCount(
trieLogHelper.getCount(
context.rootWorldStateStorage, Integer.MAX_VALUE, context.blockchain));
}
}
@ -96,7 +105,7 @@ public class TrieLogSubCommand implements Runnable {
@Command(
name = "prune",
description =
"This command prunes all trie log layers below the retention threshold, including orphaned trie logs.",
"This command prunes all trie log layers below the retention limit, including orphaned trie logs.",
mixinStandardHelpOptions = true,
versionProvider = VersionProvider.class)
static class PruneTrieLog implements Runnable {
@ -111,11 +120,12 @@ public class TrieLogSubCommand implements Runnable {
@Override
public void run() {
TrieLogContext context = getTrieLogContext();
final TrieLogContext context = getTrieLogContext();
final Path dataDirectoryPath =
Paths.get(
TrieLogSubCommand.parentCommand.parentCommand.dataDir().toAbsolutePath().toString());
TrieLogHelper.prune(
final TrieLogHelper trieLogHelper = new TrieLogHelper();
trieLogHelper.prune(
context.config(),
context.rootWorldStateStorage(),
context.blockchain(),
@ -123,6 +133,105 @@ public class TrieLogSubCommand implements Runnable {
}
}
@Command(
name = "export",
description = "This command exports the trie log of a determined block to a binary file",
mixinStandardHelpOptions = true,
versionProvider = VersionProvider.class)
static class ExportTrieLog implements Runnable {
@SuppressWarnings("unused")
@ParentCommand
private TrieLogSubCommand parentCommand;
@SuppressWarnings("unused")
@CommandLine.Spec
private CommandLine.Model.CommandSpec spec; // Picocli injects reference to command spec
@SuppressWarnings("unused")
@CommandLine.Option(
names = "--trie-log-block-hash",
description =
"Comma separated list of hashes from the blocks you want to export the trie logs of",
split = " {0,1}, {0,1}",
arity = "1..*")
private List<String> trieLogBlockHashList;
@CommandLine.Option(
names = "--trie-log-file-path",
description = "The file you want to export the trie logs to",
arity = "1..1")
private Path trieLogFilePath = null;
@Override
public void run() {
if (trieLogFilePath == null) {
trieLogFilePath =
Paths.get(
TrieLogSubCommand.parentCommand
.parentCommand
.dataDir()
.resolve("trie-logs.bin")
.toAbsolutePath()
.toString());
}
final TrieLogContext context = getTrieLogContext();
final List<Hash> listOfBlockHashes =
trieLogBlockHashList.stream().map(Hash::fromHexString).toList();
final TrieLogHelper trieLogHelper = new TrieLogHelper();
try {
trieLogHelper.exportTrieLog(
context.rootWorldStateStorage(), listOfBlockHashes, trieLogFilePath);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
@Command(
name = "import",
description = "This command imports a trie log exported by another besu node",
mixinStandardHelpOptions = true,
versionProvider = VersionProvider.class)
static class ImportTrieLog implements Runnable {
@SuppressWarnings("unused")
@ParentCommand
private TrieLogSubCommand parentCommand;
@SuppressWarnings("unused")
@CommandLine.Spec
private CommandLine.Model.CommandSpec spec; // Picocli injects reference to command spec
@CommandLine.Option(
names = "--trie-log-file-path",
description = "The file you want to import the trie logs from",
arity = "1..1")
private Path trieLogFilePath = null;
@Override
public void run() {
if (trieLogFilePath == null) {
trieLogFilePath =
Paths.get(
TrieLogSubCommand.parentCommand
.parentCommand
.dataDir()
.resolve("trie-logs.bin")
.toAbsolutePath()
.toString());
}
TrieLogContext context = getTrieLogContext();
final TrieLogHelper trieLogHelper = new TrieLogHelper();
trieLogHelper.importTrieLog(context.rootWorldStateStorage(), trieLogFilePath);
}
}
record TrieLogContext(
DataStorageConfiguration config,
BonsaiWorldStateKeyValueStorage rootWorldStateStorage,

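The prune subcommand above delegates to TrieLogHelper.prune, which walks the chain head downwards in fixed-size batches. A small standalone sketch of how those batch boundaries fall; BATCH_SIZE's real value is not shown in this diff, so 1_000 is an assumed illustrative value, and the arithmetic mirrors calculateNumberOfBatches and processTrieLogBatches:

public class PruneBatchMathDemo {
  // Assumed illustrative value; the real BATCH_SIZE constant lives in TrieLogHelper.
  static final long BATCH_SIZE = 1_000;

  static long numberOfBatches(final long layersToRetain) {
    return layersToRetain / BATCH_SIZE + ((layersToRetain % BATCH_SIZE == 0) ? 0 : 1);
  }

  public static void main(String[] args) {
    final long chainHeight = 10_000;
    final long layersToRetain = 2_500;
    final long lastBlockToRetain = chainHeight - layersToRetain + 1; // 7_501
    for (long batch = 1; batch <= numberOfBatches(layersToRetain); batch++) {
      final long firstBlockOfBatch = chainHeight - ((batch - 1) * BATCH_SIZE);
      final long lastBlockOfBatch =
          Math.max(chainHeight - (batch * BATCH_SIZE), lastBlockToRetain);
      // Prints 10000..9000, 9000..8000, 8000..7501 for these assumed values.
      System.out.printf("batch %d: blocks %d..%d%n", batch, firstBlockOfBatch, lastBlockOfBatch);
    }
  }
}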
@ -0,0 +1,125 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli.util;
import java.util.Map;
import java.util.Optional;
import picocli.CommandLine;
/**
* Abstract class for finding configuration resources. This class provides a common structure for
* classes that need to find configuration resources based on command line options and environment
* variables.
*
* @param <T> the type of configuration resource this finder will return
*/
public abstract class AbstractConfigurationFinder<T> {
/**
* Returns the name of the configuration option.
*
* @return the name of the configuration option
*/
protected abstract String getConfigOptionName();
/**
* Returns the name of the environment variable for the configuration.
*
* @return the name of the environment variable for the configuration
*/
protected abstract String getConfigEnvName();
/**
* Finds the configuration resource based on command line options and environment variables.
*
* @param environment the environment variables
* @param parseResult the command line parse result
* @return an Optional containing the configuration resource, or an empty Optional if no
* configuration resource was found
*/
public Optional<T> findConfiguration(
final Map<String, String> environment, final CommandLine.ParseResult parseResult) {
final CommandLine commandLine = parseResult.commandSpec().commandLine();
if (isConfigSpecifiedInBothSources(environment, parseResult)) {
throwExceptionForBothSourcesSpecified(environment, parseResult, commandLine);
}
if (parseResult.hasMatchedOption(getConfigOptionName())) {
return getFromOption(parseResult, commandLine);
}
if (environment.containsKey(getConfigEnvName())) {
return getFromEnvironment(environment, commandLine);
}
return Optional.empty();
}
/**
* Gets the configuration resource from the command line option.
*
* @param parseResult the command line parse result
* @param commandLine the command line
* @return an Optional containing the configuration resource, or an empty Optional if the
* configuration resource was not specified in the command line option
*/
protected abstract Optional<T> getFromOption(
final CommandLine.ParseResult parseResult, final CommandLine commandLine);
/**
* Gets the configuration resource from the environment variable.
*
* @param environment the environment variables
* @param commandLine the command line
* @return an Optional containing the configuration resource, or an empty Optional if the
* configuration resource was not specified in the environment variable
*/
protected abstract Optional<T> getFromEnvironment(
final Map<String, String> environment, final CommandLine commandLine);
/**
* Checks if the configuration resource is specified in both command line options and environment
* variables.
*
* @param environment the environment variables
* @param parseResult the command line parse result
* @return true if the configuration resource is specified in both places, false otherwise
*/
public boolean isConfigSpecifiedInBothSources(
final Map<String, String> environment, final CommandLine.ParseResult parseResult) {
return parseResult.hasMatchedOption(getConfigOptionName())
&& environment.containsKey(getConfigEnvName());
}
/**
* Throws an exception if the configuration resource is specified in both command line options and
* environment variables.
*
* @param environment the environment variables
* @param parseResult the command line parse result
* @param commandLine the command line
*/
public void throwExceptionForBothSourcesSpecified(
final Map<String, String> environment,
final CommandLine.ParseResult parseResult,
final CommandLine commandLine) {
throw new CommandLine.ParameterException(
commandLine,
String.format(
"Both %s=%s and %s %s specified. Please specify only one.",
getConfigEnvName(),
environment.get(getConfigEnvName()),
getConfigOptionName(),
parseResult.matchedOption(getConfigOptionName()).stringValues()));
}
}
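A self-contained illustration, in plain Java, of the precedence contract this class encodes: an explicit CLI value wins over the environment variable, and supplying both is rejected. The helper below is hypothetical and only mirrors findConfiguration's control flow:

import java.util.Map;
import java.util.Optional;

public class ConfigPrecedenceDemo {
  static Optional<String> find(
      final Optional<String> cliValue, final Map<String, String> env, final String envKey) {
    if (cliValue.isPresent() && env.containsKey(envKey)) {
      throw new IllegalArgumentException(
          "Both " + envKey + " and the CLI option were specified. Please specify only one.");
    }
    if (cliValue.isPresent()) {
      return cliValue;
    }
    return Optional.ofNullable(env.get(envKey));
  }

  public static void main(String[] args) {
    // CLI option present: it wins.
    System.out.println(find(Optional.of("/opt/besu/config.toml"), Map.of(), "BESU_CONFIG_FILE"));
    // Only the environment variable present: it is used.
    System.out.println(
        find(Optional.empty(), Map.of("BESU_CONFIG_FILE", "/etc/besu.toml"), "BESU_CONFIG_FILE"));
  }
}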

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.cli.util;
import static java.util.Arrays.asList;
import java.util.List;
import picocli.CommandLine.IDefaultValueProvider;
@ -34,8 +32,8 @@ public class CascadingDefaultProvider implements IDefaultValueProvider {
*
* @param defaultValueProviders List of default value providers
*/
public CascadingDefaultProvider(final IDefaultValueProvider... defaultValueProviders) {
this.defaultValueProviders = asList(defaultValueProviders);
public CascadingDefaultProvider(final List<IDefaultValueProvider> defaultValueProviders) {
this.defaultValueProviders = defaultValueProviders;
}
@Override

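For context on the constructor change above, a minimal sketch of a cascading default provider. The resolution loop itself is not part of this diff, so the first-non-null-wins behaviour shown here is an assumption about how the listed providers are consulted:

import java.util.List;
import picocli.CommandLine.IDefaultValueProvider;
import picocli.CommandLine.Model.ArgSpec;

public class CascadingProviderSketch implements IDefaultValueProvider {
  private final List<IDefaultValueProvider> providers;

  public CascadingProviderSketch(final List<IDefaultValueProvider> providers) {
    this.providers = providers;
  }

  @Override
  public String defaultValue(final ArgSpec argSpec) throws Exception {
    // Assumed behaviour: the first provider that supplies a value wins.
    for (final IDefaultValueProvider provider : providers) {
      final String value = provider.defaultValue(argSpec);
      if (value != null) {
        return value;
      }
    }
    return null;
  }
}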
@ -0,0 +1,100 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli.util;
import static org.hyperledger.besu.cli.DefaultCommandValues.CONFIG_FILE_OPTION_NAME;
import java.io.File;
import java.util.Map;
import java.util.Optional;
import picocli.CommandLine;
/**
* Class for finding configuration files. This class extends the AbstractConfigurationFinder and
* provides methods for finding configuration files based on command line options and environment
* variables.
*/
public class ConfigFileFinder extends AbstractConfigurationFinder<File> {
private static final String CONFIG_FILE_ENV_NAME = "BESU_CONFIG_FILE";
/**
* Returns the name of the configuration option.
*
* @return the name of the configuration option
*/
@Override
protected String getConfigOptionName() {
return CONFIG_FILE_OPTION_NAME;
}
/**
* Returns the name of the environment variable for the configuration.
*
* @return the name of the environment variable for the configuration
*/
@Override
protected String getConfigEnvName() {
return CONFIG_FILE_ENV_NAME;
}
/**
* Gets the configuration file from the command line option.
*
* @param parseResult the command line parse result
* @param commandLine the command line
* @return an Optional containing the configuration file, or an empty Optional if the
* configuration file was not specified in the command line option
*/
@Override
public Optional<File> getFromOption(
final CommandLine.ParseResult parseResult, final CommandLine commandLine) {
final CommandLine.Model.OptionSpec configFileOption =
parseResult.matchedOption(CONFIG_FILE_OPTION_NAME);
try {
File file = configFileOption.getter().get();
if (!file.exists()) {
throw new CommandLine.ParameterException(
commandLine,
String.format("Unable to read TOML configuration, file not found: %s", file));
}
return Optional.of(file);
} catch (final Exception e) {
throw new CommandLine.ParameterException(commandLine, e.getMessage(), e);
}
}
/**
* Gets the configuration file from the environment variable.
*
* @param environment the environment variables
* @param commandLine the command line
* @return an Optional containing the configuration file, or an empty Optional if the
* configuration file was not specified in the environment variable
*/
@Override
public Optional<File> getFromEnvironment(
final Map<String, String> environment, final CommandLine commandLine) {
final File toml = new File(environment.get(CONFIG_FILE_ENV_NAME));
if (!toml.exists()) {
throw new CommandLine.ParameterException(
commandLine,
String.format(
"TOML file %s specified in environment variable %s not found",
environment.get(CONFIG_FILE_ENV_NAME), CONFIG_FILE_ENV_NAME));
}
return Optional.of(toml);
}
}

@ -15,6 +15,7 @@
package org.hyperledger.besu.cli.util;
import java.io.File;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@ -25,7 +26,6 @@ import picocli.CommandLine;
import picocli.CommandLine.IDefaultValueProvider;
import picocli.CommandLine.IExecutionStrategy;
import picocli.CommandLine.IParameterExceptionHandler;
import picocli.CommandLine.Model.OptionSpec;
import picocli.CommandLine.ParameterException;
import picocli.CommandLine.ParseResult;
@ -54,8 +54,13 @@ public class ConfigOptionSearchAndRunHandler extends CommandLine.RunLast {
@Override
public List<Object> handle(final ParseResult parseResult) throws ParameterException {
final CommandLine commandLine = parseResult.commandSpec().commandLine();
final Optional<File> configFile = findConfigFile(parseResult, commandLine);
commandLine.setDefaultValueProvider(createDefaultValueProvider(commandLine, configFile));
commandLine.setDefaultValueProvider(
createDefaultValueProvider(
commandLine,
new ConfigFileFinder().findConfiguration(environment, parseResult),
new ProfileFinder().findConfiguration(environment, parseResult)));
commandLine.setExecutionStrategy(resultHandler);
commandLine.setParameterExceptionHandler(parameterExceptionHandler);
commandLine.execute(parseResult.originalArgs().toArray(new String[0]));
@ -63,38 +68,6 @@ public class ConfigOptionSearchAndRunHandler extends CommandLine.RunLast {
return new ArrayList<>();
}
private Optional<File> findConfigFile(
final ParseResult parseResult, final CommandLine commandLine) {
if (parseResult.hasMatchedOption("--config-file")
&& environment.containsKey("BESU_CONFIG_FILE")) {
throw new ParameterException(
commandLine,
String.format(
"TOML file specified using BESU_CONFIG_FILE=%s and --config-file %s",
environment.get("BESU_CONFIG_FILE"),
parseResult.matchedOption("--config-file").stringValues()));
} else if (parseResult.hasMatchedOption("--config-file")) {
final OptionSpec configFileOption = parseResult.matchedOption("--config-file");
try {
return Optional.of(configFileOption.getter().get());
} catch (final Exception e) {
throw new ParameterException(commandLine, e.getMessage(), e);
}
} else if (environment.containsKey("BESU_CONFIG_FILE")) {
final File toml = new File(environment.get("BESU_CONFIG_FILE"));
if (!toml.exists()) {
throw new ParameterException(
commandLine,
String.format(
"TOML file %s specified in environment variable BESU_CONFIG_FILE not found",
environment.get("BESU_CONFIG_FILE")));
}
return Optional.of(toml);
}
return Optional.empty();
}
/**
* Create default value provider default value provider.
*
@ -104,14 +77,22 @@ public class ConfigOptionSearchAndRunHandler extends CommandLine.RunLast {
*/
@VisibleForTesting
IDefaultValueProvider createDefaultValueProvider(
final CommandLine commandLine, final Optional<File> configFile) {
if (configFile.isPresent()) {
return new CascadingDefaultProvider(
new EnvironmentVariableDefaultProvider(environment),
new TomlConfigFileDefaultProvider(commandLine, configFile.get()));
} else {
return new EnvironmentVariableDefaultProvider(environment);
}
final CommandLine commandLine,
final Optional<File> configFile,
final Optional<InputStream> profile) {
List<IDefaultValueProvider> providers = new ArrayList<>();
providers.add(new EnvironmentVariableDefaultProvider(environment));
configFile.ifPresent(
config -> {
if (config.exists()) {
providers.add(TomlConfigurationDefaultProvider.fromFile(commandLine, config));
}
});
profile.ifPresent(
p -> providers.add(TomlConfigurationDefaultProvider.fromInputStream(commandLine, p)));
return new CascadingDefaultProvider(providers);
}
@Override

@ -0,0 +1,76 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli.util;
import static org.hyperledger.besu.cli.DefaultCommandValues.PROFILE_OPTION_NAME;
import org.hyperledger.besu.cli.config.ProfileName;
import java.io.InputStream;
import java.util.Map;
import java.util.Optional;
import picocli.CommandLine;
/**
* Class for finding profile configurations. This class extends the AbstractConfigurationFinder and
* provides methods for finding profile configurations based on command line options and environment
* variables. Each profile corresponds to a TOML configuration file that contains settings for
* various options. The profile to use can be specified with the '--profile' command line option or
* the 'BESU_PROFILE' environment variable.
*/
public class ProfileFinder extends AbstractConfigurationFinder<InputStream> {
private static final String PROFILE_ENV_NAME = "BESU_PROFILE";
@Override
protected String getConfigOptionName() {
return PROFILE_OPTION_NAME;
}
@Override
protected String getConfigEnvName() {
return PROFILE_ENV_NAME;
}
@Override
public Optional<InputStream> getFromOption(
final CommandLine.ParseResult parseResult, final CommandLine commandLine) {
try {
return getProfile(parseResult.matchedOption(PROFILE_OPTION_NAME).getter().get(), commandLine);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
public Optional<InputStream> getFromEnvironment(
final Map<String, String> environment, final CommandLine commandLine) {
return getProfile(ProfileName.valueOf(environment.get(PROFILE_ENV_NAME)), commandLine);
}
private static Optional<InputStream> getProfile(
final ProfileName profileName, final CommandLine commandLine) {
return Optional.of(getTomlFile(commandLine, profileName.getConfigFile()));
}
private static InputStream getTomlFile(final CommandLine commandLine, final String file) {
InputStream resourceUrl = ProfileFinder.class.getClassLoader().getResourceAsStream(file);
if (resourceUrl == null) {
throw new CommandLine.ParameterException(
commandLine, String.format("TOML file %s not found", file));
}
return resourceUrl;
}
}

@ -17,9 +17,13 @@ package org.hyperledger.besu.cli.util;
import org.hyperledger.besu.datatypes.Wei;
import org.hyperledger.besu.util.number.Fraction;
import org.hyperledger.besu.util.number.Percentage;
import org.hyperledger.besu.util.number.PositiveNumber;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Arrays;
@ -41,21 +45,52 @@ import picocli.CommandLine.Model.OptionSpec;
import picocli.CommandLine.ParameterException;
/** The Toml config file default value provider used by PicoCli. */
public class TomlConfigFileDefaultProvider implements IDefaultValueProvider {
public class TomlConfigurationDefaultProvider implements IDefaultValueProvider {
private final CommandLine commandLine;
private final File configFile;
private final InputStream configurationInputStream;
private TomlParseResult result;
/**
* Instantiates a new Toml config file default value provider.
*
* @param commandLine the command line
* @param configFile the config file
* @param configurationInputStream the input stream
*/
public TomlConfigFileDefaultProvider(final CommandLine commandLine, final File configFile) {
private TomlConfigurationDefaultProvider(
final CommandLine commandLine, final InputStream configurationInputStream) {
this.commandLine = commandLine;
this.configFile = configFile;
this.configurationInputStream = configurationInputStream;
}
/**
* Creates a new TomlConfigurationDefaultProvider from a file.
*
* @param commandLine the command line
* @param configFile the configuration file
* @return a new TomlConfigurationDefaultProvider
* @throws ParameterException if the configuration file is not found
*/
public static TomlConfigurationDefaultProvider fromFile(
final CommandLine commandLine, final File configFile) {
try {
return new TomlConfigurationDefaultProvider(commandLine, new FileInputStream(configFile));
} catch (final FileNotFoundException e) {
throw new ParameterException(
commandLine, "Unable to read TOML configuration, file not found.");
}
}
/**
* Creates a new TomlConfigurationDefaultProvider from an input stream.
*
* @param commandLine the command line
* @param inputStream the input stream
* @return a new TomlConfigurationDefaultProvider
*/
public static TomlConfigurationDefaultProvider fromInputStream(
final CommandLine commandLine, final InputStream inputStream) {
return new TomlConfigurationDefaultProvider(commandLine, inputStream);
}
@Override
@ -70,35 +105,33 @@ public class TomlConfigFileDefaultProvider implements IDefaultValueProvider {
private String getConfigurationValue(final OptionSpec optionSpec) {
// NOTE: This temporary fix is necessary to ensure certain options are treated as multi-value.
// This can be done automatically by picocli if the object implements Collection.
final boolean isArray =
getKeyName(optionSpec).map(keyName -> result.isArray(keyName)).orElse(false);
final String defaultValue;
final boolean isArray = getKeyName(optionSpec).map(result::isArray).orElse(false);
// Convert config values to the right string representation for default string value
if (optionSpec.type().equals(Boolean.class) || optionSpec.type().equals(boolean.class)) {
defaultValue = getBooleanEntryAsString(optionSpec);
return getBooleanEntryAsString(optionSpec);
} else if (optionSpec.isMultiValue() || isArray) {
defaultValue = getListEntryAsString(optionSpec);
} else if (optionSpec.type().equals(Integer.class) || optionSpec.type().equals(int.class)) {
defaultValue = getNumericEntryAsString(optionSpec);
} else if (optionSpec.type().equals(Long.class) || optionSpec.type().equals(long.class)) {
defaultValue = getNumericEntryAsString(optionSpec);
} else if (optionSpec.type().equals(Wei.class)) {
defaultValue = getNumericEntryAsString(optionSpec);
} else if (optionSpec.type().equals(BigInteger.class)) {
defaultValue = getNumericEntryAsString(optionSpec);
} else if (optionSpec.type().equals(Double.class) || optionSpec.type().equals(double.class)) {
defaultValue = getNumericEntryAsString(optionSpec);
} else if (optionSpec.type().equals(Float.class) || optionSpec.type().equals(float.class)) {
defaultValue = getNumericEntryAsString(optionSpec);
} else if (optionSpec.type().equals(Percentage.class)) {
defaultValue = getNumericEntryAsString(optionSpec);
} else if (optionSpec.type().equals(Fraction.class)) {
defaultValue = getNumericEntryAsString(optionSpec);
return getListEntryAsString(optionSpec);
} else if (isNumericType(optionSpec.type())) {
return getNumericEntryAsString(optionSpec);
} else { // else will be treated as String
defaultValue = getEntryAsString(optionSpec);
return getEntryAsString(optionSpec);
}
return defaultValue;
}
private boolean isNumericType(final Class<?> type) {
return type.equals(Integer.class)
|| type.equals(int.class)
|| type.equals(Long.class)
|| type.equals(long.class)
|| type.equals(Wei.class)
|| type.equals(BigInteger.class)
|| type.equals(Double.class)
|| type.equals(double.class)
|| type.equals(Float.class)
|| type.equals(float.class)
|| type.equals(Percentage.class)
|| type.equals(Fraction.class)
|| type.equals(PositiveNumber.class);
}
private String getEntryAsString(final OptionSpec spec) {
@ -195,7 +228,8 @@ public class TomlConfigFileDefaultProvider implements IDefaultValueProvider {
private void checkConfigurationValidity() {
if (result == null || result.isEmpty())
throw new ParameterException(
commandLine, String.format("Unable to read TOML configuration file %s", configFile));
commandLine,
String.format("Unable to read TOML configuration file %s", configurationInputStream));
}
/** Load configuration from file. */
@ -203,7 +237,7 @@ public class TomlConfigFileDefaultProvider implements IDefaultValueProvider {
if (result == null) {
try {
final TomlParseResult result = Toml.parse(configFile.toPath());
final TomlParseResult result = Toml.parse(configurationInputStream);
if (result.hasErrors()) {
final String errors =
@ -224,7 +258,6 @@ public class TomlConfigFileDefaultProvider implements IDefaultValueProvider {
commandLine, "Unable to read TOML configuration, file not found.");
}
}
checkConfigurationValidity();
}
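A standalone sketch, not Besu code, of the Tuweni Toml lookups the provider relies on (the same parse/isArray calls used above), assuming tuweni-toml on the classpath:

import org.apache.tuweni.toml.Toml;
import org.apache.tuweni.toml.TomlParseResult;

public class TomlDefaultsDemo {
  public static void main(String[] args) {
    final TomlParseResult result =
        Toml.parse("rpc-ws-enabled=true\nrpc-ws-apis=[\"ETH\",\"NET\"]\nrpc-ws-port=8546\n");
    System.out.println(result.hasErrors());            // false
    System.out.println(result.isArray("rpc-ws-apis")); // true -> handled as a multi-value option
    System.out.println(result.get("rpc-ws-port"));     // 8546 -> handled as a numeric entry
  }
}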

@ -83,6 +83,7 @@ import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier;
import org.hyperledger.besu.ethereum.trie.bonsai.BonsaiWorldStateProvider;
import org.hyperledger.besu.ethereum.trie.bonsai.cache.CachedMerkleTrieLoader;
import org.hyperledger.besu.ethereum.trie.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.bonsai.trielog.TrieLogManager;
import org.hyperledger.besu.ethereum.trie.bonsai.trielog.TrieLogPruner;
import org.hyperledger.besu.ethereum.trie.forest.ForestWorldStateArchive;
import org.hyperledger.besu.ethereum.trie.forest.pruner.MarkSweepPruner;
@ -91,6 +92,7 @@ import org.hyperledger.besu.ethereum.trie.forest.pruner.PrunerConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.WorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.worldstate.WorldStatePreimageStorage;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorageCoordinator;
import org.hyperledger.besu.evm.internal.EvmConfiguration;
@ -781,6 +783,17 @@ public abstract class BesuControllerBuilder implements MiningParameterOverrides
final JsonRpcMethods additionalJsonRpcMethodFactory =
createAdditionalJsonRpcMethodFactory(protocolContext);
if (dataStorageConfiguration.getUnstable().getBonsaiLimitTrieLogsEnabled()
&& DataStorageFormat.BONSAI.equals(dataStorageConfiguration.getDataStorageFormat())) {
final TrieLogManager trieLogManager =
((BonsaiWorldStateProvider) worldStateArchive).getTrieLogManager();
final BonsaiWorldStateKeyValueStorage worldStateKeyValueStorage =
worldStateStorageCoordinator.getStrategy(BonsaiWorldStateKeyValueStorage.class);
final TrieLogPruner trieLogPruner =
createTrieLogPruner(worldStateKeyValueStorage, blockchain, scheduler);
trieLogManager.subscribe(trieLogPruner);
}
final List<Closeable> closeables = new ArrayList<>();
closeables.add(protocolContext.getWorldStateArchive());
closeables.add(storageProvider);
@ -809,6 +822,26 @@ public abstract class BesuControllerBuilder implements MiningParameterOverrides
dataStorageConfiguration);
}
private TrieLogPruner createTrieLogPruner(
final WorldStateKeyValueStorage worldStateStorage,
final Blockchain blockchain,
final EthScheduler scheduler) {
final GenesisConfigOptions genesisConfigOptions = configOptionsSupplier.get();
final boolean isProofOfStake = genesisConfigOptions.getTerminalTotalDifficulty().isPresent();
final TrieLogPruner trieLogPruner =
new TrieLogPruner(
(BonsaiWorldStateKeyValueStorage) worldStateStorage,
blockchain,
scheduler::executeServiceTask,
dataStorageConfiguration.getBonsaiMaxLayersToLoad(),
dataStorageConfiguration.getUnstable().getBonsaiTrieLogPruningWindowSize(),
isProofOfStake);
trieLogPruner.initialize();
return trieLogPruner;
}
/**
* Create synchronizer synchronizer.
*
@ -1070,29 +1103,15 @@ public abstract class BesuControllerBuilder implements MiningParameterOverrides
final CachedMerkleTrieLoader cachedMerkleTrieLoader) {
return switch (dataStorageConfiguration.getDataStorageFormat()) {
case BONSAI -> {
final GenesisConfigOptions genesisConfigOptions = configOptionsSupplier.get();
final boolean isProofOfStake =
genesisConfigOptions.getTerminalTotalDifficulty().isPresent();
final BonsaiWorldStateKeyValueStorage worldStateKeyValueStorage =
worldStateStorageCoordinator.getStrategy(BonsaiWorldStateKeyValueStorage.class);
final TrieLogPruner trieLogPruner =
dataStorageConfiguration.getUnstable().getBonsaiTrieLogPruningEnabled()
? new TrieLogPruner(
worldStateKeyValueStorage,
blockchain,
dataStorageConfiguration.getUnstable().getBonsaiTrieLogRetentionThreshold(),
dataStorageConfiguration.getUnstable().getBonsaiTrieLogPruningLimit(),
isProofOfStake)
: TrieLogPruner.noOpTrieLogPruner();
trieLogPruner.initialize();
yield new BonsaiWorldStateProvider(
worldStateKeyValueStorage,
blockchain,
Optional.of(dataStorageConfiguration.getBonsaiMaxLayersToLoad()),
cachedMerkleTrieLoader,
besuComponent.map(BesuComponent::getBesuPluginContext).orElse(null),
evmConfiguration,
trieLogPruner);
evmConfiguration);
}
case FOREST -> {
final WorldStatePreimageStorage preimageStorage =

@ -42,6 +42,9 @@
<Logger name="org.apache.tuweni.discovery.DNSTimerTask">
<RegexFilter regex="Refreshing DNS records with .*" onMatch="DENY" onMismatch="NEUTRAL" />
</Logger>
<Logger name="org.apache.tuweni.discovery.DNSResolver">
<RegexFilter regex="DNS query error with .*" onMatch="DENY" onMismatch="NEUTRAL" />
</Logger>
<Root level="${sys:root.log.level}">
<AppenderRef ref="Router" />
</Root>

@ -15,7 +15,6 @@
package org.hyperledger.besu.cli;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Arrays.asList;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.startsWith;
@ -28,12 +27,10 @@ import static org.hyperledger.besu.cli.config.NetworkName.HOLESKY;
import static org.hyperledger.besu.cli.config.NetworkName.MAINNET;
import static org.hyperledger.besu.cli.config.NetworkName.MORDOR;
import static org.hyperledger.besu.cli.config.NetworkName.SEPOLIA;
import static org.hyperledger.besu.cli.util.CommandLineUtils.DEPENDENCY_WARNING_MSG;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.ENGINE;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.ETH;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.NET;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.PERM;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.WEB3;
import static org.hyperledger.besu.ethereum.p2p.config.DefaultDiscoveryConfiguration.GOERLI_BOOTSTRAP_NODES;
import static org.hyperledger.besu.ethereum.p2p.config.DefaultDiscoveryConfiguration.GOERLI_DISCOVERY_URL;
import static org.hyperledger.besu.ethereum.p2p.config.DefaultDiscoveryConfiguration.MAINNET_BOOTSTRAP_NODES;
@ -71,8 +68,6 @@ import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.authentication.JwtAlgorithm;
import org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration;
import org.hyperledger.besu.ethereum.api.tls.TlsConfiguration;
import org.hyperledger.besu.ethereum.core.ImmutableMiningParameters;
import org.hyperledger.besu.ethereum.core.ImmutableMiningParameters.MutableInitValues;
import org.hyperledger.besu.ethereum.core.MiningParameters;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.eth.sync.SyncMode;
@ -123,7 +118,6 @@ import com.google.common.collect.Lists;
import com.google.common.io.Resources;
import io.vertx.core.json.JsonObject;
import org.apache.commons.io.FileUtils;
import org.apache.commons.text.StringEscapeUtils;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.toml.Toml;
import org.apache.tuweni.toml.TomlParseResult;
@ -306,17 +300,18 @@ public class BesuCommandTest extends CommandTestAbstract {
final Path tempConfigFilePath = createTempFile("an-invalid-file-name-without-extension", "");
parseCommand("--config-file", tempConfigFilePath.toString());
final String expectedOutputStart =
"Unable to read TOML configuration file " + tempConfigFilePath;
final String expectedOutputStart = "Unable to read TOML configuration file";
assertThat(commandErrorOutput.toString(UTF_8)).startsWith(expectedOutputStart);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
}
@Test
public void callingWithConfigOptionButTomlFileNotFoundShouldDisplayHelp() {
parseCommand("--config-file", "./an-invalid-file-name-sdsd87sjhqoi34io23.toml");
String invalidFile = "./an-invalid-file-name-sdsd87sjhqoi34io23.toml";
parseCommand("--config-file", invalidFile);
final String expectedOutputStart = "Unable to read TOML configuration, file not found.";
final String expectedOutputStart =
String.format("Unable to read TOML configuration, file not found: %s", invalidFile);
assertThat(commandErrorOutput.toString(UTF_8)).startsWith(expectedOutputStart);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
}
@ -364,81 +359,6 @@ public class BesuCommandTest extends CommandTestAbstract {
assertThat(commandOutput.toString(UTF_8)).isEmpty();
}
@Test
public void overrideDefaultValuesIfKeyIsPresentInConfigFile(final @TempDir File dataFolder)
throws IOException {
final URL configFile = this.getClass().getResource("/complete_config.toml");
final Path genesisFile = createFakeGenesisFile(GENESIS_VALID_JSON);
final String updatedConfig =
Resources.toString(configFile, UTF_8)
.replace("/opt/besu/genesis.json", escapeTomlString(genesisFile.toString()))
.replace(
"data-path=\"/opt/besu\"",
"data-path=\"" + escapeTomlString(dataFolder.getPath()) + "\"");
final Path toml = createTempFile("toml", updatedConfig.getBytes(UTF_8));
final List<String> expectedApis = asList(ETH.name(), WEB3.name());
final JsonRpcConfiguration jsonRpcConfiguration = JsonRpcConfiguration.createDefault();
jsonRpcConfiguration.setEnabled(false);
jsonRpcConfiguration.setHost("5.6.7.8");
jsonRpcConfiguration.setPort(5678);
jsonRpcConfiguration.setCorsAllowedDomains(Collections.emptyList());
jsonRpcConfiguration.setRpcApis(expectedApis);
jsonRpcConfiguration.setMaxActiveConnections(1000);
final GraphQLConfiguration graphQLConfiguration = GraphQLConfiguration.createDefault();
graphQLConfiguration.setEnabled(false);
graphQLConfiguration.setHost("6.7.8.9");
graphQLConfiguration.setPort(6789);
final WebSocketConfiguration webSocketConfiguration = WebSocketConfiguration.createDefault();
webSocketConfiguration.setEnabled(false);
webSocketConfiguration.setHost("9.10.11.12");
webSocketConfiguration.setPort(9101);
webSocketConfiguration.setRpcApis(expectedApis);
final MetricsConfiguration metricsConfiguration =
MetricsConfiguration.builder().enabled(false).host("8.6.7.5").port(309).build();
parseCommand("--config-file", toml.toString());
verify(mockRunnerBuilder).discovery(eq(false));
verify(mockRunnerBuilder).ethNetworkConfig(ethNetworkConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).p2pAdvertisedHost(eq("1.2.3.4"));
verify(mockRunnerBuilder).p2pListenPort(eq(1234));
verify(mockRunnerBuilder).jsonRpcConfiguration(eq(jsonRpcConfiguration));
verify(mockRunnerBuilder).graphQLConfiguration(eq(graphQLConfiguration));
verify(mockRunnerBuilder).webSocketConfiguration(eq(webSocketConfiguration));
verify(mockRunnerBuilder).metricsConfiguration(eq(metricsConfiguration));
verify(mockRunnerBuilder).build();
final List<EnodeURL> nodes =
asList(
EnodeURLImpl.fromString("enode://" + VALID_NODE_ID + "@192.168.0.1:4567"),
EnodeURLImpl.fromString("enode://" + VALID_NODE_ID + "@192.168.0.1:4567"),
EnodeURLImpl.fromString("enode://" + VALID_NODE_ID + "@192.168.0.1:4567"));
assertThat(ethNetworkConfigArgumentCaptor.getValue().getBootNodes()).isEqualTo(nodes);
final EthNetworkConfig networkConfig =
new EthNetworkConfig.Builder(EthNetworkConfig.getNetworkConfig(MAINNET))
.setNetworkId(BigInteger.valueOf(42))
.setGenesisConfig(encodeJsonGenesis(GENESIS_VALID_JSON))
.setBootNodes(nodes)
.setDnsDiscoveryUrl(null)
.build();
verify(mockControllerBuilder).dataDirectory(eq(dataFolder.toPath()));
verify(mockControllerBuilderFactory).fromEthNetworkConfig(eq(networkConfig), any(), any());
verify(mockControllerBuilder).synchronizerConfiguration(syncConfigurationCaptor.capture());
assertThat(syncConfigurationCaptor.getValue().getSyncMode()).isEqualTo(SyncMode.FAST);
assertThat(syncConfigurationCaptor.getValue().getFastSyncMinimumPeerCount()).isEqualTo(13);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void nodePermissionsSmartContractWithoutOptionMustError() {
parseCommand("--permissions-nodes-contract-address");
@ -861,81 +781,6 @@ public class BesuCommandTest extends CommandTestAbstract {
.isEmpty();
}
@Test
public void noOverrideDefaultValuesIfKeyIsNotPresentInConfigFile() {
final String configFile = this.getClass().getResource("/partial_config.toml").getFile();
parseCommand("--config-file", configFile);
final JsonRpcConfiguration jsonRpcConfiguration = JsonRpcConfiguration.createDefault();
final GraphQLConfiguration graphQLConfiguration = GraphQLConfiguration.createDefault();
final WebSocketConfiguration webSocketConfiguration = WebSocketConfiguration.createDefault();
final MetricsConfiguration metricsConfiguration = MetricsConfiguration.builder().build();
verify(mockRunnerBuilder).discovery(eq(true));
verify(mockRunnerBuilder)
.ethNetworkConfig(
new EthNetworkConfig(
EthNetworkConfig.jsonConfig(MAINNET),
MAINNET.getNetworkId(),
MAINNET_BOOTSTRAP_NODES,
MAINNET_DISCOVERY_URL));
verify(mockRunnerBuilder).p2pAdvertisedHost(eq("127.0.0.1"));
verify(mockRunnerBuilder).p2pListenPort(eq(30303));
verify(mockRunnerBuilder).jsonRpcConfiguration(eq(jsonRpcConfiguration));
verify(mockRunnerBuilder).graphQLConfiguration(eq(graphQLConfiguration));
verify(mockRunnerBuilder).webSocketConfiguration(eq(webSocketConfiguration));
verify(mockRunnerBuilder).metricsConfiguration(eq(metricsConfiguration));
verify(mockRunnerBuilder).build();
verify(mockControllerBuilder).build();
verify(mockControllerBuilder).synchronizerConfiguration(syncConfigurationCaptor.capture());
final SynchronizerConfiguration syncConfig = syncConfigurationCaptor.getValue();
assertThat(syncConfig.getSyncMode()).isEqualTo(SyncMode.FAST);
assertThat(syncConfig.getFastSyncMinimumPeerCount()).isEqualTo(5);
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void envVariableOverridesValueFromConfigFile() {
final String configFile = this.getClass().getResource("/partial_config.toml").getFile();
final String expectedCoinbase = "0x0000000000000000000000000000000000000004";
setEnvironmentVariable("BESU_MINER_COINBASE", expectedCoinbase);
parseCommand("--config-file", configFile);
verify(mockControllerBuilder)
.miningParameters(
ImmutableMiningParameters.builder()
.mutableInitValues(
MutableInitValues.builder()
.coinbase(Address.fromHexString(expectedCoinbase))
.build())
.build());
}
@Test
public void cliOptionOverridesEnvVariableAndConfig() {
final String configFile = this.getClass().getResource("/partial_config.toml").getFile();
final String expectedCoinbase = "0x0000000000000000000000000000000000000006";
setEnvironmentVariable("BESU_MINER_COINBASE", "0x0000000000000000000000000000000000000004");
parseCommand("--config-file", configFile, "--miner-coinbase", expectedCoinbase);
verify(mockControllerBuilder)
.miningParameters(
ImmutableMiningParameters.builder()
.mutableInitValues(
MutableInitValues.builder()
.coinbase(Address.fromHexString(expectedCoinbase))
.build())
.build());
}
@Test
public void nodekeyOptionMustBeUsed() throws Exception {
final File file = new File("./specific/enclavePrivateKey");
@ -2298,18 +2143,6 @@ public class BesuCommandTest extends CommandTestAbstract {
"Invalid value for option '--rpc-http-api-methods-no-auth', options must be valid RPC methods");
}
@Test
public void rpcWsNoAuthApiMethodsCannotBeInvalid() {
parseCommand("--rpc-ws-enabled", "--rpc-ws-api-methods-no-auth", "invalid");
Mockito.verifyNoInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8))
.contains(
"Invalid value for option '--rpc-ws-api-methods-no-auth', options must be valid RPC methods");
}
@Test
public void rpcHttpOptionsRequiresServiceToBeEnabled() {
parseCommand(
@ -2455,18 +2288,6 @@ public class BesuCommandTest extends CommandTestAbstract {
.contains("Invalid value for option '--rpc-http-api': invalid entries found [BOB]");
}
@Test
public void rpcWsApisPropertyWithInvalidEntryMustDisplayError() {
parseCommand("--rpc-ws-api", "ETH,BOB,TEST");
Mockito.verifyNoInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8).trim())
.contains("Invalid value for option '--rpc-ws-api': invalid entries found [BOB, TEST]");
}
@Test
public void rpcApisPropertyWithPluginNamespaceAreValid() {
@ -2548,35 +2369,6 @@ public class BesuCommandTest extends CommandTestAbstract {
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void rpcWsMaxFrameSizePropertyMustBeUsed() {
final int maxFrameSize = 65535;
parseCommand("--rpc-ws-max-frame-size", String.valueOf(maxFrameSize));
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getMaxFrameSize()).isEqualTo(maxFrameSize);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void rpcWsMaxActiveConnectionsPropertyMustBeUsed() {
final int maxConnections = 99;
parseCommand("--rpc-ws-max-active-connections", String.valueOf(maxConnections));
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getMaxActiveConnections())
.isEqualTo(maxConnections);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void rpcHttpTlsRequiresRpcHttpEnabled() {
parseCommand("--rpc-http-tls-enabled");
@ -3388,129 +3180,6 @@ public class BesuCommandTest extends CommandTestAbstract {
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void rpcWsRpcEnabledPropertyMustBeUsed() {
parseCommand("--rpc-ws-enabled");
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().isEnabled()).isTrue();
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void rpcWsOptionsRequiresServiceToBeEnabled() {
parseCommand(
"--rpc-ws-api",
"ETH,NET",
"--rpc-ws-host",
"0.0.0.0",
"--rpc-ws-port",
"1234",
"--rpc-ws-max-active-connections",
"77",
"--rpc-ws-max-frame-size",
"65535");
verifyOptionsConstraintLoggerCall(
"--rpc-ws-enabled",
"--rpc-ws-host",
"--rpc-ws-port",
"--rpc-ws-api",
"--rpc-ws-max-active-connections",
"--rpc-ws-max-frame-size");
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void rpcWsOptionsRequiresServiceToBeEnabledToml() throws IOException {
final Path toml =
createTempFile(
"toml",
"rpc-ws-api=[\"ETH\", \"NET\"]\n"
+ "rpc-ws-host=\"0.0.0.0\"\n"
+ "rpc-ws-port=1234\n"
+ "rpc-ws-max-active-connections=77\n"
+ "rpc-ws-max-frame-size=65535\n");
parseCommand("--config-file", toml.toString());
verifyOptionsConstraintLoggerCall(
"--rpc-ws-enabled",
"--rpc-ws-host",
"--rpc-ws-port",
"--rpc-ws-api",
"--rpc-ws-max-active-connections",
"--rpc-ws-max-frame-size");
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void rpcWsApiPropertyMustBeUsed() {
final TestBesuCommand command = parseCommand("--rpc-ws-enabled", "--rpc-ws-api", "ETH, NET");
assertThat(command).isNotNull();
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getRpcApis())
.containsExactlyInAnyOrder(ETH.name(), NET.name());
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void rpcWsHostAndPortOptionMustBeUsed() {
final String host = "1.2.3.4";
final int port = 1234;
parseCommand("--rpc-ws-enabled", "--rpc-ws-host", host, "--rpc-ws-port", String.valueOf(port));
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(wsRpcConfigArgumentCaptor.getValue().getPort()).isEqualTo(port);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void rpcWsHostAndMayBeLocalhost() {
final String host = "localhost";
parseCommand("--rpc-ws-enabled", "--rpc-ws-host", host);
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void rpcWsHostAndMayBeIPv6() {
final String host = "2600:DB8::8545";
parseCommand("--rpc-ws-enabled", "--rpc-ws-host", host);
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void metricsEnabledPropertyDefaultIsFalse() {
parseCommand();
@ -4300,35 +3969,6 @@ public class BesuCommandTest extends CommandTestAbstract {
"No Payload Provider has been provided. You must register one when enabling privacy plugin!");
}
private static String escapeTomlString(final String s) {
return StringEscapeUtils.escapeJava(s);
}
/**
* Check logger calls
*
* <p>Here we check the calls to logger and not the result of the log line as we don't test the
* logger itself but the fact that we call it.
*
* @param dependentOptions the string representing the list of dependent options names
* @param mainOption the main option name
*/
private void verifyOptionsConstraintLoggerCall(
final String mainOption, final String... dependentOptions) {
verify(mockLogger, atLeast(1))
.warn(
stringArgumentCaptor.capture(),
stringArgumentCaptor.capture(),
stringArgumentCaptor.capture());
assertThat(stringArgumentCaptor.getAllValues().get(0)).isEqualTo(DEPENDENCY_WARNING_MSG);
for (final String option : dependentOptions) {
assertThat(stringArgumentCaptor.getAllValues().get(1)).contains(option);
}
assertThat(stringArgumentCaptor.getAllValues().get(2)).isEqualTo(mainOption);
}
/**
* Check logger calls
*
@ -4518,17 +4158,6 @@ public class BesuCommandTest extends CommandTestAbstract {
.isEqualTo(JwtAlgorithm.ES256);
}
@Test
public void webSocketAuthenticationAlgorithIsConfigured() {
parseCommand("--rpc-ws-authentication-jwt-algorithm", "ES256");
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getAuthenticationAlgorithm())
.isEqualTo(JwtAlgorithm.ES256);
}
@Test
public void httpAuthenticationPublicKeyIsConfigured() throws IOException {
final Path publicKey = Files.createTempFile("public_key", "");
@ -4552,29 +4181,6 @@ public class BesuCommandTest extends CommandTestAbstract {
"Unable to authenticate JSON-RPC HTTP endpoint without a supplied credentials file or authentication public key file");
}
@Test
public void wsAuthenticationPublicKeyIsConfigured() throws IOException {
final Path publicKey = Files.createTempFile("public_key", "");
parseCommand("--rpc-ws-authentication-jwt-public-key-file", publicKey.toString());
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getAuthenticationPublicKeyFile().getPath())
.isEqualTo(publicKey.toString());
}
@Test
public void wsAuthenticationWithoutRequiredConfiguredOptionsMustFail() {
parseCommand("--rpc-ws-enabled", "--rpc-ws-authentication-enabled");
verifyNoInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8))
.contains(
"Unable to authenticate JSON-RPC WebSocket endpoint without a supplied credentials file or authentication public key file");
}
@Test
public void privHttpApisWithPrivacyDisabledLogsWarning() {
parseCommand("--privacy-enabled=false", "--rpc-http-api", "PRIV", "--rpc-http-enabled");

@ -0,0 +1,306 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Arrays.asList;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hyperledger.besu.cli.config.NetworkName.DEV;
import static org.hyperledger.besu.cli.config.NetworkName.MAINNET;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.ETH;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.WEB3;
import static org.hyperledger.besu.ethereum.p2p.config.DefaultDiscoveryConfiguration.MAINNET_BOOTSTRAP_NODES;
import static org.hyperledger.besu.ethereum.p2p.config.DefaultDiscoveryConfiguration.MAINNET_DISCOVERY_URL;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.verify;
import org.hyperledger.besu.cli.config.EthNetworkConfig;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration;
import org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration;
import org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration;
import org.hyperledger.besu.ethereum.core.ImmutableMiningParameters;
import org.hyperledger.besu.ethereum.eth.sync.SyncMode;
import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeURLImpl;
import org.hyperledger.besu.metrics.prometheus.MetricsConfiguration;
import org.hyperledger.besu.plugin.data.EnodeURL;
import java.io.File;
import java.io.IOException;
import java.math.BigInteger;
import java.net.URL;
import java.nio.file.Path;
import java.util.Collections;
import java.util.List;
import com.google.common.io.Resources;
import io.vertx.core.json.JsonObject;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import org.mockito.ArgumentCaptor;
public class CascadingDefaultProviderTest extends CommandTestAbstract {
private static final int GENESIS_CONFIG_TEST_CHAINID = 3141592;
private static final String VALID_NODE_ID =
"6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0";
private static final JsonObject GENESIS_VALID_JSON =
(new JsonObject())
.put("config", (new JsonObject()).put("chainId", GENESIS_CONFIG_TEST_CHAINID));
/**
* Test if the default values are overridden if the key is present in the configuration file. The
* test checks if the configuration file correctly overrides the default values for various
* settings, such as the JSON-RPC configuration, GraphQL configuration, WebSocket configuration,
* and metrics configuration.
*/
@Test
public void overrideDefaultValuesIfKeyIsPresentInConfigFile(final @TempDir File dataFolder)
throws IOException {
final URL configFile = this.getClass().getResource("/complete_config.toml");
final Path genesisFile = createFakeGenesisFile(GENESIS_VALID_JSON);
final String updatedConfig =
Resources.toString(configFile, UTF_8)
.replace("/opt/besu/genesis.json", escapeTomlString(genesisFile.toString()))
.replace(
"data-path=\"/opt/besu\"",
"data-path=\"" + escapeTomlString(dataFolder.getPath()) + "\"");
final Path toml = createTempFile("toml", updatedConfig.getBytes(UTF_8));
final List<String> expectedApis = asList(ETH.name(), WEB3.name());
final JsonRpcConfiguration jsonRpcConfiguration = JsonRpcConfiguration.createDefault();
jsonRpcConfiguration.setEnabled(false);
jsonRpcConfiguration.setHost("5.6.7.8");
jsonRpcConfiguration.setPort(5678);
jsonRpcConfiguration.setCorsAllowedDomains(Collections.emptyList());
jsonRpcConfiguration.setRpcApis(expectedApis);
jsonRpcConfiguration.setMaxActiveConnections(1000);
final GraphQLConfiguration graphQLConfiguration = GraphQLConfiguration.createDefault();
graphQLConfiguration.setEnabled(false);
graphQLConfiguration.setHost("6.7.8.9");
graphQLConfiguration.setPort(6789);
final WebSocketConfiguration webSocketConfiguration = WebSocketConfiguration.createDefault();
webSocketConfiguration.setEnabled(false);
webSocketConfiguration.setHost("9.10.11.12");
webSocketConfiguration.setPort(9101);
webSocketConfiguration.setRpcApis(expectedApis);
final MetricsConfiguration metricsConfiguration =
MetricsConfiguration.builder().enabled(false).host("8.6.7.5").port(309).build();
parseCommand("--config-file", toml.toString());
verify(mockRunnerBuilder).discovery(eq(false));
verify(mockRunnerBuilder).ethNetworkConfig(ethNetworkConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).p2pAdvertisedHost(eq("1.2.3.4"));
verify(mockRunnerBuilder).p2pListenPort(eq(1234));
verify(mockRunnerBuilder).jsonRpcConfiguration(eq(jsonRpcConfiguration));
verify(mockRunnerBuilder).graphQLConfiguration(eq(graphQLConfiguration));
verify(mockRunnerBuilder).webSocketConfiguration(eq(webSocketConfiguration));
verify(mockRunnerBuilder).metricsConfiguration(eq(metricsConfiguration));
verify(mockRunnerBuilder).build();
final List<EnodeURL> nodes =
asList(
EnodeURLImpl.fromString("enode://" + VALID_NODE_ID + "@192.168.0.1:4567"),
EnodeURLImpl.fromString("enode://" + VALID_NODE_ID + "@192.168.0.1:4567"),
EnodeURLImpl.fromString("enode://" + VALID_NODE_ID + "@192.168.0.1:4567"));
assertThat(ethNetworkConfigArgumentCaptor.getValue().getBootNodes()).isEqualTo(nodes);
final EthNetworkConfig networkConfig =
new EthNetworkConfig.Builder(EthNetworkConfig.getNetworkConfig(MAINNET))
.setNetworkId(BigInteger.valueOf(42))
.setGenesisConfig(encodeJsonGenesis(GENESIS_VALID_JSON))
.setBootNodes(nodes)
.setDnsDiscoveryUrl(null)
.build();
verify(mockControllerBuilder).dataDirectory(eq(dataFolder.toPath()));
verify(mockControllerBuilderFactory).fromEthNetworkConfig(eq(networkConfig), any(), any());
verify(mockControllerBuilder).synchronizerConfiguration(syncConfigurationCaptor.capture());
assertThat(syncConfigurationCaptor.getValue().getSyncMode()).isEqualTo(SyncMode.FAST);
assertThat(syncConfigurationCaptor.getValue().getFastSyncMinimumPeerCount()).isEqualTo(13);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
/**
* Test if the default values are not overridden if the key is not present in the configuration
* file. The test checks if the default values for various settings remain unchanged when the
* corresponding keys are not present in the configuration file.
*/
@Test
public void noOverrideDefaultValuesIfKeyIsNotPresentInConfigFile() {
final String configFile = this.getClass().getResource("/partial_config.toml").getFile();
parseCommand("--config-file", configFile);
final JsonRpcConfiguration jsonRpcConfiguration = JsonRpcConfiguration.createDefault();
final GraphQLConfiguration graphQLConfiguration = GraphQLConfiguration.createDefault();
final WebSocketConfiguration webSocketConfiguration = WebSocketConfiguration.createDefault();
final MetricsConfiguration metricsConfiguration = MetricsConfiguration.builder().build();
verify(mockRunnerBuilder).discovery(eq(true));
verify(mockRunnerBuilder)
.ethNetworkConfig(
new EthNetworkConfig(
EthNetworkConfig.jsonConfig(MAINNET),
MAINNET.getNetworkId(),
MAINNET_BOOTSTRAP_NODES,
MAINNET_DISCOVERY_URL));
verify(mockRunnerBuilder).p2pAdvertisedHost(eq("127.0.0.1"));
verify(mockRunnerBuilder).p2pListenPort(eq(30303));
verify(mockRunnerBuilder).jsonRpcConfiguration(eq(jsonRpcConfiguration));
verify(mockRunnerBuilder).graphQLConfiguration(eq(graphQLConfiguration));
verify(mockRunnerBuilder).webSocketConfiguration(eq(webSocketConfiguration));
verify(mockRunnerBuilder).metricsConfiguration(eq(metricsConfiguration));
verify(mockRunnerBuilder).build();
verify(mockControllerBuilder).build();
verify(mockControllerBuilder).synchronizerConfiguration(syncConfigurationCaptor.capture());
final SynchronizerConfiguration syncConfig = syncConfigurationCaptor.getValue();
assertThat(syncConfig.getSyncMode()).isEqualTo(SyncMode.FAST);
assertThat(syncConfig.getFastSyncMinimumPeerCount()).isEqualTo(5);
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
/**
* Test if the environment variable overrides the value from the configuration file. The test
* checks if the value of the miner's coinbase address set through an environment variable
* correctly overrides the value specified in the configuration file.
*/
@Test
public void envVariableOverridesValueFromConfigFile() {
final String configFile = this.getClass().getResource("/partial_config.toml").getFile();
final String expectedCoinbase = "0x0000000000000000000000000000000000000004";
setEnvironmentVariable("BESU_MINER_COINBASE", expectedCoinbase);
parseCommand("--config-file", configFile);
verify(mockControllerBuilder)
.miningParameters(
ImmutableMiningParameters.builder()
.mutableInitValues(
ImmutableMiningParameters.MutableInitValues.builder()
.coinbase(Address.fromHexString(expectedCoinbase))
.build())
.build());
}
/**
* Test if the command line option overrides the environment variable and configuration. The test
* checks if the value of the miner's coinbase address set through a command line option correctly
* overrides the value specified in the environment variable and the configuration file.
*/
@Test
public void cliOptionOverridesEnvVariableAndConfig() {
final String configFile = this.getClass().getResource("/partial_config.toml").getFile();
final String expectedCoinbase = "0x0000000000000000000000000000000000000006";
setEnvironmentVariable("BESU_MINER_COINBASE", "0x0000000000000000000000000000000000000004");
parseCommand("--config-file", configFile, "--miner-coinbase", expectedCoinbase);
verify(mockControllerBuilder)
.miningParameters(
ImmutableMiningParameters.builder()
.mutableInitValues(
ImmutableMiningParameters.MutableInitValues.builder()
.coinbase(Address.fromHexString(expectedCoinbase))
.build())
.build());
}
/**
* Test if the profile option sets the correct defaults. The test checks if the 'dev' profile
* correctly sets the network ID to the expected value.
*/
@Test
public void profileOptionShouldSetCorrectDefaults() {
final ArgumentCaptor<EthNetworkConfig> networkArg =
ArgumentCaptor.forClass(EthNetworkConfig.class);
parseCommand("--profile", "dev");
verify(mockControllerBuilderFactory).fromEthNetworkConfig(networkArg.capture(), any(), any());
verify(mockControllerBuilder).build();
final EthNetworkConfig config = networkArg.getValue();
assertThat(config.getNetworkId()).isEqualTo(DEV.getNetworkId());
}
/**
* Test if the command line option overrides the profile configuration. The test checks if the
* network ID set through a command line option correctly overrides the value specified in the
* 'dev' profile.
*/
@Test
public void cliOptionOverridesProfileConfiguration() {
final ArgumentCaptor<EthNetworkConfig> networkArg =
ArgumentCaptor.forClass(EthNetworkConfig.class);
parseCommand("--profile", "dev", "--network", "MAINNET");
verify(mockControllerBuilderFactory).fromEthNetworkConfig(networkArg.capture(), any(), any());
verify(mockControllerBuilder).build();
final EthNetworkConfig config = networkArg.getValue();
assertThat(config.getNetworkId()).isEqualTo(MAINNET.getNetworkId());
}
/**
* Test if the configuration file overrides the profile configuration. The test checks if the
* network ID specified in the configuration file correctly overrides the value specified in the
* 'dev' profile.
*/
@Test
public void configFileOverridesProfileConfiguration() {
final ArgumentCaptor<EthNetworkConfig> networkArg =
ArgumentCaptor.forClass(EthNetworkConfig.class);
final String configFile = this.getClass().getResource("/partial_config.toml").getFile();
parseCommand("--profile", "dev", "--config-file", configFile);
verify(mockControllerBuilderFactory).fromEthNetworkConfig(networkArg.capture(), any(), any());
verify(mockControllerBuilder).build();
final EthNetworkConfig config = networkArg.getValue();
assertThat(config.getNetworkId()).isEqualTo(MAINNET.getNetworkId());
}
/**
* Test if the environment variable overrides the profile configuration. The test checks if the
* network ID set through an environment variable correctly overrides the value specified in the
* 'dev' profile.
*/
@Test
public void environmentVariableOverridesProfileConfiguration() {
final ArgumentCaptor<EthNetworkConfig> networkArg =
ArgumentCaptor.forClass(EthNetworkConfig.class);
setEnvironmentVariable("BESU_NETWORK", "MAINNET");
parseCommand("--profile", "dev");
verify(mockControllerBuilderFactory).fromEthNetworkConfig(networkArg.capture(), any(), any());
verify(mockControllerBuilder).build();
final EthNetworkConfig config = networkArg.getValue();
assertThat(config.getNetworkId()).isEqualTo(MAINNET.getNetworkId());
}
}
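
These tests pin down the precedence order the cascading default provider is expected to enforce: a CLI flag beats an environment variable, which beats a config-file value, which beats a profile default, which beats the option's built-in default. As a minimal sketch (assuming a hypothetical FirstMatchDefaultProvider, not Besu's actual CascadingDefaultProvider), such a cascade can be expressed with picocli's IDefaultValueProvider by asking each source in priority order and returning the first value found:

import java.util.List;
import picocli.CommandLine.IDefaultValueProvider;
import picocli.CommandLine.Model.ArgSpec;

// Hypothetical sketch: consult each provider in priority order (e.g. environment variables,
// then the TOML config file, then the profile) and return the first non-null default.
class FirstMatchDefaultProvider implements IDefaultValueProvider {
  private final List<IDefaultValueProvider> providers;

  FirstMatchDefaultProvider(final List<IDefaultValueProvider> providers) {
    this.providers = providers;
  }

  @Override
  public String defaultValue(final ArgSpec argSpec) throws Exception {
    for (final IDefaultValueProvider provider : providers) {
      final String value = provider.defaultValue(argSpec);
      if (value != null) {
        return value; // the higher-priority source wins
      }
    }
    return null; // fall back to the option's built-in default
  }
}

CLI flags always take precedence because picocli only consults the default value provider for options that were not given on the command line.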

@ -22,7 +22,7 @@ import static picocli.CommandLine.defaultExceptionHandler;
import org.hyperledger.besu.cli.util.CommandLineUtils;
import org.hyperledger.besu.cli.util.EnvironmentVariableDefaultProvider;
import org.hyperledger.besu.cli.util.TomlConfigFileDefaultProvider;
import org.hyperledger.besu.cli.util.TomlConfigurationDefaultProvider;
import org.hyperledger.besu.util.StringUtils;
import java.io.IOException;
@ -252,7 +252,7 @@ public class CommandLineUtilsTest {
final AbstractTestCommand testCommand = new TestMultiCommandWithDeps(mockLogger);
testCommand.commandLine.setDefaultValueProvider(
new TomlConfigFileDefaultProvider(testCommand.commandLine, toml.toFile()));
TomlConfigurationDefaultProvider.fromFile(testCommand.commandLine, toml.toFile()));
testCommand.commandLine.parseWithHandlers(new RunLast(), defaultExceptionHandler());
verifyMultiOptionsConstraintLoggerCall(

@ -15,14 +15,18 @@
package org.hyperledger.besu.cli;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hyperledger.besu.cli.util.CommandLineUtils.DEPENDENCY_WARNING_MSG;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.atLeast;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import org.hyperledger.besu.Runner;
@ -40,6 +44,7 @@ import org.hyperledger.besu.cli.options.unstable.MetricsCLIOptions;
import org.hyperledger.besu.cli.options.unstable.NetworkingOptions;
import org.hyperledger.besu.cli.options.unstable.SynchronizerOptions;
import org.hyperledger.besu.components.BesuComponent;
import org.hyperledger.besu.config.GenesisConfigOptions;
import org.hyperledger.besu.consensus.qbft.pki.PkiBlockCreationConfiguration;
import org.hyperledger.besu.consensus.qbft.pki.PkiBlockCreationConfigurationProvider;
import org.hyperledger.besu.controller.BesuController;
@ -109,6 +114,7 @@ import io.opentelemetry.api.GlobalOpenTelemetry;
import io.vertx.core.Vertx;
import io.vertx.core.VertxOptions;
import io.vertx.core.json.JsonObject;
import org.apache.commons.text.StringEscapeUtils;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.awaitility.Awaitility;
@ -128,20 +134,37 @@ import picocli.CommandLine.RunLast;
@ExtendWith(MockitoExtension.class)
public abstract class CommandTestAbstract {
private static final Logger TEST_LOGGER = LoggerFactory.getLogger(CommandTestAbstract.class);
protected static final int POA_BLOCK_PERIOD_SECONDS = 5;
protected static final JsonObject VALID_GENESIS_QBFT_POST_LONDON =
(new JsonObject())
.put(
"config",
new JsonObject()
.put("londonBlock", 0)
.put("qbft", new JsonObject().put("blockperiodseconds", 5)));
.put(
"qbft",
new JsonObject().put("blockperiodseconds", POA_BLOCK_PERIOD_SECONDS)));
protected static final JsonObject VALID_GENESIS_IBFT2_POST_LONDON =
(new JsonObject())
.put(
"config",
new JsonObject()
.put("londonBlock", 0)
.put("ibft2", new JsonObject().put("blockperiodseconds", 5)));
.put(
"ibft2",
new JsonObject().put("blockperiodseconds", POA_BLOCK_PERIOD_SECONDS)));
protected static final JsonObject VALID_GENESIS_CLIQUE_POST_LONDON =
(new JsonObject())
.put(
"config",
new JsonObject()
.put("londonBlock", 0)
.put(
"clique",
new JsonObject().put("blockperiodseconds", POA_BLOCK_PERIOD_SECONDS)));
protected final PrintStream originalOut = System.out;
protected final PrintStream originalErr = System.err;
protected final ByteArrayOutputStream commandOutput = new ByteArrayOutputStream();
@ -556,6 +579,11 @@ public abstract class CommandTestAbstract {
return vertx;
}
@Override
public GenesisConfigOptions getActualGenesisConfigOptions() {
return super.getActualGenesisConfigOptions();
}
public CommandSpec getSpec() {
return spec;
}
@ -682,4 +710,33 @@ public abstract class CommandTestAbstract {
PORT_CHECK,
NO_PORT_CHECK
}
protected static String escapeTomlString(final String s) {
return StringEscapeUtils.escapeJava(s);
}
/**
* Check logger calls
*
* <p>Here we check the calls to the logger, not the content of the log line, as we don't test the
* logger itself but the fact that we call it.
*
* @param mainOption the main option name
* @param dependentOptions the names of the dependent options
*/
protected void verifyOptionsConstraintLoggerCall(
final String mainOption, final String... dependentOptions) {
verify(mockLogger, atLeast(1))
.warn(
stringArgumentCaptor.capture(),
stringArgumentCaptor.capture(),
stringArgumentCaptor.capture());
assertThat(stringArgumentCaptor.getAllValues().get(0)).isEqualTo(DEPENDENCY_WARNING_MSG);
for (final String option : dependentOptions) {
assertThat(stringArgumentCaptor.getAllValues().get(1)).contains(option);
}
assertThat(stringArgumentCaptor.getAllValues().get(2)).isEqualTo(mainOption);
}
}
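
The shared verifyOptionsConstraintLoggerCall helper asserts on three captured strings: the dependency-warning template, the list of dependent option names, and the main option. A simplified, hypothetical sketch of the kind of check that produces this warning (the message text and names below are illustrative, not Besu's CommandLineUtils):

import java.util.List;
import org.slf4j.Logger;

// Illustrative dependency check: if dependent options were set while the main option is
// disabled, log a warning whose three arguments match what the captor above asserts on.
final class OptionDependencyCheck {
  static final String DEPENDENCY_WARNING_MSG =
      "{} will have no effect unless {} is defined on the command line"; // illustrative template

  static void check(
      final Logger logger,
      final boolean mainOptionEnabled,
      final String mainOption,
      final List<String> dependentOptionsSetByUser) {
    if (!mainOptionEnabled && !dependentOptionsSetByUser.isEmpty()) {
      logger.warn(DEPENDENCY_WARNING_MSG, String.join(", ", dependentOptionsSetByUser), mainOption);
    }
  }
}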

@ -20,6 +20,7 @@ import static org.hyperledger.besu.ethereum.eth.transactions.TransactionPoolConf
import static org.hyperledger.besu.ethereum.eth.transactions.TransactionPoolConfiguration.Implementation.SEQUENCED;
import static org.mockito.Mockito.mock;
import org.hyperledger.besu.cli.config.ProfileName;
import org.hyperledger.besu.evm.internal.EvmConfiguration;
import java.math.BigInteger;
@ -151,21 +152,21 @@ class ConfigurationOverviewBuilderTest {
}
@Test
void setTrieLogPruningEnabled() {
final String noTrieLogRetentionThresholdSet = builder.build();
assertThat(noTrieLogRetentionThresholdSet).doesNotContain("Trie log pruning enabled");
void setBonsaiLimitTrieLogsEnabled() {
final String noTrieLogRetentionLimitSet = builder.build();
assertThat(noTrieLogRetentionLimitSet).doesNotContain("Limit trie logs enabled");
builder.setTrieLogPruningEnabled();
builder.setTrieLogRetentionThreshold(42);
String trieLogRetentionThresholdSet = builder.build();
assertThat(trieLogRetentionThresholdSet)
.contains("Trie log pruning enabled")
builder.setLimitTrieLogsEnabled();
builder.setTrieLogRetentionLimit(42);
String trieLogRetentionLimitSet = builder.build();
assertThat(trieLogRetentionLimitSet)
.contains("Limit trie logs enabled")
.contains("retention: 42");
assertThat(trieLogRetentionThresholdSet).doesNotContain("prune limit");
assertThat(trieLogRetentionLimitSet).doesNotContain("prune window");
builder.setTrieLogPruningLimit(1000);
trieLogRetentionThresholdSet = builder.build();
assertThat(trieLogRetentionThresholdSet).contains("prune limit: 1000");
builder.setTrieLogsPruningWindowSize(1000);
trieLogRetentionLimitSet = builder.build();
assertThat(trieLogRetentionLimitSet).contains("prune window: 1000");
}
@Test
@ -209,4 +210,11 @@ class ConfigurationOverviewBuilderTest {
final String layeredTxPoolSelected = builder.build();
assertThat(layeredTxPoolSelected).contains("Using JOURNALED worldstate update mode");
}
@Test
void setProfile() {
builder.setProfile(ProfileName.DEV.name());
final String profileSelected = builder.build();
assertThat(profileSelected).contains("Profile: DEV");
}
}
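
These tests check only that certain fragments appear in the overview output ("Profile: DEV", "Limit trie logs enabled", "retention: 42", "prune window: 1000"). A hypothetical sketch of how such an overview string could be assembled from optional settings, purely for illustration (this is not the real ConfigurationOverviewBuilder):

import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.OptionalInt;
import java.util.OptionalLong;

// Illustrative assembly of an overview string containing the fragments the tests assert on.
final class OverviewSketch {
  static String build(
      final Optional<String> profile,
      final boolean limitTrieLogsEnabled,
      final OptionalLong trieLogRetentionLimit,
      final OptionalInt trieLogsPruningWindowSize) {
    final List<String> lines = new ArrayList<>();
    profile.ifPresent(p -> lines.add("Profile: " + p));
    if (limitTrieLogsEnabled) {
      final StringBuilder line = new StringBuilder("Limit trie logs enabled");
      trieLogRetentionLimit.ifPresent(limit -> line.append(" (retention: ").append(limit).append(")"));
      trieLogsPruningWindowSize.ifPresent(size -> line.append(" (prune window: ").append(size).append(")"));
      lines.add(line.toString());
    }
    return String.join("\n", lines);
  }
}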

@ -19,7 +19,7 @@ import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.mockito.Mockito.when;
import org.hyperledger.besu.cli.util.TomlConfigFileDefaultProvider;
import org.hyperledger.besu.cli.util.TomlConfigurationDefaultProvider;
import org.hyperledger.besu.datatypes.Wei;
import java.io.BufferedWriter;
@ -42,7 +42,7 @@ import picocli.CommandLine.Model.OptionSpec;
import picocli.CommandLine.ParameterException;
@ExtendWith(MockitoExtension.class)
public class TomlConfigFileDefaultProviderTest {
public class TomlConfigurationDefaultProviderTest {
@Mock CommandLine mockCommandLine;
@Mock CommandSpec mockCommandSpec;
@ -67,8 +67,8 @@ public class TomlConfigFileDefaultProviderTest {
fileWriter.write("a-longer-option='1234'");
fileWriter.flush();
final TomlConfigFileDefaultProvider providerUnderTest =
new TomlConfigFileDefaultProvider(mockCommandLine, tempConfigFile);
final TomlConfigurationDefaultProvider providerUnderTest =
TomlConfigurationDefaultProvider.fromFile(mockCommandLine, tempConfigFile);
// this option must be found in config
assertThat(
@ -152,8 +152,8 @@ public class TomlConfigFileDefaultProviderTest {
fileWriter.write("a-double-value-option-int=1"); // should be able to parse int as double
fileWriter.flush();
final TomlConfigFileDefaultProvider providerUnderTest =
new TomlConfigFileDefaultProvider(mockCommandLine, tempConfigFile);
final TomlConfigurationDefaultProvider providerUnderTest =
TomlConfigurationDefaultProvider.fromFile(mockCommandLine, tempConfigFile);
assertThat(
providerUnderTest.defaultValue(
@ -221,16 +221,9 @@ public class TomlConfigFileDefaultProviderTest {
@Test
public void configFileNotFoundMustThrow() {
final File nonExistingFile = new File("doesnt.exit");
final TomlConfigFileDefaultProvider providerUnderTest =
new TomlConfigFileDefaultProvider(mockCommandLine, nonExistingFile);
assertThatThrownBy(
() ->
providerUnderTest.defaultValue(
OptionSpec.builder("an-option").type(String.class).build()))
() -> TomlConfigurationDefaultProvider.fromFile(mockCommandLine, nonExistingFile))
.isInstanceOf(ParameterException.class)
.hasMessage("Unable to read TOML configuration, file not found.");
}
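
The updated test expects the failure to surface at construction time: TomlConfigurationDefaultProvider.fromFile rejects a missing file immediately, whereas the old TomlConfigFileDefaultProvider only failed when defaultValue() was first called. A hedged sketch of that eager-validation pattern (the class name, method name, and message here are illustrative, not the real factory):

import java.io.File;
import picocli.CommandLine;
import picocli.CommandLine.ParameterException;

// Illustrative eager check performed by a static factory before any defaults are looked up,
// so a bad --config-file path fails fast instead of on the first defaultValue() call.
final class EagerTomlProviderCheck {
  static void requireReadable(final CommandLine commandLine, final File configFile) {
    if (!configFile.exists()) {
      throw new ParameterException(
          commandLine, "Unable to read TOML configuration, file not found: " + configFile);
    }
  }
}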
@ -240,8 +233,8 @@ public class TomlConfigFileDefaultProviderTest {
final File tempConfigFile = Files.createTempFile("invalid", "toml").toFile();
final TomlConfigFileDefaultProvider providerUnderTest =
new TomlConfigFileDefaultProvider(mockCommandLine, tempConfigFile);
final TomlConfigurationDefaultProvider providerUnderTest =
TomlConfigurationDefaultProvider.fromFile(mockCommandLine, tempConfigFile);
assertThatThrownBy(
() ->
@ -260,8 +253,8 @@ public class TomlConfigFileDefaultProviderTest {
fileWriter.write("an-invalid-syntax=======....");
fileWriter.flush();
final TomlConfigFileDefaultProvider providerUnderTest =
new TomlConfigFileDefaultProvider(mockCommandLine, tempConfigFile);
final TomlConfigurationDefaultProvider providerUnderTest =
TomlConfigurationDefaultProvider.fromFile(mockCommandLine, tempConfigFile);
assertThatThrownBy(
() ->
@ -286,8 +279,8 @@ public class TomlConfigFileDefaultProviderTest {
fileWriter.write("invalid_option=true");
fileWriter.flush();
final TomlConfigFileDefaultProvider providerUnderTest =
new TomlConfigFileDefaultProvider(mockCommandLine, tempConfigFile);
final TomlConfigurationDefaultProvider providerUnderTest =
TomlConfigurationDefaultProvider.fromFile(mockCommandLine, tempConfigFile);
assertThatThrownBy(
() ->
@ -321,8 +314,8 @@ public class TomlConfigFileDefaultProviderTest {
fileWriter.newLine();
fileWriter.flush();
final TomlConfigFileDefaultProvider providerUnderTest =
new TomlConfigFileDefaultProvider(mockCommandLine, tempConfigFile);
final TomlConfigurationDefaultProvider providerUnderTest =
TomlConfigurationDefaultProvider.fromFile(mockCommandLine, tempConfigFile);
assertThat(
providerUnderTest.defaultValue(
@ -361,8 +354,8 @@ public class TomlConfigFileDefaultProviderTest {
fileWriter.newLine();
fileWriter.flush();
final TomlConfigFileDefaultProvider providerUnderTest =
new TomlConfigFileDefaultProvider(mockCommandLine, tempConfigFile);
final TomlConfigurationDefaultProvider providerUnderTest =
TomlConfigurationDefaultProvider.fromFile(mockCommandLine, tempConfigFile);
assertThatThrownBy(
() ->

@ -16,8 +16,6 @@ package org.hyperledger.besu.cli.options;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hyperledger.besu.cli.util.CommandLineUtils.DEPENDENCY_WARNING_MSG;
import static org.mockito.Mockito.atLeast;
import static org.mockito.Mockito.verify;
import org.hyperledger.besu.cli.CommandTestAbstract;
@ -69,7 +67,10 @@ public abstract class AbstractCLIOptionsTest<D, T extends CLIOptions<D>>
final TestBesuCommand cmd = parseCommand(cliOptions);
final T optionsFromCommand = getOptionsFromBesuCommand(cmd);
assertThat(optionsFromCommand).usingRecursiveComparison().isEqualTo(options);
assertThat(optionsFromCommand)
.usingRecursiveComparison()
.ignoringFields(getNonOptionFields())
.isEqualTo(options);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
@ -84,10 +85,10 @@ public abstract class AbstractCLIOptionsTest<D, T extends CLIOptions<D>>
final T optionsFromCommand = getOptionsFromBesuCommand(cmd);
// Check default values supplied by CLI match expected default values
final String[] fieldsToIgnore = getFieldsWithComputedDefaults().toArray(new String[0]);
assertThat(optionsFromCommand)
.usingRecursiveComparison()
.ignoringFields(fieldsToIgnore)
.ignoringFields(getFieldsWithComputedDefaults())
.ignoringFields(getNonOptionFields())
.isEqualTo(defaultOptions);
}
@ -95,8 +96,12 @@ public abstract class AbstractCLIOptionsTest<D, T extends CLIOptions<D>>
protected abstract D createCustomizedDomainObject();
protected List<String> getFieldsWithComputedDefaults() {
return Collections.emptyList();
protected String[] getFieldsWithComputedDefaults() {
return new String[0];
}
protected String[] getNonOptionFields() {
return new String[0];
}
protected List<String> getFieldsToIgnore() {
@ -125,29 +130,4 @@ public abstract class AbstractCLIOptionsTest<D, T extends CLIOptions<D>>
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).contains(errorMsg);
}
/**
* Check logger calls
*
* <p>Here we check the calls to logger and not the result of the log line as we don't test the
* logger itself but the fact that we call it.
*
* @param dependentOptions the string representing the list of dependent options names
* @param mainOption the main option name
*/
protected void verifyOptionsConstraintLoggerCall(
final String mainOption, final String... dependentOptions) {
verify(mockLogger, atLeast(1))
.warn(
stringArgumentCaptor.capture(),
stringArgumentCaptor.capture(),
stringArgumentCaptor.capture());
assertThat(stringArgumentCaptor.getAllValues().get(0)).isEqualTo(DEPENDENCY_WARNING_MSG);
for (final String option : dependentOptions) {
assertThat(stringArgumentCaptor.getAllValues().get(1)).contains(option);
}
assertThat(stringArgumentCaptor.getAllValues().get(2)).isEqualTo(mainOption);
}
}

@ -32,7 +32,9 @@ import org.hyperledger.besu.util.number.PositiveNumber;
import java.io.IOException;
import java.nio.file.Path;
import java.time.Duration;
import java.util.Optional;
import java.util.OptionalInt;
import org.apache.tuweni.bytes.Bytes;
import org.junit.jupiter.api.Test;
@ -361,13 +363,16 @@ public class MiningOptionsTest extends AbstractCLIOptionsTest<MiningParameters,
@Test
public void poaBlockTxsSelectionMaxTimeOptionOver100Percent() throws IOException {
final Path genesisFileIBFT2 = createFakeGenesisFile(VALID_GENESIS_IBFT2_POST_LONDON);
final Path genesisFileClique = createFakeGenesisFile(VALID_GENESIS_CLIQUE_POST_LONDON);
internalTestSuccess(
miningParams ->
assertThat(miningParams.getPoaBlockTxsSelectionMaxTime())
.isEqualTo(PositiveNumber.fromInt(200)),
miningParams -> {
assertThat(miningParams.getPoaBlockTxsSelectionMaxTime())
.isEqualTo(PositiveNumber.fromInt(200));
assertThat(miningParams.getBlockTxsSelectionMaxTime())
.isEqualTo(Duration.ofSeconds(POA_BLOCK_PERIOD_SECONDS * 2).toMillis());
},
"--genesis-file",
genesisFileIBFT2.toString(),
genesisFileClique.toString(),
"--poa-block-txs-selection-max-time",
"200");
}
@ -407,6 +412,16 @@ public class MiningOptionsTest extends AbstractCLIOptionsTest<MiningParameters,
@Override
protected MiningOptions getOptionsFromBesuCommand(final TestBesuCommand besuCommand) {
return besuCommand.getMiningOptions();
final var miningOptions = besuCommand.getMiningOptions();
miningOptions.setGenesisBlockPeriodSeconds(
besuCommand.getActualGenesisConfigOptions().isPoa()
? OptionalInt.of(POA_BLOCK_PERIOD_SECONDS)
: OptionalInt.empty());
return miningOptions;
}
@Override
protected String[] getNonOptionFields() {
return new String[] {"maybeGenesisBlockPeriodSeconds"};
}
}
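
The strengthened assertion relies on a simple conversion: --poa-block-txs-selection-max-time is a percentage of the PoA block period, so 200 percent of a 5-second period is 10,000 ms. A minimal sketch of that arithmetic (an assumed helper, not the production code):

import java.time.Duration;

// Sketch of the conversion the assertion depends on: percentage of the PoA block period to millis.
final class PoaSelectionTimeSketch {
  static long maxSelectionTimeMillis(final int blockPeriodSeconds, final int percentOfBlockPeriod) {
    return Duration.ofSeconds(blockPeriodSeconds).toMillis() * percentOfBlockPeriod / 100;
  }

  public static void main(final String[] args) {
    // 200% of a 5-second block period = 10_000 ms, the value the test expects.
    System.out.println(maxSelectionTimeMillis(5, 200)); // prints 10000
  }
}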

@ -134,7 +134,7 @@ public class NetworkingOptionsTest
final NetworkingOptions options = cmd.getNetworkingOptions();
final NetworkingConfiguration networkingConfig = options.toDomainObject();
assertThat(networkingConfig.getDiscovery().isFilterOnEnrForkIdEnabled()).isEqualTo(false);
assertThat(networkingConfig.getDiscovery().isFilterOnEnrForkIdEnabled()).isEqualTo(true);
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
assertThat(commandOutput.toString(UTF_8)).isEmpty();

@ -0,0 +1,249 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli.options;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.ETH;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.NET;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoInteractions;
import org.hyperledger.besu.cli.CommandTestAbstract;
import org.hyperledger.besu.ethereum.api.jsonrpc.authentication.JwtAlgorithm;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mockito;
import org.mockito.junit.jupiter.MockitoExtension;
@ExtendWith(MockitoExtension.class)
public class RpcWebsocketOptionsTest extends CommandTestAbstract {
@Test
public void rpcWsApiPropertyMustBeUsed() {
final CommandTestAbstract.TestBesuCommand command =
parseCommand("--rpc-ws-enabled", "--rpc-ws-api", "ETH, NET");
assertThat(command).isNotNull();
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getRpcApis())
.containsExactlyInAnyOrder(ETH.name(), NET.name());
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void rpcWsHostAndPortOptionMustBeUsed() {
final String host = "1.2.3.4";
final int port = 1234;
parseCommand("--rpc-ws-enabled", "--rpc-ws-host", host, "--rpc-ws-port", String.valueOf(port));
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(wsRpcConfigArgumentCaptor.getValue().getPort()).isEqualTo(port);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void rpcWsMaxFrameSizePropertyMustBeUsed() {
final int maxFrameSize = 65535;
parseCommand("--rpc-ws-max-frame-size", String.valueOf(maxFrameSize));
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getMaxFrameSize()).isEqualTo(maxFrameSize);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void rpcWsMaxActiveConnectionsPropertyMustBeUsed() {
final int maxConnections = 99;
parseCommand("--rpc-ws-max-active-connections", String.valueOf(maxConnections));
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getMaxActiveConnections())
.isEqualTo(maxConnections);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void rpcWsRpcEnabledPropertyMustBeUsed() {
parseCommand("--rpc-ws-enabled");
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().isEnabled()).isTrue();
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void webSocketAuthenticationAlgorithmIsConfigured() {
parseCommand("--rpc-ws-authentication-jwt-algorithm", "ES256");
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getAuthenticationAlgorithm())
.isEqualTo(JwtAlgorithm.ES256);
}
@Test
public void wsAuthenticationPublicKeyIsConfigured() throws IOException {
final Path publicKey = Files.createTempFile("public_key", "");
parseCommand("--rpc-ws-authentication-jwt-public-key-file", publicKey.toString());
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getAuthenticationPublicKeyFile().getPath())
.isEqualTo(publicKey.toString());
}
@Test
public void rpcWsHostAndMayBeLocalhost() {
final String host = "localhost";
parseCommand("--rpc-ws-enabled", "--rpc-ws-host", host);
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void rpcWsHostAndMayBeIPv6() {
final String host = "2600:DB8::8545";
parseCommand("--rpc-ws-enabled", "--rpc-ws-host", host);
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void rpcWsNoAuthApiMethodsCannotBeInvalid() {
parseCommand("--rpc-ws-enabled", "--rpc-ws-api-methods-no-auth", "invalid");
Mockito.verifyNoInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8))
.contains(
"Invalid value for option '--rpc-ws-api-methods-no-auth', options must be valid RPC methods");
}
@Test
public void rpcWsApisPropertyWithInvalidEntryMustDisplayError() {
parseCommand("--rpc-ws-api", "ETH,BOB,TEST");
Mockito.verifyNoInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8).trim())
.contains("Invalid value for option '--rpc-ws-api': invalid entries found [BOB, TEST]");
}
@Test
public void rpcWsOptionsRequiresServiceToBeEnabled() {
parseCommand(
"--rpc-ws-api",
"ETH,NET",
"--rpc-ws-host",
"0.0.0.0",
"--rpc-ws-port",
"1234",
"--rpc-ws-max-active-connections",
"77",
"--rpc-ws-max-frame-size",
"65535");
verifyOptionsConstraintLoggerCall(
"--rpc-ws-enabled",
"--rpc-ws-host",
"--rpc-ws-port",
"--rpc-ws-api",
"--rpc-ws-max-active-connections",
"--rpc-ws-max-frame-size");
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void rpcWsOptionsRequiresServiceToBeEnabledToml() throws IOException {
final Path toml =
createTempFile(
"toml",
"rpc-ws-api=[\"ETH\", \"NET\"]\n"
+ "rpc-ws-host=\"0.0.0.0\"\n"
+ "rpc-ws-port=1234\n"
+ "rpc-ws-max-active-connections=77\n"
+ "rpc-ws-max-frame-size=65535\n");
parseCommand("--config-file", toml.toString());
verifyOptionsConstraintLoggerCall(
"--rpc-ws-enabled",
"--rpc-ws-host",
"--rpc-ws-port",
"--rpc-ws-api",
"--rpc-ws-max-active-connections",
"--rpc-ws-max-frame-size");
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void wsAuthenticationWithoutRequiredConfiguredOptionsMustFail() {
parseCommand("--rpc-ws-enabled", "--rpc-ws-authentication-enabled");
verifyNoInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8))
.contains(
"Unable to authenticate JSON-RPC WebSocket endpoint without a supplied credentials file or authentication public key file");
}
}
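
The last test encodes a precondition on WebSocket authentication: it can only be enabled when either a credentials file or a JWT public key file is supplied. A simplified, hypothetical sketch of that check (not Besu's actual validation code):

import java.io.File;
import java.util.Optional;

// Illustrative precondition: WebSocket authentication needs at least one credential source.
final class WsAuthPreconditionSketch {
  static void check(
      final boolean authenticationEnabled,
      final Optional<String> credentialsFile,
      final Optional<File> authenticationPublicKeyFile) {
    if (authenticationEnabled && credentialsFile.isEmpty() && authenticationPublicKeyFile.isEmpty()) {
      throw new IllegalArgumentException(
          "Unable to authenticate JSON-RPC WebSocket endpoint without a supplied credentials file "
              + "or authentication public key file");
    }
  }
}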

@ -82,8 +82,8 @@ public class SynchronizerOptionsTest
}
@Override
protected List<String> getFieldsWithComputedDefaults() {
return Arrays.asList("maxTrailingPeers", "computationParallelism");
protected String[] getFieldsWithComputedDefaults() {
return new String[] {"maxTrailingPeers", "computationParallelism"};
}
@Override

@ -16,7 +16,7 @@
package org.hyperledger.besu.cli.options.stable;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.MINIMUM_BONSAI_TRIE_LOG_RETENTION_THRESHOLD;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT;
import org.hyperledger.besu.cli.options.AbstractCLIOptionsTest;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
@ -32,50 +32,59 @@ public class DataStorageOptionsTest
public void bonsaiTrieLogPruningLimitOption() {
internalTestSuccess(
dataStorageConfiguration ->
assertThat(dataStorageConfiguration.getUnstable().getBonsaiTrieLogPruningLimit())
.isEqualTo(1),
"--Xbonsai-trie-log-pruning-enabled",
"--Xbonsai-trie-log-pruning-limit",
"1");
assertThat(dataStorageConfiguration.getUnstable().getBonsaiTrieLogPruningWindowSize())
.isEqualTo(600),
"--Xbonsai-limit-trie-logs-enabled",
"--Xbonsai-trie-logs-pruning-window-size",
"600");
}
@Test
public void bonsaiTrieLogPruningLimitShouldBePositive() {
public void bonsaiTrieLogPruningWindowSizeShouldBePositive() {
internalTestFailure(
"--Xbonsai-trie-log-pruning-limit=0 must be greater than 0",
"--Xbonsai-trie-log-pruning-enabled",
"--Xbonsai-trie-log-pruning-limit",
"--Xbonsai-trie-logs-pruning-window-size=0 must be greater than 0",
"--Xbonsai-limit-trie-logs-enabled",
"--Xbonsai-trie-logs-pruning-window-size",
"0");
}
@Test
public void bonsaiTrieLogRetentionThresholdOption() {
public void bonsaiTrieLogPruningWindowSizeShouldBeAboveRetentionLimit() {
internalTestFailure(
"--Xbonsai-trie-logs-pruning-window-size=512 must be greater than --bonsai-historical-block-limit=512",
"--Xbonsai-limit-trie-logs-enabled",
"--Xbonsai-trie-logs-pruning-window-size",
"512");
}
@Test
public void bonsaiTrieLogRetentionLimitOption() {
internalTestSuccess(
dataStorageConfiguration ->
assertThat(dataStorageConfiguration.getUnstable().getBonsaiTrieLogRetentionThreshold())
.isEqualTo(MINIMUM_BONSAI_TRIE_LOG_RETENTION_THRESHOLD + 1),
"--Xbonsai-trie-log-pruning-enabled",
"--Xbonsai-trie-log-retention-threshold",
assertThat(dataStorageConfiguration.getBonsaiMaxLayersToLoad())
.isEqualTo(MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT + 1),
"--Xbonsai-limit-trie-logs-enabled",
"--bonsai-historical-block-limit",
"513");
}
@Test
public void bonsaiTrieLogRetentionThresholdOption_boundaryTest() {
public void bonsaiTrieLogRetentionLimitOption_boundaryTest() {
internalTestSuccess(
dataStorageConfiguration ->
assertThat(dataStorageConfiguration.getUnstable().getBonsaiTrieLogRetentionThreshold())
.isEqualTo(MINIMUM_BONSAI_TRIE_LOG_RETENTION_THRESHOLD),
"--Xbonsai-trie-log-pruning-enabled",
"--Xbonsai-trie-log-retention-threshold",
assertThat(dataStorageConfiguration.getBonsaiMaxLayersToLoad())
.isEqualTo(MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT),
"--Xbonsai-limit-trie-logs-enabled",
"--bonsai-historical-block-limit",
"512");
}
@Test
public void bonsaiTrieLogRetentionThresholdShouldBeAboveMinimum() {
public void bonsaiTrieLogRetentionLimitShouldBeAboveMinimum() {
internalTestFailure(
"--Xbonsai-trie-log-retention-threshold minimum value is 512",
"--Xbonsai-trie-log-pruning-enabled",
"--Xbonsai-trie-log-retention-threshold",
"--bonsai-historical-block-limit minimum value is 512",
"--Xbonsai-limit-trie-logs-enabled",
"--bonsai-historical-block-limit",
"511");
}
@ -88,12 +97,11 @@ public class DataStorageOptionsTest
protected DataStorageConfiguration createCustomizedDomainObject() {
return ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(DataStorageFormat.BONSAI)
.bonsaiMaxLayersToLoad(100L)
.bonsaiMaxLayersToLoad(513L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiTrieLogPruningEnabled(true)
.bonsaiTrieLogRetentionThreshold(1000L)
.bonsaiTrieLogPruningLimit(20)
.bonsaiLimitTrieLogsEnabled(true)
.bonsaiTrieLogPruningWindowSize(514)
.build())
.build();
}
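
For readers following the option rename in this test: the trie log retention limit now reuses the stable --bonsai-historical-block-limit option (backed by bonsaiMaxLayersToLoad, minimum 512), while the pruning batch size is the experimental --Xbonsai-trie-logs-pruning-window-size, which must be strictly greater than the retention limit. A minimal sketch mirroring the builder calls exercised above (the values are illustrative only):

// Sketch only: a configuration that satisfies the constraints checked by these tests.
DataStorageConfiguration config =
    ImmutableDataStorageConfiguration.builder()
        .dataStorageFormat(DataStorageFormat.BONSAI)
        // --bonsai-historical-block-limit, minimum 512
        .bonsaiMaxLayersToLoad(512L)
        .unstable(
            ImmutableDataStorageConfiguration.Unstable.builder()
                // --Xbonsai-limit-trie-logs-enabled
                .bonsaiLimitTrieLogsEnabled(true)
                // --Xbonsai-trie-logs-pruning-window-size, must exceed the limit above
                .bonsaiTrieLogPruningWindowSize(1024)
                .build())
        .build();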

@ -15,11 +15,13 @@
package org.hyperledger.besu.cli.subcommands.storage;
import static java.util.Collections.singletonList;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageFormat.BONSAI;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import org.hyperledger.besu.datatypes.Hash;
@ -27,20 +29,26 @@ import org.hyperledger.besu.ethereum.chain.MutableBlockchain;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.BlockHeaderTestFixture;
import org.hyperledger.besu.ethereum.core.InMemoryKeyValueStorageProvider;
import org.hyperledger.besu.ethereum.rlp.BytesValueRLPOutput;
import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.ethereum.trie.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.bonsai.trielog.TrieLogFactoryImpl;
import org.hyperledger.besu.ethereum.trie.bonsai.trielog.TrieLogLayer;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.tuweni.bytes.Bytes;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
@ -53,20 +61,23 @@ class TrieLogHelperTest {
private static final StorageProvider storageProvider = new InMemoryKeyValueStorageProvider();
private static BonsaiWorldStateKeyValueStorage inMemoryWorldState;
private TrieLogHelper nonValidatingTrieLogHelper;
@Mock private MutableBlockchain blockchain;
@TempDir static Path dataDir;
private static class NonValidatingTrieLogHelper extends TrieLogHelper {
@Override
void validatePruneConfiguration(final DataStorageConfiguration config) {}
}
Path test;
@Mock private MutableBlockchain blockchain;
static BlockHeader blockHeader1;
static BlockHeader blockHeader2;
static BlockHeader blockHeader3;
static BlockHeader blockHeader4;
static BlockHeader blockHeader5;
@BeforeAll
public static void setup() throws IOException {
@BeforeEach
public void setup() throws IOException {
blockHeader1 = new BlockHeaderTestFixture().number(1).buildHeader();
blockHeader2 = new BlockHeaderTestFixture().number(2).buildHeader();
@ -78,33 +89,35 @@ class TrieLogHelperTest {
new BonsaiWorldStateKeyValueStorage(
storageProvider, new NoOpMetricsSystem(), DataStorageConfiguration.DEFAULT_CONFIG);
createTrieLog(blockHeader1);
var updater = inMemoryWorldState.updater();
updater
.getTrieLogStorageTransaction()
.put(blockHeader1.getHash().toArrayUnsafe(), Bytes.fromHexString("0x01").toArrayUnsafe());
.put(blockHeader1.getHash().toArrayUnsafe(), createTrieLog(blockHeader1));
updater
.getTrieLogStorageTransaction()
.put(blockHeader2.getHash().toArrayUnsafe(), Bytes.fromHexString("0x02").toArrayUnsafe());
.put(blockHeader2.getHash().toArrayUnsafe(), createTrieLog(blockHeader2));
updater
.getTrieLogStorageTransaction()
.put(blockHeader3.getHash().toArrayUnsafe(), Bytes.fromHexString("0x03").toArrayUnsafe());
.put(blockHeader3.getHash().toArrayUnsafe(), createTrieLog(blockHeader3));
updater
.getTrieLogStorageTransaction()
.put(blockHeader4.getHash().toArrayUnsafe(), Bytes.fromHexString("0x04").toArrayUnsafe());
.put(blockHeader4.getHash().toArrayUnsafe(), createTrieLog(blockHeader4));
updater
.getTrieLogStorageTransaction()
.put(blockHeader5.getHash().toArrayUnsafe(), Bytes.fromHexString("0x05").toArrayUnsafe());
.put(blockHeader5.getHash().toArrayUnsafe(), createTrieLog(blockHeader5));
updater.getTrieLogStorageTransaction().commit();
}
@BeforeEach
void createDirectory() throws IOException {
Files.createDirectories(dataDir.resolve("database"));
nonValidatingTrieLogHelper = new NonValidatingTrieLogHelper();
}
@AfterEach
void deleteDirectory() throws IOException {
Files.deleteIfExists(dataDir.resolve("database"));
private static byte[] createTrieLog(final BlockHeader blockHeader) {
TrieLogLayer trieLogLayer = new TrieLogLayer();
trieLogLayer.setBlockHash(blockHeader.getBlockHash());
final BytesValueRLPOutput rlpLog = new BytesValueRLPOutput();
TrieLogFactoryImpl.writeTo(trieLogLayer, rlpLog);
return rlpLog.encoded().toArrayUnsafe();
}
void mockBlockchainBase() {
@ -114,17 +127,17 @@ class TrieLogHelperTest {
}
@Test
public void prune() {
public void prune(final @TempDir Path dataDir) throws IOException {
Files.createDirectories(dataDir.resolve("database"));
DataStorageConfiguration dataStorageConfiguration =
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(2L)
.bonsaiMaxLayersToLoad(3L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiTrieLogRetentionThreshold(3)
.build()
.withBonsaiTrieLogRetentionThreshold(3))
.bonsaiLimitTrieLogsEnabled(true)
.build())
.build();
mockBlockchainBase();
@ -133,79 +146,76 @@ class TrieLogHelperTest {
when(blockchain.getBlockHeader(3)).thenReturn(Optional.of(blockHeader3));
// assert trie logs that will be pruned exist before prune call
assertArrayEquals(
inMemoryWorldState.getTrieLog(blockHeader1.getHash()).get(),
Bytes.fromHexString("0x01").toArrayUnsafe());
assertArrayEquals(
inMemoryWorldState.getTrieLog(blockHeader2.getHash()).get(),
Bytes.fromHexString("0x02").toArrayUnsafe());
assertArrayEquals(
inMemoryWorldState.getTrieLog(blockHeader3.getHash()).get(),
Bytes.fromHexString("0x03").toArrayUnsafe());
TrieLogHelper.prune(dataStorageConfiguration, inMemoryWorldState, blockchain, dataDir);
assertThat(inMemoryWorldState.getTrieLog(blockHeader1.getHash()).get())
.isEqualTo(createTrieLog(blockHeader1));
assertThat(inMemoryWorldState.getTrieLog(blockHeader2.getHash()).get())
.isEqualTo(createTrieLog(blockHeader2));
assertThat(inMemoryWorldState.getTrieLog(blockHeader3.getHash()).get())
.isEqualTo(createTrieLog(blockHeader3));
nonValidatingTrieLogHelper.prune(
dataStorageConfiguration, inMemoryWorldState, blockchain, dataDir);
// assert pruned trie logs are not in the DB
assertEquals(inMemoryWorldState.getTrieLog(blockHeader1.getHash()), Optional.empty());
assertEquals(inMemoryWorldState.getTrieLog(blockHeader2.getHash()), Optional.empty());
assertThat(inMemoryWorldState.getTrieLog(blockHeader1.getHash())).isEqualTo(Optional.empty());
assertThat(inMemoryWorldState.getTrieLog(blockHeader2.getHash())).isEqualTo(Optional.empty());
// assert retained trie logs are in the DB
assertArrayEquals(
inMemoryWorldState.getTrieLog(blockHeader3.getHash()).get(),
Bytes.fromHexString("0x03").toArrayUnsafe());
assertArrayEquals(
inMemoryWorldState.getTrieLog(blockHeader4.getHash()).get(),
Bytes.fromHexString("0x04").toArrayUnsafe());
assertArrayEquals(
inMemoryWorldState.getTrieLog(blockHeader5.getHash()).get(),
Bytes.fromHexString("0x05").toArrayUnsafe());
assertThat(inMemoryWorldState.getTrieLog(blockHeader3.getHash()).get())
.isEqualTo(createTrieLog(blockHeader3));
assertThat(inMemoryWorldState.getTrieLog(blockHeader4.getHash()).get())
.isEqualTo(createTrieLog(blockHeader4));
assertThat(inMemoryWorldState.getTrieLog(blockHeader5.getHash()).get())
.isEqualTo(createTrieLog(blockHeader5));
}
@Test
public void cantPruneIfNoFinalizedIsFound() {
public void cannotPruneIfNoFinalizedIsFound() {
DataStorageConfiguration dataStorageConfiguration =
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(2L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiTrieLogRetentionThreshold(2)
.build()
.withBonsaiTrieLogRetentionThreshold(2))
.bonsaiLimitTrieLogsEnabled(true)
.build())
.build();
when(blockchain.getChainHeadBlockNumber()).thenReturn(5L);
when(blockchain.getFinalized()).thenReturn(Optional.empty());
assertThrows(
RuntimeException.class,
() ->
TrieLogHelper.prune(dataStorageConfiguration, inMemoryWorldState, blockchain, dataDir));
assertThatThrownBy(
() ->
nonValidatingTrieLogHelper.prune(
dataStorageConfiguration, inMemoryWorldState, blockchain, Path.of("")))
.isInstanceOf(RuntimeException.class)
.hasMessage("No finalized block present, can't safely run trie log prune");
}
@Test
public void cantPruneIfUserRetainsMoreLayerThanExistingChainLength() {
public void cannotPruneIfUserRetainsMoreLayersThanExistingChainLength() {
DataStorageConfiguration dataStorageConfiguration =
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(2L)
.bonsaiMaxLayersToLoad(10L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiTrieLogRetentionThreshold(10)
.build()
.withBonsaiTrieLogRetentionThreshold(10))
.bonsaiLimitTrieLogsEnabled(true)
.build())
.build();
when(blockchain.getChainHeadBlockNumber()).thenReturn(5L);
assertThrows(
IllegalArgumentException.class,
() ->
TrieLogHelper.prune(dataStorageConfiguration, inMemoryWorldState, blockchain, dataDir));
assertThatThrownBy(
() ->
nonValidatingTrieLogHelper.prune(
dataStorageConfiguration, inMemoryWorldState, blockchain, Path.of("")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Trying to retain more trie logs than chain length (5), skipping pruning");
}
@Test
public void cantPruneIfUserRequiredFurtherThanFinalized() {
public void cannotPruneIfUserRequiredFurtherThanFinalized(final @TempDir Path dataDir) {
DataStorageConfiguration dataStorageConfiguration =
ImmutableDataStorageConfiguration.builder()
@ -213,54 +223,277 @@ class TrieLogHelperTest {
.bonsaiMaxLayersToLoad(2L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiTrieLogRetentionThreshold(2)
.build()
.withBonsaiTrieLogRetentionThreshold(2))
.bonsaiLimitTrieLogsEnabled(true)
.build())
.build();
mockBlockchainBase();
assertThrows(
IllegalArgumentException.class,
() ->
TrieLogHelper.prune(dataStorageConfiguration, inMemoryWorldState, blockchain, dataDir));
assertThatThrownBy(
() ->
nonValidatingTrieLogHelper.prune(
dataStorageConfiguration, inMemoryWorldState, blockchain, dataDir))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage(
"Trying to prune more layers than the finalized block height, skipping pruning");
}
@Test
public void exceptionWhileSavingFileStopsPruneProcess() throws IOException {
Files.delete(dataDir.resolve("database"));
public void skipPruningIfTrieLogCountIsLessThanMaxLayersToLoad() {
DataStorageConfiguration dataStorageConfiguration =
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(2L)
.bonsaiMaxLayersToLoad(6L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiLimitTrieLogsEnabled(true)
.build())
.build();
when(blockchain.getChainHeadBlockNumber()).thenReturn(5L);
assertThatThrownBy(
() ->
nonValidatingTrieLogHelper.prune(
dataStorageConfiguration, inMemoryWorldState, blockchain, Path.of("")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Trie log count (5) is less than retention limit (6), skipping pruning");
}
@Test
public void mismatchInPrunedTrieLogCountShouldNotDeleteFiles(final @TempDir Path dataDir)
throws IOException {
Files.createDirectories(dataDir.resolve("database"));
DataStorageConfiguration dataStorageConfiguration =
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(3L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiLimitTrieLogsEnabled(true)
.build())
.build();
mockBlockchainBase();
when(blockchain.getBlockHeader(5)).thenReturn(Optional.of(blockHeader5));
when(blockchain.getBlockHeader(4)).thenReturn(Optional.of(blockHeader4));
when(blockchain.getBlockHeader(3)).thenReturn(Optional.of(blockHeader3));
final BonsaiWorldStateKeyValueStorage inMemoryWorldStateSpy = spy(inMemoryWorldState);
// force a different value the second time the trie log count is called
when(inMemoryWorldStateSpy.streamTrieLogKeys(3L + DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE))
.thenCallRealMethod()
.thenReturn(Stream.empty());
assertThatThrownBy(
() ->
nonValidatingTrieLogHelper.prune(
dataStorageConfiguration, inMemoryWorldStateSpy, blockchain, dataDir))
.isInstanceOf(RuntimeException.class)
.hasMessage(
"Remaining trie logs (0) did not match --bonsai-historical-block-limit (3). Trie logs backup files have not been deleted, it is safe to rerun the subcommand.");
}
@Test
public void trieLogRetentionLimitShouldBeAboveMinimum() {
DataStorageConfiguration dataStorageConfiguration =
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(511L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiLimitTrieLogsEnabled(true)
.build())
.build();
TrieLogHelper helper = new TrieLogHelper();
assertThatThrownBy(
() ->
helper.prune(dataStorageConfiguration, inMemoryWorldState, blockchain, Path.of("")))
.isInstanceOf(RuntimeException.class)
.hasMessage("--bonsai-historical-block-limit minimum value is 512");
}
@Test
public void trieLogPruningWindowSizeShouldBePositive() {
DataStorageConfiguration dataStorageConfiguration =
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(512L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiLimitTrieLogsEnabled(true)
.bonsaiTrieLogPruningWindowSize(0)
.build())
.build();
TrieLogHelper helper = new TrieLogHelper();
assertThatThrownBy(
() ->
helper.prune(dataStorageConfiguration, inMemoryWorldState, blockchain, Path.of("")))
.isInstanceOf(RuntimeException.class)
.hasMessage("--Xbonsai-trie-logs-pruning-window-size=0 must be greater than 0");
}
@Test
public void trieLogPruningWindowSizeShouldBeAboveRetentionLimit() {
DataStorageConfiguration dataStorageConfiguration =
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(512L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiTrieLogRetentionThreshold(2)
.build()
.withBonsaiTrieLogRetentionThreshold(2))
.bonsaiLimitTrieLogsEnabled(true)
.bonsaiTrieLogPruningWindowSize(512)
.build())
.build();
assertThrows(
RuntimeException.class,
() ->
TrieLogHelper.prune(dataStorageConfiguration, inMemoryWorldState, blockchain, dataDir));
TrieLogHelper helper = new TrieLogHelper();
assertThatThrownBy(
() ->
helper.prune(dataStorageConfiguration, inMemoryWorldState, blockchain, Path.of("")))
.isInstanceOf(RuntimeException.class)
.hasMessage(
"--Xbonsai-trie-logs-pruning-window-size=512 must be greater than --bonsai-historical-block-limit=512");
}
@Test
public void exceptionWhileSavingFileStopsPruneProcess(final @TempDir Path dataDir) {
DataStorageConfiguration dataStorageConfiguration =
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(BONSAI)
.bonsaiMaxLayersToLoad(3L)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiLimitTrieLogsEnabled(true)
.build())
.build();
mockBlockchainBase();
when(blockchain.getBlockHeader(5)).thenReturn(Optional.of(blockHeader5));
when(blockchain.getBlockHeader(4)).thenReturn(Optional.of(blockHeader4));
when(blockchain.getBlockHeader(3)).thenReturn(Optional.of(blockHeader3));
assertThatThrownBy(
() ->
nonValidatingTrieLogHelper.prune(
dataStorageConfiguration,
inMemoryWorldState,
blockchain,
dataDir.resolve("unknownPath")))
.isInstanceOf(RuntimeException.class)
.hasCauseExactlyInstanceOf(FileNotFoundException.class);
// assert all trie logs are still in the DB
assertArrayEquals(
inMemoryWorldState.getTrieLog(blockHeader1.getHash()).get(),
Bytes.fromHexString("0x01").toArrayUnsafe());
assertArrayEquals(
inMemoryWorldState.getTrieLog(blockHeader2.getHash()).get(),
Bytes.fromHexString("0x02").toArrayUnsafe());
assertArrayEquals(
inMemoryWorldState.getTrieLog(blockHeader3.getHash()).get(),
Bytes.fromHexString("0x03").toArrayUnsafe());
assertArrayEquals(
inMemoryWorldState.getTrieLog(blockHeader4.getHash()).get(),
Bytes.fromHexString("0x04").toArrayUnsafe());
assertArrayEquals(
inMemoryWorldState.getTrieLog(blockHeader5.getHash()).get(),
Bytes.fromHexString("0x05").toArrayUnsafe());
assertThat(inMemoryWorldState.getTrieLog(blockHeader1.getHash()).get())
.isEqualTo(createTrieLog(blockHeader1));
assertThat(inMemoryWorldState.getTrieLog(blockHeader2.getHash()).get())
.isEqualTo(createTrieLog(blockHeader2));
assertThat(inMemoryWorldState.getTrieLog(blockHeader3.getHash()).get())
.isEqualTo(createTrieLog(blockHeader3));
assertThat(inMemoryWorldState.getTrieLog(blockHeader4.getHash()).get())
.isEqualTo(createTrieLog(blockHeader4));
assertThat(inMemoryWorldState.getTrieLog(blockHeader5.getHash()).get())
.isEqualTo(createTrieLog(blockHeader5));
}
@Test
public void exportedTrieMatchesDbTrieLog(final @TempDir Path dataDir) throws IOException {
nonValidatingTrieLogHelper.exportTrieLog(
inMemoryWorldState,
singletonList(blockHeader1.getHash()),
dataDir.resolve("trie-log-dump"));
var trieLog =
nonValidatingTrieLogHelper
.readTrieLogsAsRlpFromFile(dataDir.resolve("trie-log-dump").toString())
.entrySet()
.stream()
.findFirst()
.get();
assertThat(trieLog.getKey()).isEqualTo(blockHeader1.getHash().toArrayUnsafe());
assertThat(trieLog.getValue())
.isEqualTo(inMemoryWorldState.getTrieLog(blockHeader1.getHash()).get());
}
@Test
public void exportedMultipleTriesMatchDbTrieLogs(final @TempDir Path dataDir) throws IOException {
nonValidatingTrieLogHelper.exportTrieLog(
inMemoryWorldState,
List.of(blockHeader1.getHash(), blockHeader2.getHash(), blockHeader3.getHash()),
dataDir.resolve("trie-log-dump"));
var trieLogs =
nonValidatingTrieLogHelper
.readTrieLogsAsRlpFromFile(dataDir.resolve("trie-log-dump").toString())
.entrySet()
.stream()
.collect(Collectors.toMap(e -> Bytes.wrap(e.getKey()), Map.Entry::getValue));
assertThat(trieLogs.get(blockHeader1.getHash()))
.isEqualTo(inMemoryWorldState.getTrieLog(blockHeader1.getHash()).get());
assertThat(trieLogs.get(blockHeader2.getHash()))
.isEqualTo(inMemoryWorldState.getTrieLog(blockHeader2.getHash()).get());
assertThat(trieLogs.get(blockHeader3.getHash()))
.isEqualTo(inMemoryWorldState.getTrieLog(blockHeader3.getHash()).get());
}
@Test
public void importedTrieLogMatchesDbTrieLog(final @TempDir Path dataDir) throws IOException {
StorageProvider tempStorageProvider = new InMemoryKeyValueStorageProvider();
BonsaiWorldStateKeyValueStorage inMemoryWorldState2 =
new BonsaiWorldStateKeyValueStorage(
tempStorageProvider, new NoOpMetricsSystem(), DataStorageConfiguration.DEFAULT_CONFIG);
nonValidatingTrieLogHelper.exportTrieLog(
inMemoryWorldState,
singletonList(blockHeader1.getHash()),
dataDir.resolve("trie-log-dump"));
var trieLog =
nonValidatingTrieLogHelper.readTrieLogsAsRlpFromFile(
dataDir.resolve("trie-log-dump").toString());
var updater = inMemoryWorldState2.updater();
trieLog.forEach((k, v) -> updater.getTrieLogStorageTransaction().put(k, v));
updater.getTrieLogStorageTransaction().commit();
assertThat(inMemoryWorldState2.getTrieLog(blockHeader1.getHash()).get())
.isEqualTo(inMemoryWorldState.getTrieLog(blockHeader1.getHash()).get());
}
@Test
public void importedMultipleTriesMatchDbTrieLogs(final @TempDir Path dataDir) throws IOException {
StorageProvider tempStorageProvider = new InMemoryKeyValueStorageProvider();
BonsaiWorldStateKeyValueStorage inMemoryWorldState2 =
new BonsaiWorldStateKeyValueStorage(
tempStorageProvider, new NoOpMetricsSystem(), DataStorageConfiguration.DEFAULT_CONFIG);
nonValidatingTrieLogHelper.exportTrieLog(
inMemoryWorldState,
List.of(blockHeader1.getHash(), blockHeader2.getHash(), blockHeader3.getHash()),
dataDir.resolve("trie-log-dump"));
var trieLog =
nonValidatingTrieLogHelper.readTrieLogsAsRlpFromFile(
dataDir.resolve("trie-log-dump").toString());
var updater = inMemoryWorldState2.updater();
trieLog.forEach((k, v) -> updater.getTrieLogStorageTransaction().put(k, v));
updater.getTrieLogStorageTransaction().commit();
assertThat(inMemoryWorldState2.getTrieLog(blockHeader1.getHash()).get())
.isEqualTo(inMemoryWorldState.getTrieLog(blockHeader1.getHash()).get());
assertThat(inMemoryWorldState2.getTrieLog(blockHeader2.getHash()).get())
.isEqualTo(inMemoryWorldState.getTrieLog(blockHeader2.getHash()).get());
assertThat(inMemoryWorldState2.getTrieLog(blockHeader3.getHash()).get())
.isEqualTo(inMemoryWorldState.getTrieLog(blockHeader3.getHash()).get());
}
}
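
Taken together, the new helper methods round-trip trie logs through an RLP dump file. A condensed sketch of that flow, using only the helper and storage calls exercised by the tests above (variable names are illustrative, and the enclosing method is assumed to declare throws IOException):

TrieLogHelper helper = new TrieLogHelper();
Path dump = dataDir.resolve("trie-log-dump");

// Export selected trie logs from the source storage to an RLP file...
helper.exportTrieLog(sourceWorldStateStorage, List.of(blockHash), dump);

// ...then read them back and replay them into another storage instance.
var exported = helper.readTrieLogsAsRlpFromFile(dump.toString());
var updater = targetWorldStateStorage.updater();
exported.forEach((key, rlpBytes) -> updater.getTrieLogStorageTransaction().put(key, rlpBytes));
updater.getTrieLogStorageTransaction().commit();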

@ -145,7 +145,7 @@ public class ConfigOptionSearchAndRunHandlerTest {
public void shouldRetrieveConfigFromEnvironmentWhenConfigFileSpecified() throws Exception {
final IDefaultValueProvider defaultValueProvider =
configParsingHandler.createDefaultValueProvider(
mockCommandLine, Optional.of(new File("foo")));
mockCommandLine, Optional.of(new File("foo")), Optional.empty());
final String value = defaultValueProvider.defaultValue(OptionSpec.builder("--logging").build());
assertThat(value).isEqualTo("ERROR");
}
@ -153,7 +153,8 @@ public class ConfigOptionSearchAndRunHandlerTest {
@Test
public void shouldRetrieveConfigFromEnvironmentWhenConfigFileNotSpecified() throws Exception {
final IDefaultValueProvider defaultValueProvider =
configParsingHandler.createDefaultValueProvider(mockCommandLine, Optional.empty());
configParsingHandler.createDefaultValueProvider(
mockCommandLine, Optional.empty(), Optional.empty());
final String value = defaultValueProvider.defaultValue(OptionSpec.builder("--logging").build());
assertThat(value).isEqualTo("ERROR");
}

@ -115,6 +115,7 @@ class TraceServiceImplTest {
.getTransactions()
.forEach(
tx -> {
verify(opTracer).tracePrepareTransaction(any(), eq(tx));
verify(opTracer).traceStartTransaction(any(), eq(tx));
verify(opTracer)
.traceEndTransaction(
@ -162,6 +163,7 @@ class TraceServiceImplTest {
.getTransactions()
.forEach(
tx -> {
verify(opTracer).tracePrepareTransaction(any(), eq(tx));
verify(opTracer).traceStartTransaction(any(), eq(tx));
verify(opTracer)
.traceEndTransaction(

@ -16,7 +16,7 @@ node-private-key-file="./path/to/privateKey"
pid-path="~/.pid"
reorg-logging-threshold=0
static-nodes-file="~/besudata/static-nodes.json"
profile="NONE"
# Security Module plugin to use
security-module="localfile"
@ -90,6 +90,7 @@ rpc-max-logs-range=100
json-pretty-print-enabled=false
cache-last-blocks=512
rpc-gas-cap = 50000000
rpc-max-trace-filter-range=100
# PRIVACY TLS
privacy-tls-enabled=false

@ -2,3 +2,6 @@
#mining
miner-coinbase="0x0000000000000000000000000000000000000002"
#network
network="mainnet"

@ -77,4 +77,9 @@ public abstract class ApiConfiguration {
public Long getUpperBoundGasAndPriorityFeeCoefficient() {
return DEFAULT_UPPER_BOUND_GAS_AND_PRIORITY_FEE_COEFFICIENT;
}
@Value.Default
public Long getMaxTraceFilterRange() {
return 1000L;
}
}

@ -24,8 +24,10 @@ import org.hyperledger.besu.ethereum.api.jsonrpc.internal.parameters.FilterParam
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.processor.BlockTracer;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.processor.Tracer;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.processor.TransactionTrace;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcErrorResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcSuccessResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.RpcErrorType;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.results.tracing.flat.FlatTrace;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.results.tracing.flat.RewardTraceGenerator;
import org.hyperledger.besu.ethereum.api.query.BlockchainQueries;
@ -66,12 +68,15 @@ import org.slf4j.LoggerFactory;
public class TraceFilter extends TraceBlock {
private static final Logger LOG = LoggerFactory.getLogger(TraceFilter.class);
private final Long maxRange;
public TraceFilter(
final Supplier<BlockTracer> blockTracerSupplier,
final ProtocolSchedule protocolSchedule,
final BlockchainQueries blockchainQueries) {
final BlockchainQueries blockchainQueries,
final Long maxRange) {
super(protocolSchedule, blockchainQueries);
this.maxRange = maxRange;
}
@Override
@ -88,6 +93,17 @@ public class TraceFilter extends TraceBlock {
final long toBlock = resolveBlockNumber(filterParameter.getToBlock());
LOG.trace("Received RPC rpcName={} fromBlock={} toBlock={}", getName(), fromBlock, toBlock);
if (maxRange > 0 && toBlock - fromBlock > maxRange) {
LOG.atDebug()
.setMessage("trace_filter request {} failed:")
.addArgument(requestContext.getRequest())
.setCause(
new IllegalArgumentException(RpcErrorType.EXCEEDS_RPC_MAX_BLOCK_RANGE.getMessage()))
.log();
return new JsonRpcErrorResponse(
requestContext.getRequest().getId(), RpcErrorType.EXCEEDS_RPC_MAX_BLOCK_RANGE);
}
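// Example: with the default maxRange of 1000 (ApiConfiguration.getMaxTraceFilterRange),
// fromBlock=0 and toBlock=1001 gives 1001 - 0 = 1001 > 1000, so the request is rejected
// with EXCEEDS_RPC_MAX_BLOCK_RANGE, while toBlock=1000 is still served. A non-positive
// configured maxRange disables the check entirely.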
final ObjectMapper mapper = new ObjectMapper();
final ArrayNodeWrapper resultArrayNode =
new ArrayNodeWrapper(

@ -60,7 +60,11 @@ public class TraceJsonRpcMethods extends ApiGroupJsonRpcMethods {
new BlockReplay(protocolSchedule, blockchainQueries.getBlockchain());
return mapOf(
new TraceReplayBlockTransactions(protocolSchedule, blockchainQueries),
new TraceFilter(() -> new BlockTracer(blockReplay), protocolSchedule, blockchainQueries),
new TraceFilter(
() -> new BlockTracer(blockReplay),
protocolSchedule,
blockchainQueries,
apiConfiguration.getMaxTraceFilterRange()),
new TraceGet(() -> new BlockTracer(blockReplay), blockchainQueries, protocolSchedule),
new TraceTransaction(
() -> new BlockTracer(blockReplay), protocolSchedule, blockchainQueries),

@ -0,0 +1,79 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods;
import static org.assertj.core.api.Assertions.assertThat;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequest;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.parameters.BlockParameter;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.parameters.FilterParameter;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.processor.BlockTracer;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcErrorResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.RpcErrorType;
import org.hyperledger.besu.ethereum.api.query.BlockchainQueries;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import java.util.function.Supplier;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
@ExtendWith(MockitoExtension.class)
public class TraceFilterTest {
private TraceFilter method;
@Mock Supplier<BlockTracer> blockTracerSupplier;
@Mock ProtocolSchedule protocolSchedule;
@Mock BlockchainQueries blockchainQueries;
@ParameterizedTest
@CsvSource({
"0, 1001, 1000", "0, 5000, 1000", "1, 1002, 1000", "1, 6002, 1000", "1000, 3000, 1000",
"0, 501, 500", "0, 5000, 500", "1, 502, 500", "1, 6002, 500", "1000, 3000, 500"
})
public void shouldFailIfParamsExceedMaxRange(
final long fromBlock, final long toBlock, final long maxFilterRange) {
final FilterParameter filterParameter =
new FilterParameter(
new BlockParameter(fromBlock),
new BlockParameter(toBlock),
null,
null,
null,
null,
null,
null,
null);
JsonRpcRequestContext request =
new JsonRpcRequestContext(
new JsonRpcRequest("2.0", "trace_filter", new Object[] {filterParameter}));
method =
new TraceFilter(blockTracerSupplier, protocolSchedule, blockchainQueries, maxFilterRange);
final JsonRpcResponse response = method.response(request);
assertThat(response).isInstanceOf(JsonRpcErrorResponse.class);
final JsonRpcErrorResponse errorResponse = (JsonRpcErrorResponse) response;
assertThat(errorResponse.getErrorType()).isEqualTo(RpcErrorType.EXCEEDS_RPC_MAX_BLOCK_RANGE);
}
}

@ -293,6 +293,8 @@ public class MainnetTransactionProcessor {
return TransactionProcessingResult.invalid(validationResult);
}
operationTracer.tracePrepareTransaction(worldState, transaction);
final long previousNonce = sender.incrementNonce();
LOG.trace(
"Incremented sender {} nonce ({} -> {})",

@ -16,18 +16,21 @@ package org.hyperledger.besu.ethereum.storage.keyvalue;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import java.nio.charset.StandardCharsets;
import org.bouncycastle.util.Arrays;
public enum KeyValueSegmentIdentifier implements SegmentIdentifier {
BLOCKCHAIN(new byte[] {1}, true),
WORLD_STATE(new byte[] {2}, new int[] {0, 1}),
DEFAULT("default".getBytes(StandardCharsets.UTF_8)),
BLOCKCHAIN(new byte[] {1}, true, true),
WORLD_STATE(new byte[] {2}, new int[] {0, 1}, false, true),
PRIVATE_TRANSACTIONS(new byte[] {3}),
PRIVATE_STATE(new byte[] {4}),
PRUNING_STATE(new byte[] {5}, new int[] {0, 1}),
ACCOUNT_INFO_STATE(new byte[] {6}, new int[] {2}),
ACCOUNT_INFO_STATE(new byte[] {6}, new int[] {2}, false, true),
CODE_STORAGE(new byte[] {7}, new int[] {2}),
ACCOUNT_STORAGE_STORAGE(new byte[] {8}, new int[] {2}),
TRIE_BRANCH_STORAGE(new byte[] {9}, new int[] {2}),
ACCOUNT_STORAGE_STORAGE(new byte[] {8}, new int[] {2}, false, true),
TRIE_BRANCH_STORAGE(new byte[] {9}, new int[] {2}, false, true),
TRIE_LOG_STORAGE(new byte[] {10}, new int[] {2}),
VARIABLES(new byte[] {11}), // formerly GOQUORUM_PRIVATE_WORLD_STATE
@ -45,24 +48,30 @@ public enum KeyValueSegmentIdentifier implements SegmentIdentifier {
private final byte[] id;
private final int[] versionList;
private final boolean containsStaticData;
private final boolean eligibleToHighSpecFlag;
KeyValueSegmentIdentifier(final byte[] id) {
this(id, new int[] {0, 1, 2});
}
KeyValueSegmentIdentifier(final byte[] id, final boolean containsStaticData) {
this(id, new int[] {0, 1, 2}, containsStaticData);
KeyValueSegmentIdentifier(
final byte[] id, final boolean containsStaticData, final boolean eligibleToHighSpecFlag) {
this(id, new int[] {0, 1, 2}, containsStaticData, eligibleToHighSpecFlag);
}
KeyValueSegmentIdentifier(final byte[] id, final int[] versionList) {
this(id, versionList, false);
this(id, versionList, false, false);
}
KeyValueSegmentIdentifier(
final byte[] id, final int[] versionList, final boolean containsStaticData) {
final byte[] id,
final int[] versionList,
final boolean containsStaticData,
final boolean eligibleToHighSpecFlag) {
this.id = id;
this.versionList = versionList;
this.containsStaticData = containsStaticData;
this.eligibleToHighSpecFlag = eligibleToHighSpecFlag;
}
@Override
@ -80,6 +89,11 @@ public enum KeyValueSegmentIdentifier implements SegmentIdentifier {
return containsStaticData;
}
@Override
public boolean isEligibleToHighSpecFlag() {
return eligibleToHighSpecFlag;
}
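// The extra constructor argument marks segments that may opt into the database high-spec
// flag. For illustration only (the name and id byte here are hypothetical), a new eligible
// segment would be declared as: EXAMPLE_SEGMENT(new byte[] {99}, new int[] {2}, false, true)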
@Override
public boolean includeInDatabaseVersion(final int version) {
return Arrays.contains(versionList, version);

@ -42,6 +42,17 @@ public class BonsaiValue<T> implements TrieLog.LogTuple<T> {
this.clearedAtLeastOnce = lastStepCleared;
}
public BonsaiValue(
final T prior,
final T updated,
final boolean lastStepCleared,
final boolean clearedAtLeastOnce) {
this.prior = prior;
this.updated = updated;
this.lastStepCleared = lastStepCleared;
this.clearedAtLeastOnce = clearedAtLeastOnce;
}
@Override
public T getPrior() {
return prior;
@ -117,4 +128,8 @@ public class BonsaiValue<T> implements TrieLog.LogTuple<T> {
.append(lastStepCleared)
.toHashCode();
}
public BonsaiValue<T> copy() {
return new BonsaiValue<T>(prior, updated, lastStepCleared, clearedAtLeastOnce);
}
}

@ -31,7 +31,6 @@ import org.hyperledger.besu.ethereum.trie.bonsai.cache.CachedMerkleTrieLoader;
import org.hyperledger.besu.ethereum.trie.bonsai.cache.CachedWorldStorageManager;
import org.hyperledger.besu.ethereum.trie.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.bonsai.trielog.TrieLogManager;
import org.hyperledger.besu.ethereum.trie.bonsai.trielog.TrieLogPruner;
import org.hyperledger.besu.ethereum.trie.bonsai.worldview.BonsaiWorldState;
import org.hyperledger.besu.ethereum.trie.bonsai.worldview.BonsaiWorldStateUpdateAccumulator;
import org.hyperledger.besu.ethereum.trie.patricia.StoredMerklePatriciaTrie;
@ -74,8 +73,7 @@ public class BonsaiWorldStateProvider implements WorldStateArchive {
final Optional<Long> maxLayersToLoad,
final CachedMerkleTrieLoader cachedMerkleTrieLoader,
final BesuContext pluginContext,
final EvmConfiguration evmConfiguration,
final TrieLogPruner trieLogPruner) {
final EvmConfiguration evmConfiguration) {
this.worldStateKeyValueStorage = worldStateKeyValueStorage;
this.cachedWorldStorageManager = new CachedWorldStorageManager(this, worldStateKeyValueStorage);
@ -86,8 +84,7 @@ public class BonsaiWorldStateProvider implements WorldStateArchive {
blockchain,
worldStateKeyValueStorage,
maxLayersToLoad.orElse(RETAINED_LAYERS),
pluginContext,
trieLogPruner);
pluginContext);
this.blockchain = blockchain;
this.cachedMerkleTrieLoader = cachedMerkleTrieLoader;
this.persistedState = new BonsaiWorldState(this, worldStateKeyValueStorage, evmConfiguration);

@ -25,7 +25,7 @@ import java.util.Optional;
public class NoOpTrieLogManager extends TrieLogManager {
public NoOpTrieLogManager() {
super(null, null, 0, null, TrieLogPruner.noOpTrieLogPruner());
super(null, null, 0, null);
}
@Override

@ -47,19 +47,16 @@ public class TrieLogManager {
protected final Subscribers<TrieLogEvent.TrieLogObserver> trieLogObservers = Subscribers.create();
protected final TrieLogFactory trieLogFactory;
private final TrieLogPruner trieLogPruner;
public TrieLogManager(
final Blockchain blockchain,
final BonsaiWorldStateKeyValueStorage worldStateKeyValueStorage,
final long maxLayersToLoad,
final BesuContext pluginContext,
final TrieLogPruner trieLogPruner) {
final BesuContext pluginContext) {
this.blockchain = blockchain;
this.rootWorldStateStorage = worldStateKeyValueStorage;
this.maxLayersToLoad = maxLayersToLoad;
this.trieLogFactory = setupTrieLogFactory(pluginContext);
this.trieLogPruner = trieLogPruner;
}
public synchronized void saveTrieLog(
@ -85,8 +82,6 @@ public class TrieLogManager {
} finally {
if (success) {
stateUpdater.commit();
trieLogPruner.addToPruneQueue(forBlockHeader.getNumber(), forBlockHeader.getBlockHash());
trieLogPruner.pruneFromQueue();
} else {
stateUpdater.rollback();
}

@ -20,10 +20,12 @@ import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.ProcessableBlockHeader;
import org.hyperledger.besu.ethereum.trie.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.plugin.services.trielogs.TrieLogEvent;
import java.util.Comparator;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;
import java.util.stream.Stream;
import com.google.common.collect.ArrayListMultimap;
@ -33,7 +35,7 @@ import org.apache.tuweni.bytes.Bytes32;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class TrieLogPruner {
public class TrieLogPruner implements TrieLogEvent.TrieLogObserver {
private static final Logger LOG = LoggerFactory.getLogger(TrieLogPruner.class);
@ -41,6 +43,7 @@ public class TrieLogPruner {
private final int loadingLimit;
private final BonsaiWorldStateKeyValueStorage rootWorldStateStorage;
private final Blockchain blockchain;
private final Consumer<Runnable> executeAsync;
private final long numBlocksToRetain;
private final boolean requireFinalizedBlock;
@ -50,11 +53,13 @@ public class TrieLogPruner {
public TrieLogPruner(
final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
final Blockchain blockchain,
final Consumer<Runnable> executeAsync,
final long numBlocksToRetain,
final int pruningLimit,
final boolean requireFinalizedBlock) {
this.rootWorldStateStorage = rootWorldStateStorage;
this.blockchain = blockchain;
this.executeAsync = executeAsync;
this.numBlocksToRetain = numBlocksToRetain;
this.pruningLimit = pruningLimit;
this.loadingLimit = pruningLimit; // same as pruningLimit for now
@ -166,34 +171,18 @@ public class TrieLogPruner {
return wasPruned.size();
}
public static TrieLogPruner noOpTrieLogPruner() {
return new NoOpTrieLogPruner(null, null, 0, 0);
}
public static class NoOpTrieLogPruner extends TrieLogPruner {
private NoOpTrieLogPruner(
final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
final Blockchain blockchain,
final long numBlocksToRetain,
final int pruningLimit) {
super(rootWorldStateStorage, blockchain, numBlocksToRetain, pruningLimit, true);
}
@Override
public int initialize() {
// no-op
return -1;
}
@Override
void addToPruneQueue(final long blockNumber, final Hash blockHash) {
// no-op
}
@Override
int pruneFromQueue() {
// no-op
return -1;
@Override
public void onTrieLogAdded(final TrieLogEvent event) {
if (TrieLogEvent.Type.ADDED.equals(event.getType())) {
final Hash blockHash = event.layer().getBlockHash();
final Optional<Long> blockNumber = event.layer().getBlockNumber();
blockNumber.ifPresent(
blockNum ->
executeAsync.accept(
() -> {
addToPruneQueue(blockNum, blockHash);
pruneFromQueue();
}));
}
}
}
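
TrieLogPruner now reacts to trie log additions itself (as a TrieLogEvent.TrieLogObserver) and defers the actual pruning to the injected Consumer<Runnable>. A minimal wiring sketch, assuming the pruner is handed an asynchronous executor such as the EthScheduler.executeServiceTask method introduced later in this change; how the pruner is registered as an observer is not shown in this diff and is an assumption here:

TrieLogPruner pruner =
    new TrieLogPruner(
        worldStateKeyValueStorage,
        blockchain,
        ethScheduler::executeServiceTask, // Consumer<Runnable> executeAsync
        512,     // numBlocksToRetain
        30_000,  // pruningLimit (also used as the loading limit)
        true);   // requireFinalizedBlock, i.e. only prune behind the finalized block
pruner.initialize(); // preload the prune queue from the existing trie logs
// From here on, each TrieLogAddedEvent enqueues the new layer and prunes on the executor.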

@ -121,12 +121,12 @@ public class BonsaiWorldState
}
/**
* Having a protected method to override the accumulator solves the chicken-and-egg problem of
* needing a worldstate reference (this) when constructing the Accumulator.
* Overriding the accumulator solves the chicken-and-egg problem of needing a worldstate
* reference (this) when constructing the Accumulator.
*
* @param accumulator accumulator to use.
*/
protected void setAccumulator(final BonsaiWorldStateUpdateAccumulator accumulator) {
public void setAccumulator(final BonsaiWorldStateUpdateAccumulator accumulator) {
this.accumulator = accumulator;
}

@ -60,13 +60,13 @@ public class BonsaiWorldStateUpdateAccumulator
implements BonsaiWorldView, TrieLogAccumulator {
private static final Logger LOG =
LoggerFactory.getLogger(BonsaiWorldStateUpdateAccumulator.class);
private final Consumer<BonsaiValue<BonsaiAccount>> accountPreloader;
private final Consumer<StorageSlotKey> storagePreloader;
protected final Consumer<BonsaiValue<BonsaiAccount>> accountPreloader;
protected final Consumer<StorageSlotKey> storagePreloader;
private final AccountConsumingMap<BonsaiValue<BonsaiAccount>> accountsToUpdate;
private final Map<Address, BonsaiValue<Bytes>> codeToUpdate = new ConcurrentHashMap<>();
private final Set<Address> storageToClear = Collections.synchronizedSet(new HashSet<>());
private final EvmConfiguration evmConfiguration;
protected final EvmConfiguration evmConfiguration;
// storage sub mapped by _hashed_ key. This is because in self_destruct calls we need to
// enumerate the old storage and delete it. Those are trie stored by hashed key by spec and the
@ -74,7 +74,7 @@ public class BonsaiWorldStateUpdateAccumulator
private final Map<Address, StorageConsumingMap<StorageSlotKey, BonsaiValue<UInt256>>>
storageToUpdate = new ConcurrentHashMap<>();
private boolean isAccumulatorStateChanged;
protected boolean isAccumulatorStateChanged;
public BonsaiWorldStateUpdateAccumulator(
final BonsaiWorldView world,

@ -43,27 +43,21 @@ public interface DataStorageConfiguration {
@Value.Immutable
interface Unstable {
boolean DEFAULT_BONSAI_TRIE_LOG_PRUNING_ENABLED = false;
long DEFAULT_BONSAI_TRIE_LOG_RETENTION_THRESHOLD = 512L;
long MINIMUM_BONSAI_TRIE_LOG_RETENTION_THRESHOLD = DEFAULT_BONSAI_TRIE_LOG_RETENTION_THRESHOLD;
int DEFAULT_BONSAI_TRIE_LOG_PRUNING_LIMIT = 30_000;
boolean DEFAULT_BONSAI_LIMIT_TRIE_LOGS_ENABLED = false;
long MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT = DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD;
int DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE = 30_000;
DataStorageConfiguration.Unstable DEFAULT =
ImmutableDataStorageConfiguration.Unstable.builder().build();
@Value.Default
default boolean getBonsaiTrieLogPruningEnabled() {
return DEFAULT_BONSAI_TRIE_LOG_PRUNING_ENABLED;
default boolean getBonsaiLimitTrieLogsEnabled() {
return DEFAULT_BONSAI_LIMIT_TRIE_LOGS_ENABLED;
}
@Value.Default
default long getBonsaiTrieLogRetentionThreshold() {
return DEFAULT_BONSAI_TRIE_LOG_RETENTION_THRESHOLD;
}
@Value.Default
default int getBonsaiTrieLogPruningLimit() {
return DEFAULT_BONSAI_TRIE_LOG_PRUNING_LIMIT;
default int getBonsaiTrieLogPruningWindowSize() {
return DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
}
}
}

@ -30,7 +30,6 @@ import org.hyperledger.besu.ethereum.storage.keyvalue.WorldStatePreimageKeyValue
import org.hyperledger.besu.ethereum.trie.bonsai.BonsaiWorldStateProvider;
import org.hyperledger.besu.ethereum.trie.bonsai.cache.CachedMerkleTrieLoader;
import org.hyperledger.besu.ethereum.trie.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.bonsai.trielog.TrieLogPruner;
import org.hyperledger.besu.ethereum.trie.forest.ForestWorldStateArchive;
import org.hyperledger.besu.ethereum.trie.forest.storage.ForestWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.forest.worldview.ForestMutableWorldState;
@ -115,8 +114,7 @@ public class InMemoryKeyValueStorageProvider extends KeyValueStorageProvider {
Optional.empty(),
cachedMerkleTrieLoader,
null,
evmConfiguration,
TrieLogPruner.noOpTrieLogPruner());
evmConfiguration);
}
public static MutableWorldState createInMemoryWorldState() {

@ -67,7 +67,6 @@ import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProviderBuilder;
import org.hyperledger.besu.ethereum.trie.bonsai.cache.CachedMerkleTrieLoader;
import org.hyperledger.besu.ethereum.trie.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.bonsai.trielog.TrieLogPruner;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration;
@ -163,8 +162,7 @@ public abstract class AbstractIsolationTests {
Optional.of(16L),
new CachedMerkleTrieLoader(new NoOpMetricsSystem()),
null,
EvmConfiguration.DEFAULT,
TrieLogPruner.noOpTrieLogPruner());
EvmConfiguration.DEFAULT);
var ws = archive.getMutable();
genesisState.writeStateTo(ws);
protocolContext = new ProtocolContext(blockchain, archive, null, Optional.empty());

@ -42,7 +42,6 @@ import org.hyperledger.besu.ethereum.trie.bonsai.storage.BonsaiWorldStateKeyValu
import org.hyperledger.besu.ethereum.trie.bonsai.trielog.TrieLogFactoryImpl;
import org.hyperledger.besu.ethereum.trie.bonsai.trielog.TrieLogLayer;
import org.hyperledger.besu.ethereum.trie.bonsai.trielog.TrieLogManager;
import org.hyperledger.besu.ethereum.trie.bonsai.trielog.TrieLogPruner;
import org.hyperledger.besu.ethereum.trie.bonsai.worldview.BonsaiWorldState;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.evm.internal.EvmConfiguration;
@ -127,8 +126,7 @@ class BonsaiWorldStateProviderTest {
Optional.of(512L),
new CachedMerkleTrieLoader(new NoOpMetricsSystem()),
null,
EvmConfiguration.DEFAULT,
TrieLogPruner.noOpTrieLogPruner());
EvmConfiguration.DEFAULT);
final BlockHeader blockHeader = blockBuilder.number(0).buildHeader();
final BlockHeader chainHead = blockBuilder.number(512).buildHeader();
when(blockchain.getChainHeadHeader()).thenReturn(chainHead);

@ -16,7 +16,6 @@
package org.hyperledger.besu.ethereum.trie.bonsai.trielog;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hyperledger.besu.ethereum.trie.bonsai.trielog.TrieLogPruner.noOpTrieLogPruner;
import static org.mockito.Mockito.spy;
import org.hyperledger.besu.datatypes.Hash;
@ -57,9 +56,7 @@ class TrieLogManagerTests {
@BeforeEach
public void setup() {
trieLogManager =
new TrieLogManager(
blockchain, bonsaiWorldStateKeyValueStorage, 512, null, noOpTrieLogPruner());
trieLogManager = new TrieLogManager(blockchain, bonsaiWorldStateKeyValueStorage, 512, null);
}
@Test

@ -29,6 +29,7 @@ import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.trie.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import java.util.Optional;
import java.util.function.Consumer;
import java.util.stream.Stream;
import org.apache.logging.log4j.Level;
@ -44,6 +45,7 @@ public class TrieLogPrunerTest {
private BonsaiWorldStateKeyValueStorage worldState;
private Blockchain blockchain;
private final Consumer<Runnable> executeAsync = Runnable::run;
@SuppressWarnings("BannedMethod")
@BeforeEach
@ -67,7 +69,8 @@ public class TrieLogPrunerTest {
when(blockchain.getBlockHeader(header2.getBlockHash())).thenReturn(Optional.empty());
// When
TrieLogPruner trieLogPruner = new TrieLogPruner(worldState, blockchain, 3, loadingLimit, false);
TrieLogPruner trieLogPruner =
new TrieLogPruner(worldState, blockchain, executeAsync, 3, loadingLimit, false);
trieLogPruner.initialize();
// Then
@ -86,7 +89,8 @@ public class TrieLogPrunerTest {
when(worldState.pruneTrieLog(any(Hash.class))).thenReturn(true);
// requireFinalizedBlock = false means this is not a PoS chain
TrieLogPruner trieLogPruner =
new TrieLogPruner(worldState, blockchain, blocksToRetain, pruningWindowSize, false);
new TrieLogPruner(
worldState, blockchain, executeAsync, blocksToRetain, pruningWindowSize, false);
trieLogPruner.addToPruneQueue(0, key(0)); // older block outside prune window
trieLogPruner.addToPruneQueue(1, key(1)); // block inside the prune window
@ -194,7 +198,8 @@ public class TrieLogPrunerTest {
final int pruningWindowSize = (int) chainHeight;
when(blockchain.getChainHeadBlockNumber()).thenReturn(chainHeight);
TrieLogPruner trieLogPruner =
new TrieLogPruner(worldState, blockchain, blocksToRetain, pruningWindowSize, true);
new TrieLogPruner(
worldState, blockchain, executeAsync, blocksToRetain, pruningWindowSize, true);
trieLogPruner.addToPruneQueue(1, key(1));
trieLogPruner.addToPruneQueue(2, key(2));
@ -228,6 +233,46 @@ public class TrieLogPrunerTest {
assertThat(trieLogPruner.pruneFromQueue()).isEqualTo(1);
}
@Test
public void onTrieLogAdded_should_prune() {
// Given
final TriggerableConsumer triggerableConsumer = new TriggerableConsumer();
TrieLogPruner trieLogPruner =
new TrieLogPruner(worldState, blockchain, triggerableConsumer, 0, 1, false);
assertThat(trieLogPruner.pruneFromQueue()).isEqualTo(0);
final TrieLogLayer layer = new TrieLogLayer();
layer.setBlockNumber(1L);
layer.setBlockHash(key(1));
when(blockchain.getChainHeadBlockNumber()).thenReturn(1L);
// When
trieLogPruner.onTrieLogAdded(new TrieLogAddedEvent(layer));
verify(worldState, never()).pruneTrieLog(key(1));
triggerableConsumer.run();
// Then
verify(worldState, times(1)).pruneTrieLog(key(1));
}
@Test
public void onTrieLogAdded_should_not_prune_when_no_blockNumber() {
// Given
TrieLogPruner trieLogPruner =
new TrieLogPruner(worldState, blockchain, executeAsync, 0, 1, false);
assertThat(trieLogPruner.pruneFromQueue()).isEqualTo(0);
final TrieLogLayer layer = new TrieLogLayer();
layer.setBlockHash(key(1));
when(blockchain.getChainHeadBlockNumber()).thenReturn(1L);
// When
trieLogPruner.onTrieLogAdded(new TrieLogAddedEvent(layer));
// Then
verify(worldState, never()).pruneTrieLog(key(1));
}
private TrieLogPruner setupPrunerAndFinalizedBlock(
final long configuredRetainHeight, final long finalizedBlockHeight) {
final long chainHeight = 5;
@ -241,7 +286,8 @@ public class TrieLogPrunerTest {
.thenReturn(Optional.of(finalizedHeader));
when(blockchain.getChainHeadBlockNumber()).thenReturn(chainHeight);
TrieLogPruner trieLogPruner =
new TrieLogPruner(worldState, blockchain, blocksToRetain, pruningWindowSize, true);
new TrieLogPruner(
worldState, blockchain, executeAsync, blocksToRetain, pruningWindowSize, true);
trieLogPruner.addToPruneQueue(1, key(1));
trieLogPruner.addToPruneQueue(2, key(2));
@ -255,4 +301,18 @@ public class TrieLogPrunerTest {
private Hash key(final int k) {
return Hash.hash(Bytes.of(k));
}
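// Captures the Runnable handed to the pruner so the test can trigger the asynchronous prune
// explicitly at the point it chooses.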
private static class TriggerableConsumer implements Consumer<Runnable> {
private Runnable runnable;
@Override
public void accept(final Runnable runnable) {
this.runnable = runnable;
}
public void run() {
runnable.run();
}
}
}

@ -139,6 +139,7 @@ public class EthPeers {
"peer_limit",
"The maximum number of peers this node allows to connect",
() -> peerUpperBound);
connectedPeersCounter =
metricsSystem.createCounter(
BesuMetricCategory.PEERS, "connected_total", "Total number of peers connected");

@ -110,7 +110,7 @@ public class EthProtocolManager implements ProtocolManager, MinedBlockObserver {
this.blockBroadcaster = new BlockBroadcaster(ethContext);
supportedCapabilities =
this.supportedCapabilities =
calculateCapabilities(synchronizerConfiguration, ethereumWireProtocolConfiguration);
// Run validators
@ -252,11 +252,14 @@ public class EthProtocolManager implements ProtocolManager, MinedBlockObserver {
@Override
public void stop() {
if (stopped.compareAndSet(false, true)) {
LOG.info("Stopping {} Subprotocol.", getSupportedProtocol());
LOG.atInfo().setMessage("Stopping {} Subprotocol.").addArgument(getSupportedProtocol()).log();
scheduler.stop();
shutdown.countDown();
} else {
LOG.error("Attempted to stop already stopped {} Subprotocol.", getSupportedProtocol());
LOG.atInfo()
.setMessage("Attempted to stop already stopped {} Subprotocol.")
.addArgument(this::getSupportedProtocol)
.log();
}
}
@ -264,7 +267,10 @@ public class EthProtocolManager implements ProtocolManager, MinedBlockObserver {
public void awaitStop() throws InterruptedException {
shutdown.await();
scheduler.awaitStop();
LOG.info("{} Subprotocol stopped.", getSupportedProtocol());
LOG.atInfo()
.setMessage("{} Subprotocol stopped.")
.addArgument(this::getSupportedProtocol)
.log();
}
@Override
@ -277,8 +283,10 @@ public class EthProtocolManager implements ProtocolManager, MinedBlockObserver {
EthProtocolLogger.logProcessMessage(cap, code);
final EthPeer ethPeer = ethPeers.peer(message.getConnection());
if (ethPeer == null) {
LOG.debug(
"Ignoring message received from unknown peer connection: {}", message.getConnection());
LOG.atDebug()
.setMessage("Ignoring message received from unknown peer connection: {}")
.addArgument(message::getConnection)
.log();
return;
}
@ -288,19 +296,24 @@ public class EthProtocolManager implements ProtocolManager, MinedBlockObserver {
return;
} else if (!ethPeer.statusHasBeenReceived()) {
// Peers are required to send status messages before any other message type
LOG.debug(
"{} requires a Status ({}) message to be sent first. Instead, received message {} (BREACH_OF_PROTOCOL). Disconnecting from {}.",
this.getClass().getSimpleName(),
EthPV62.STATUS,
code,
ethPeer);
LOG.atDebug()
.setMessage(
"{} requires a Status ({}) message to be sent first. Instead, received message {} (BREACH_OF_PROTOCOL). Disconnecting from {}.")
.addArgument(() -> this.getClass().getSimpleName())
.addArgument(EthPV62.STATUS)
.addArgument(code)
.addArgument(ethPeer::toString)
.log();
ethPeer.disconnect(DisconnectReason.BREACH_OF_PROTOCOL);
return;
}
if (this.mergePeerFilter.isPresent()) {
if (this.mergePeerFilter.get().disconnectIfGossipingBlocks(message, ethPeer)) {
LOG.debug("Post-merge disconnect: peer still gossiping blocks {}", ethPeer);
LOG.atDebug()
.setMessage("Post-merge disconnect: peer still gossiping blocks {}")
.addArgument(ethPeer::toString)
.log();
handleDisconnect(ethPeer.getConnection(), DisconnectReason.SUBPROTOCOL_TRIGGERED, false);
return;
}
@ -333,11 +346,12 @@ public class EthProtocolManager implements ProtocolManager, MinedBlockObserver {
maybeResponseData = ethMessages.dispatch(ethMessage);
}
} catch (final RLPException e) {
LOG.debug(
"Received malformed message {} (BREACH_OF_PROTOCOL), disconnecting: {}",
messageData.getData(),
ethPeer,
e);
LOG.atDebug()
.setMessage("Received malformed message {} (BREACH_OF_PROTOCOL), disconnecting: {}, {}")
.addArgument(messageData::getData)
.addArgument(ethPeer::toString)
.addArgument(e::toString)
.log();
ethPeer.disconnect(DisconnectMessage.DisconnectReason.BREACH_OF_PROTOCOL);
}
@ -368,23 +382,31 @@ public class EthProtocolManager implements ProtocolManager, MinedBlockObserver {
genesisHash,
latestForkId);
try {
LOG.trace("Sending status message to {} for connection {}.", peer.getId(), connection);
LOG.atTrace()
.setMessage("Sending status message to {} for connection {}.")
.addArgument(peer::getId)
.addArgument(connection::toString)
.log();
peer.send(status, getSupportedProtocol(), connection);
peer.registerStatusSent(connection);
} catch (final PeerNotConnected peerNotConnected) {
// Nothing to do.
}
LOG.trace("{}", ethPeers);
LOG.atTrace().setMessage("{}").addArgument(ethPeers::toString).log();
}
@Override
public boolean shouldConnect(final Peer peer, final boolean incoming) {
if (peer.getForkId().map(forkId -> forkIdManager.peerCheck(forkId)).orElse(true)) {
LOG.trace("ForkId OK or not available");
if (peer.getForkId().map(forkIdManager::peerCheck).orElse(true)) {
LOG.atDebug()
.setMessage("ForkId OK or not available for peer {}")
.addArgument(peer::getId)
.log();
if (ethPeers.shouldConnect(peer, incoming)) {
return true;
}
}
LOG.atDebug().setMessage("ForkId check failed for peer {}").addArgument(peer::getId).log();
return false;
}
@ -397,11 +419,11 @@ public class EthProtocolManager implements ProtocolManager, MinedBlockObserver {
LOG.atDebug()
.setMessage("Disconnect - {} - {} - {}... - {} peers left")
.addArgument(initiatedByPeer ? "Inbound" : "Outbound")
.addArgument(reason)
.addArgument(connection.getPeer().getId().slice(0, 8))
.addArgument(ethPeers.peerCount())
.addArgument(reason::toString)
.addArgument(() -> connection.getPeer().getId().slice(0, 8))
.addArgument(ethPeers::peerCount)
.log();
LOG.trace("{}", ethPeers);
LOG.atTrace().setMessage("{}").addArgument(ethPeers::toString).log();
}
}
@ -412,43 +434,41 @@ public class EthProtocolManager implements ProtocolManager, MinedBlockObserver {
try {
if (!status.networkId().equals(networkId)) {
LOG.atDebug()
.setMessage("Mismatched network id: {}, EthPeer {}...")
.addArgument(status.networkId())
.addArgument(peer.getShortNodeId())
.log();
LOG.atTrace()
.setMessage("Mismatched network id: {}, EthPeer {}")
.addArgument(status.networkId())
.addArgument(peer)
.setMessage("Mismatched network id: {}, peer {}")
.addArgument(status::networkId)
.addArgument(() -> getPeerOrPeerId(peer))
.log();
peer.disconnect(DisconnectReason.SUBPROTOCOL_TRIGGERED);
} else if (!forkIdManager.peerCheck(forkId) && status.protocolVersion() > 63) {
LOG.debug(
"{} has matching network id ({}), but non-matching fork id: {}",
peer,
networkId,
forkId);
LOG.atDebug()
.setMessage("{} has matching network id ({}), but non-matching fork id: {}")
.addArgument(() -> getPeerOrPeerId(peer))
.addArgument(networkId::toString)
.addArgument(forkId)
.log();
peer.disconnect(DisconnectReason.SUBPROTOCOL_TRIGGERED);
} else if (forkIdManager.peerCheck(status.genesisHash())) {
LOG.debug(
"{} has matching network id ({}), but non-matching genesis hash: {}",
peer,
networkId,
status.genesisHash());
LOG.atDebug()
.setMessage("{} has matching network id ({}), but non-matching genesis hash: {}")
.addArgument(() -> getPeerOrPeerId(peer))
.addArgument(networkId::toString)
.addArgument(status::genesisHash)
.log();
peer.disconnect(DisconnectReason.SUBPROTOCOL_TRIGGERED);
} else if (mergePeerFilter.isPresent()
&& mergePeerFilter.get().disconnectIfPoW(status, peer)) {
LOG.atDebug()
.setMessage("Post-merge disconnect: peer still PoW {}")
.addArgument(peer.getShortNodeId())
.addArgument(() -> getPeerOrPeerId(peer))
.log();
handleDisconnect(peer.getConnection(), DisconnectReason.SUBPROTOCOL_TRIGGERED, false);
} else {
LOG.debug(
"Received status message from {}: {} with connection {}",
peer,
status,
message.getConnection());
LOG.atDebug()
.setMessage("Received status message from {}: {} with connection {}")
.addArgument(peer::toString)
.addArgument(status::toString)
.addArgument(message::getConnection)
.log();
peer.registerStatusReceived(
status.bestHash(),
status.totalDifficulty(),
@ -467,6 +487,10 @@ public class EthProtocolManager implements ProtocolManager, MinedBlockObserver {
}
}
private Object getPeerOrPeerId(final EthPeer peer) {
return LOG.isTraceEnabled() ? peer : peer.getShortNodeId();
}
@Override
public void blockMined(final Block block) {
// This assumes the block has already been included in the chain

@ -142,8 +142,8 @@ public class EthScheduler {
txWorkerExecutor.execute(command);
}
public <T> CompletableFuture<T> scheduleServiceTask(final Supplier<T> task) {
return CompletableFuture.supplyAsync(task, servicesExecutor);
public void executeServiceTask(final Runnable command) {
servicesExecutor.execute(command);
}
public <T> CompletableFuture<T> scheduleServiceTask(final EthTask<T> task) {

@ -32,9 +32,9 @@ import java.util.concurrent.CompletableFuture;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class SyncTargetManager {
public abstract class AbstractSyncTargetManager {
private static final Logger LOG = LoggerFactory.getLogger(SyncTargetManager.class);
private static final Logger LOG = LoggerFactory.getLogger(AbstractSyncTargetManager.class);
private final SynchronizerConfiguration config;
private final ProtocolSchedule protocolSchedule;
@ -42,7 +42,7 @@ public abstract class SyncTargetManager {
private final EthContext ethContext;
private final MetricsSystem metricsSystem;
protected SyncTargetManager(
protected AbstractSyncTargetManager(
final SynchronizerConfiguration config,
final ProtocolSchedule protocolSchedule,
final ProtocolContext protocolContext,

@ -43,7 +43,7 @@ public class PipelineChainDownloader implements ChainDownloader {
private static final Logger LOG = LoggerFactory.getLogger(PipelineChainDownloader.class);
static final Duration PAUSE_AFTER_ERROR_DURATION = Duration.ofSeconds(2);
private final SyncState syncState;
private final SyncTargetManager syncTargetManager;
private final AbstractSyncTargetManager syncTargetManager;
private final DownloadPipelineFactory downloadPipelineFactory;
private final EthScheduler scheduler;
@ -55,7 +55,7 @@ public class PipelineChainDownloader implements ChainDownloader {
public PipelineChainDownloader(
final SyncState syncState,
final SyncTargetManager syncTargetManager,
final AbstractSyncTargetManager syncTargetManager,
final DownloadPipelineFactory downloadPipelineFactory,
final EthScheduler scheduler,
final MetricsSystem metricsSystem) {

@ -129,12 +129,16 @@ public class BackwardSyncContext {
backwardChain.addNewHash(newBlockHash);
}
final Status status = getOrStartSyncSession();
backwardChain
.getBlock(newBlockHash)
.ifPresent(
newTargetBlock -> status.updateTargetHeight(newTargetBlock.getHeader().getNumber()));
return status.currentFuture;
if (isReady()) {
final Status status = getOrStartSyncSession();
backwardChain
.getBlock(newBlockHash)
.ifPresent(
newTargetBlock -> status.updateTargetHeight(newTargetBlock.getHeader().getNumber()));
return status.currentFuture;
} else {
return CompletableFuture.failedFuture(new Throwable("Backward sync is not ready"));
}
}
public synchronized CompletableFuture<Void> syncBackwardsUntil(final Block newPivot) {
@ -142,9 +146,13 @@ public class BackwardSyncContext {
backwardChain.appendTrustedBlock(newPivot);
}
final Status status = getOrStartSyncSession();
status.updateTargetHeight(newPivot.getHeader().getNumber());
return status.currentFuture;
if (isReady()) {
final Status status = getOrStartSyncSession();
status.updateTargetHeight(newPivot.getHeader().getNumber());
return status.currentFuture;
} else {
return CompletableFuture.failedFuture(new Throwable("Backward sync is not ready"));
}
}
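With the new isReady() guard, both syncBackwards entry points return a failed future instead of opening a sync session when the context is not ready. A hedged sketch of how a caller might observe that outcome (the helper and its handling below are hypothetical, not taken from this change):

import java.util.concurrent.CompletableFuture;

class BackwardSyncCallerSketch {
  static CompletableFuture<Void> syncBackwardsUntil(final boolean ready) {
    // Mirrors the guard above: reject immediately when the context is not ready.
    return ready
        ? CompletableFuture.completedFuture(null)
        : CompletableFuture.failedFuture(new Throwable("Backward sync is not ready"));
  }

  public static void main(String[] args) {
    syncBackwardsUntil(false)
        .exceptionally(
            t -> {
              System.out.println("Backward sync rejected: " + t.getMessage());
              return null;
            });
  }
}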
private Status getOrStartSyncSession() {

@ -20,7 +20,7 @@ import org.hyperledger.besu.ethereum.eth.sync.PipelineChainDownloader;
import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.fastsync.FastSyncChainDownloader;
import org.hyperledger.besu.ethereum.eth.sync.fastsync.FastSyncState;
import org.hyperledger.besu.ethereum.eth.sync.fastsync.FastSyncTargetManager;
import org.hyperledger.besu.ethereum.eth.sync.fastsync.SyncTargetManager;
import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorageCoordinator;
@ -38,8 +38,8 @@ public class CheckpointSyncChainDownloader extends FastSyncChainDownloader {
final MetricsSystem metricsSystem,
final FastSyncState fastSyncState) {
final FastSyncTargetManager syncTargetManager =
new FastSyncTargetManager(
final SyncTargetManager syncTargetManager =
new SyncTargetManager(
config,
worldStateStorageCoordinator,
protocolSchedule,

@ -38,8 +38,8 @@ public class FastSyncChainDownloader {
final MetricsSystem metricsSystem,
final FastSyncState fastSyncState) {
final FastSyncTargetManager syncTargetManager =
new FastSyncTargetManager(
final SyncTargetManager syncTargetManager =
new SyncTargetManager(
config,
worldStateStorageCoordinator,
protocolSchedule,

@ -136,8 +136,8 @@ public class FastSyncDownloadPipelineFactory implements DownloadPipelineFactory
new DownloadBodiesStep(protocolSchedule, ethContext, metricsSystem);
final DownloadReceiptsStep downloadReceiptsStep =
new DownloadReceiptsStep(ethContext, metricsSystem);
final FastImportBlocksStep importBlockStep =
new FastImportBlocksStep(
final ImportBlocksStep importBlockStep =
new ImportBlocksStep(
protocolSchedule,
protocolContext,
attachedValidationPolicy,

@ -45,7 +45,9 @@ public class FastSyncDownloader<REQUEST> {
private static final Duration FAST_SYNC_RETRY_DELAY = Duration.ofSeconds(5);
private static final Logger LOG = LoggerFactory.getLogger(FastSyncDownloader.class);
@SuppressWarnings("PrivateStaticFinalLoggers")
protected final Logger LOG = LoggerFactory.getLogger(getClass());
private final WorldStateStorageCoordinator worldStateStorageCoordinator;
private final WorldStateDownloader worldStateDownloader;
private final TaskCollection<REQUEST> taskCollection;
@ -76,7 +78,7 @@ public class FastSyncDownloader<REQUEST> {
public CompletableFuture<FastSyncState> start() {
if (!running.compareAndSet(false, true)) {
throw new IllegalStateException("FastSyncDownloader already running");
throw new IllegalStateException("SyncDownloader already running");
}
LOG.info("Starting sync");
return start(initialFastSyncState);
@ -112,7 +114,7 @@ public class FastSyncDownloader<REQUEST> {
protected CompletableFuture<FastSyncState> handleFailure(final Throwable error) {
trailingPeerRequirements = Optional.empty();
Throwable rootCause = ExceptionUtils.rootCause(error);
if (rootCause instanceof FastSyncException) {
if (rootCause instanceof SyncException) {
return CompletableFuture.failedFuture(error);
} else if (rootCause instanceof StalledDownloadException) {
LOG.debug("Stalled sync re-pivoting to newer block.");
@ -125,7 +127,7 @@ public class FastSyncDownloader<REQUEST> {
return start(FastSyncState.EMPTY_SYNC_STATE);
} else {
LOG.error(
"Encountered an unexpected error during fast sync. Restarting sync in "
"Encountered an unexpected error during sync. Restarting sync in "
+ FAST_SYNC_RETRY_DELAY.getSeconds()
+ " seconds.",
error);
@ -137,7 +139,7 @@ public class FastSyncDownloader<REQUEST> {
public void stop() {
synchronized (this) {
if (running.compareAndSet(true, false)) {
LOG.info("Stopping fast sync");
LOG.info("Stopping sync");
// Cancelling the world state download will also cause the chain download to be cancelled.
worldStateDownloader.cancel();
}
@ -154,7 +156,7 @@ public class FastSyncDownloader<REQUEST> {
MoreFiles.deleteRecursively(fastSyncDataDirectory, RecursiveDeleteOption.ALLOW_INSECURE);
}
} catch (final IOException e) {
LOG.error("Unable to clean up fast sync state", e);
LOG.error("Unable to clean up sync state", e);
}
}

@ -33,8 +33,8 @@ import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class FastImportBlocksStep implements Consumer<List<BlockWithReceipts>> {
private static final Logger LOG = LoggerFactory.getLogger(FastImportBlocksStep.class);
public class ImportBlocksStep implements Consumer<List<BlockWithReceipts>> {
private static final Logger LOG = LoggerFactory.getLogger(ImportBlocksStep.class);
private static final long PRINT_DELAY = TimeUnit.SECONDS.toMillis(30L);
private final ProtocolSchedule protocolSchedule;
@ -46,7 +46,7 @@ public class FastImportBlocksStep implements Consumer<List<BlockWithReceipts>> {
private OptionalLong logStartBlock = OptionalLong.empty();
private final BlockHeader pivotHeader;
public FastImportBlocksStep(
public ImportBlocksStep(
final ProtocolSchedule protocolSchedule,
final ProtocolContext protocolContext,
final ValidationPolicy headerValidationPolicy,

@ -156,8 +156,7 @@ public class PivotBlockRetriever {
|| pivotBlockNumber.get() <= BlockHeader.GENESIS_BLOCK_NUMBER) {
LOG.info("Max retries reached, cancel pivot block download.");
// Pivot block selection has failed
result.completeExceptionally(
new FastSyncException(FastSyncError.PIVOT_BLOCK_HEADER_MISMATCH));
result.completeExceptionally(new SyncException(SyncError.PIVOT_BLOCK_HEADER_MISMATCH));
return;
} else {
LOG.info("Move pivot block back to {} and retry.", pivotBlockNumber);

@ -14,7 +14,7 @@
*/
package org.hyperledger.besu.ethereum.eth.sync.fastsync;
public enum FastSyncError {
public enum SyncError {
NO_PEERS_AVAILABLE,
PIVOT_BLOCK_HEADER_MISMATCH,
UNEXPECTED_ERROR

@ -14,21 +14,21 @@
*/
package org.hyperledger.besu.ethereum.eth.sync.fastsync;
public class FastSyncException extends RuntimeException {
public class SyncException extends RuntimeException {
private final FastSyncError error;
private final SyncError error;
public FastSyncException(final FastSyncError error) {
super("Fast sync failed: " + error);
public SyncException(final SyncError error) {
super("Sync failed: " + error);
this.error = error;
}
public FastSyncError getError() {
public SyncError getError() {
return error;
}
public FastSyncException(final Throwable error) {
public SyncException(final Throwable error) {
super(error);
this.error = FastSyncError.UNEXPECTED_ERROR;
this.error = SyncError.UNEXPECTED_ERROR;
}
}

@ -23,7 +23,7 @@ import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.eth.manager.EthContext;
import org.hyperledger.besu.ethereum.eth.manager.EthPeer;
import org.hyperledger.besu.ethereum.eth.manager.EthPeers;
import org.hyperledger.besu.ethereum.eth.sync.SyncTargetManager;
import org.hyperledger.besu.ethereum.eth.sync.AbstractSyncTargetManager;
import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.tasks.RetryingGetHeaderFromPeerByNumberTask;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
@ -39,8 +39,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class FastSyncTargetManager extends SyncTargetManager {
private static final Logger LOG = LoggerFactory.getLogger(FastSyncTargetManager.class);
public class SyncTargetManager extends AbstractSyncTargetManager {
private static final Logger LOG = LoggerFactory.getLogger(SyncTargetManager.class);
private final WorldStateStorageCoordinator worldStateStorageCoordinator;
private final ProtocolSchedule protocolSchedule;
@ -53,7 +53,7 @@ public class FastSyncTargetManager extends SyncTargetManager {
private final int logDebugRepeatDelay = 15;
private final int logInfoRepeatDelay = 120;
public FastSyncTargetManager(
public SyncTargetManager(
final SynchronizerConfiguration config,
final WorldStateStorageCoordinator worldStateStorageCoordinator,
final ProtocolSchedule protocolSchedule,

@ -21,7 +21,7 @@ import org.hyperledger.besu.ethereum.chain.MutableBlockchain;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.eth.manager.EthContext;
import org.hyperledger.besu.ethereum.eth.manager.EthPeer;
import org.hyperledger.besu.ethereum.eth.sync.SyncTargetManager;
import org.hyperledger.besu.ethereum.eth.sync.AbstractSyncTargetManager;
import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.state.SyncTarget;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
@ -34,7 +34,7 @@ import java.util.concurrent.CompletableFuture;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
class FullSyncTargetManager extends SyncTargetManager {
class FullSyncTargetManager extends AbstractSyncTargetManager {
private static final Logger LOG = LoggerFactory.getLogger(FullSyncTargetManager.class);
private final ProtocolContext protocolContext;

@ -26,13 +26,8 @@ import org.hyperledger.besu.services.tasks.TaskCollection;
import java.nio.file.Path;
import java.util.concurrent.CompletableFuture;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SnapSyncDownloader extends FastSyncDownloader<SnapDataRequest> {
private static final Logger LOG = LoggerFactory.getLogger(SnapSyncDownloader.class);
public SnapSyncDownloader(
final FastSyncActions fastSyncActions,
final WorldStateStorageCoordinator worldStateStorageCoordinator,

@ -15,7 +15,7 @@
package org.hyperledger.besu.ethereum.eth.sync.snapsync;
import static io.netty.util.internal.ObjectUtil.checkNonEmpty;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapsyncMetricsManager.Step.HEAL_TRIE;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncMetricsManager.Step.HEAL_TRIE;
import org.hyperledger.besu.ethereum.eth.manager.EthContext;
import org.hyperledger.besu.metrics.BesuMetricCategory;
@ -37,9 +37,9 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Manages the metrics related to the SnapSync process. */
public class SnapsyncMetricsManager {
public class SnapSyncMetricsManager {
private static final Logger LOG = LoggerFactory.getLogger(SnapsyncMetricsManager.class);
private static final Logger LOG = LoggerFactory.getLogger(SnapSyncMetricsManager.class);
private static final long PRINT_DELAY = TimeUnit.MINUTES.toMillis(1);
private final MetricsSystem metricsSystem;
@ -79,7 +79,7 @@ public class SnapsyncMetricsManager {
private long lastNotifyTimestamp;
public SnapsyncMetricsManager(final MetricsSystem metricsSystem, final EthContext ethContext) {
public SnapSyncMetricsManager(final MetricsSystem metricsSystem, final EthContext ethContext) {
this.metricsSystem = metricsSystem;
this.ethContext = ethContext;
percentageProgress = new AtomicReference<>(new BigDecimal(0));

@ -87,7 +87,7 @@ public class SnapWorldDownloadState extends WorldDownloadState<SnapDataRequest>
private OptionalLong blockObserverId;
// metrics around the snapsync
private final SnapsyncMetricsManager metricsManager;
private final SnapSyncMetricsManager metricsManager;
public SnapWorldDownloadState(
final WorldStateStorageCoordinator worldStateStorageCoordinator,
@ -97,7 +97,7 @@ public class SnapWorldDownloadState extends WorldDownloadState<SnapDataRequest>
final InMemoryTasksPriorityQueues<SnapDataRequest> pendingRequests,
final int maxRequestsWithoutProgress,
final long minMillisBeforeStalling,
final SnapsyncMetricsManager metricsManager,
final SnapSyncMetricsManager metricsManager,
final Clock clock) {
super(
worldStateStorageCoordinator,
@ -417,7 +417,7 @@ public class SnapWorldDownloadState extends WorldDownloadState<SnapDataRequest>
__ -> {});
}
public SnapsyncMetricsManager getMetricsManager() {
public SnapSyncMetricsManager getMetricsManager() {
return metricsManager;
}

@ -14,7 +14,7 @@
*/
package org.hyperledger.besu.ethereum.eth.sync.snapsync;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapsyncMetricsManager.Step.DOWNLOAD;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncMetricsManager.Step.DOWNLOAD;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest.createAccountRangeDataRequest;
import org.hyperledger.besu.datatypes.Hash;
@ -133,8 +133,8 @@ public class SnapWorldStateDownloader implements WorldStateDownloader {
stateRoot,
snapTaskCollection.size());
final SnapsyncMetricsManager snapsyncMetricsManager =
new SnapsyncMetricsManager(metricsSystem, ethContext);
final SnapSyncMetricsManager snapsyncMetricsManager =
new SnapSyncMetricsManager(metricsSystem, ethContext);
final SnapWorldDownloadState newDownloadState =
new SnapWorldDownloadState(

@ -18,7 +18,7 @@ import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager.MAX_R
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager.MIN_RANGE;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager.findNewBeginElementInRange;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RequestType.ACCOUNT_RANGE;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapsyncMetricsManager.Step.DOWNLOAD;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncMetricsManager.Step.DOWNLOAD;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.StackTrie.FlatDatabaseUpdater.noop;
import static org.hyperledger.besu.ethereum.worldstate.WorldStateStorageCoordinator.applyForStrategy;

@ -16,7 +16,7 @@ package org.hyperledger.besu.ethereum.eth.sync.snapsync.request.heal;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager.MAX_RANGE;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager.MIN_RANGE;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapsyncMetricsManager.Step.HEAL_FLAT;
import static org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncMetricsManager.Step.HEAL_FLAT;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager;

@ -52,7 +52,7 @@ import org.mockito.junit.jupiter.MockitoExtension;
@ExtendWith(MockitoExtension.class)
public class PipelineChainDownloaderTest {
@Mock private SyncTargetManager syncTargetManager;
@Mock private AbstractSyncTargetManager syncTargetManager;
@Mock private DownloadPipelineFactory downloadPipelineFactory;
@Mock private EthScheduler scheduler;
@Mock private Pipeline<?> downloadPipeline;

@ -48,7 +48,7 @@ import org.hyperledger.besu.ethereum.mainnet.MainnetBlockHeaderFunctions;
import org.hyperledger.besu.ethereum.mainnet.MainnetProtocolSchedule;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSpec;
import org.hyperledger.besu.ethereum.referencetests.DefaultReferenceTestWorldState;
import org.hyperledger.besu.ethereum.referencetests.ForestReferenceTestWorldState;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;
@ -154,7 +154,7 @@ public class BackwardSyncContextTest {
new BlockProcessingOutputs(
// use forest-based worldstate since it does not require
// blockheader stateroot to match actual worldstate root
DefaultReferenceTestWorldState.create(Collections.emptyMap()),
ForestReferenceTestWorldState.create(Collections.emptyMap()),
blockDataGenerator.receipts(block))));
});

@ -38,7 +38,7 @@ import org.hyperledger.besu.ethereum.eth.manager.RespondingEthPeer;
import org.hyperledger.besu.ethereum.mainnet.MainnetBlockHeaderFunctions;
import org.hyperledger.besu.ethereum.mainnet.MainnetProtocolSchedule;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.referencetests.DefaultReferenceTestWorldState;
import org.hyperledger.besu.ethereum.referencetests.ForestReferenceTestWorldState;
import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;
import java.nio.charset.StandardCharsets;
@ -138,7 +138,7 @@ public class ForwardSyncStepTest {
return new BlockProcessingResult(
Optional.of(
new BlockProcessingOutputs(
DefaultReferenceTestWorldState.create(Collections.emptyMap()),
ForestReferenceTestWorldState.create(Collections.emptyMap()),
blockDataGenerator.receipts(block))));
});
}

@ -22,11 +22,11 @@ public class FastImportBlocksPercentageCalculationTest {
@Test
public void blocksPercent_calculations() {
assertThat(FastImportBlocksStep.getBlocksPercent(1, 1)).isEqualByComparingTo(100l);
assertThat(FastImportBlocksStep.getBlocksPercent(1, 100)).isEqualByComparingTo(1l);
assertThat(FastImportBlocksStep.getBlocksPercent(0, 100)).isEqualByComparingTo(0l);
assertThat(FastImportBlocksStep.getBlocksPercent(99, 0)).isEqualByComparingTo(0l);
assertThat(FastImportBlocksStep.getBlocksPercent(1, 1000)).isEqualByComparingTo(0l);
assertThat(FastImportBlocksStep.getBlocksPercent(1, 10000)).isEqualByComparingTo(0l);
assertThat(ImportBlocksStep.getBlocksPercent(1, 1)).isEqualByComparingTo(100l);
assertThat(ImportBlocksStep.getBlocksPercent(1, 100)).isEqualByComparingTo(1l);
assertThat(ImportBlocksStep.getBlocksPercent(0, 100)).isEqualByComparingTo(0l);
assertThat(ImportBlocksStep.getBlocksPercent(99, 0)).isEqualByComparingTo(0l);
assertThat(ImportBlocksStep.getBlocksPercent(1, 1000)).isEqualByComparingTo(0l);
assertThat(ImportBlocksStep.getBlocksPercent(1, 10000)).isEqualByComparingTo(0l);
}
}

@ -181,11 +181,11 @@ public class FastSyncDownloaderTest {
public void shouldAbortIfSelectPivotBlockFails(final DataStorageFormat dataStorageFormat) {
setup(dataStorageFormat);
when(fastSyncActions.selectPivotBlock(FastSyncState.EMPTY_SYNC_STATE))
.thenThrow(new FastSyncException(FastSyncError.UNEXPECTED_ERROR));
.thenThrow(new SyncException(SyncError.UNEXPECTED_ERROR));
final CompletableFuture<FastSyncState> result = downloader.start();
assertCompletedExceptionally(result, FastSyncError.UNEXPECTED_ERROR);
assertCompletedExceptionally(result, SyncError.UNEXPECTED_ERROR);
verify(fastSyncActions).selectPivotBlock(FastSyncState.EMPTY_SYNC_STATE);
verifyNoMoreInteractions(fastSyncActions);
@ -224,10 +224,10 @@ public class FastSyncDownloaderTest {
assertThat(result).isNotDone();
worldStateFuture.completeExceptionally(new FastSyncException(FastSyncError.NO_PEERS_AVAILABLE));
worldStateFuture.completeExceptionally(new SyncException(SyncError.NO_PEERS_AVAILABLE));
verify(chainDownloader).cancel();
chainFuture.completeExceptionally(new CancellationException());
assertCompletedExceptionally(result, FastSyncError.NO_PEERS_AVAILABLE);
assertCompletedExceptionally(result, SyncError.NO_PEERS_AVAILABLE);
assertThat(chainFuture).isCancelled();
}
@ -264,8 +264,8 @@ public class FastSyncDownloaderTest {
assertThat(result).isNotDone();
chainFuture.completeExceptionally(new FastSyncException(FastSyncError.NO_PEERS_AVAILABLE));
assertCompletedExceptionally(result, FastSyncError.NO_PEERS_AVAILABLE);
chainFuture.completeExceptionally(new SyncException(SyncError.NO_PEERS_AVAILABLE));
assertCompletedExceptionally(result, SyncError.NO_PEERS_AVAILABLE);
assertThat(worldStateFuture).isCancelled();
}
@ -594,13 +594,13 @@ public class FastSyncDownloaderTest {
}
private <T> void assertCompletedExceptionally(
final CompletableFuture<T> future, final FastSyncError expectedError) {
final CompletableFuture<T> future, final SyncError expectedError) {
assertThat(future).isCompletedExceptionally();
future.exceptionally(
actualError -> {
assertThat(actualError)
.isInstanceOf(FastSyncException.class)
.extracting(ex -> ((FastSyncException) ex).getError())
.isInstanceOf(SyncException.class)
.extracting(ex -> ((SyncException) ex).getError())
.isEqualTo(expectedError);
return null;
});

@ -45,7 +45,7 @@ import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
@ExtendWith(MockitoExtension.class)
public class FastImportBlocksStepTest {
public class ImportBlocksStepTest {
@Mock private ProtocolSchedule protocolSchedule;
@Mock private ProtocolSpec protocolSpec;
@ -56,7 +56,7 @@ public class FastImportBlocksStepTest {
@Mock private BlockHeader pivotHeader;
private final BlockDataGenerator gen = new BlockDataGenerator();
private FastImportBlocksStep importBlocksStep;
private ImportBlocksStep importBlocksStep;
@BeforeEach
public void setUp() {
@ -66,7 +66,7 @@ public class FastImportBlocksStepTest {
when(ommerValidationPolicy.getValidationModeForNextBlock()).thenReturn(LIGHT);
importBlocksStep =
new FastImportBlocksStep(
new ImportBlocksStep(
protocolSchedule,
protocolContext,
validationPolicy,

@ -376,9 +376,9 @@ public class PivotBlockRetrieverTest {
assertThat(future).isCompletedExceptionally();
assertThatThrownBy(future::get)
.hasRootCauseInstanceOf(FastSyncException.class)
.extracting(e -> ((FastSyncException) ExceptionUtils.rootCause(e)).getError())
.isEqualTo(FastSyncError.PIVOT_BLOCK_HEADER_MISMATCH);
.hasRootCauseInstanceOf(SyncException.class)
.extracting(e -> ((SyncException) ExceptionUtils.rootCause(e)).getError())
.isEqualTo(SyncError.PIVOT_BLOCK_HEADER_MISMATCH);
}
@ParameterizedTest
@ -406,9 +406,9 @@ public class PivotBlockRetrieverTest {
assertThat(future).isCompletedExceptionally();
assertThatThrownBy(future::get)
.hasRootCauseInstanceOf(FastSyncException.class)
.extracting(e -> ((FastSyncException) ExceptionUtils.rootCause(e)).getError())
.isEqualTo(FastSyncError.PIVOT_BLOCK_HEADER_MISMATCH);
.hasRootCauseInstanceOf(SyncException.class)
.extracting(e -> ((SyncException) ExceptionUtils.rootCause(e)).getError())
.isEqualTo(SyncError.PIVOT_BLOCK_HEADER_MISMATCH);
}
private Responder responderForFakeBlocks(final long... blockNumbers) {

@ -54,7 +54,7 @@ public class PersistDataStepTest {
@BeforeEach
public void setUp() {
when(downloadState.getMetricsManager()).thenReturn(mock(SnapsyncMetricsManager.class));
when(downloadState.getMetricsManager()).thenReturn(mock(SnapSyncMetricsManager.class));
}
@Test

@ -85,7 +85,7 @@ public class SnapWorldDownloadStateTest {
private final SnapSyncProcessState snapSyncState = mock(SnapSyncProcessState.class);
private final SnapSyncStatePersistenceManager snapContext =
mock(SnapSyncStatePersistenceManager.class);
private final SnapsyncMetricsManager metricsManager = mock(SnapsyncMetricsManager.class);
private final SnapSyncMetricsManager metricsManager = mock(SnapSyncMetricsManager.class);
private final Blockchain blockchain = mock(Blockchain.class);
private final DynamicPivotBlockSelector dynamicPivotBlockManager =
mock(DynamicPivotBlockSelector.class);

@ -19,9 +19,9 @@ import org.hyperledger.besu.ethereum.core.InMemoryKeyValueStorageProvider;
import org.hyperledger.besu.ethereum.core.TrieGenerator;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.RangeManager;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncMetricsManager;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapSyncProcessState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapWorldDownloadState;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.SnapsyncMetricsManager;
import org.hyperledger.besu.ethereum.eth.sync.snapsync.request.SnapDataRequest;
import org.hyperledger.besu.ethereum.proof.WorldStateProofProvider;
import org.hyperledger.besu.ethereum.storage.StorageProvider;
@ -68,7 +68,7 @@ public class AccountFlatDatabaseHealingRangeRequestTest {
@BeforeEach
public void setup() {
Mockito.when(downloadState.getMetricsManager())
.thenReturn(Mockito.mock(SnapsyncMetricsManager.class));
.thenReturn(Mockito.mock(SnapSyncMetricsManager.class));
Mockito.when(downloadState.getAccountsHealingList()).thenReturn(new HashSet<>());
}

@ -271,6 +271,7 @@ public class T8nExecutor {
final TransactionProcessingResult result;
try {
tracer = tracerManager.getManagedTracer(i, transaction.getHash());
tracer.tracePrepareTransaction(worldStateUpdater, transaction);
tracer.traceStartTransaction(worldStateUpdater, transaction);
result =
processor.processTransaction(

@ -32,7 +32,7 @@ public class DiscoveryConfiguration {
private List<EnodeURL> bootnodes = new ArrayList<>();
private String dnsDiscoveryURL;
private boolean discoveryV5Enabled = false;
private boolean filterOnEnrForkId = false;
private boolean filterOnEnrForkId = NetworkingConfiguration.DEFAULT_FILTER_ON_ENR_FORK_ID;
public static DiscoveryConfiguration create() {
return new DiscoveryConfiguration();

@ -23,6 +23,7 @@ public class NetworkingConfiguration {
public static final int DEFAULT_INITIATE_CONNECTIONS_FREQUENCY_SEC = 30;
public static final int DEFAULT_CHECK_MAINTAINED_CONNECTIONS_FREQUENCY_SEC = 60;
public static final int DEFAULT_PEER_LOWER_BOUND = 25;
public static final boolean DEFAULT_FILTER_ON_ENR_FORK_ID = true;
private DiscoveryConfiguration discovery = new DiscoveryConfiguration();
private RlpxConfiguration rlpx = new RlpxConfiguration();

@ -26,6 +26,7 @@ import org.hyperledger.besu.ethereum.p2p.config.DiscoveryConfiguration;
import org.hyperledger.besu.ethereum.p2p.discovery.internal.Packet;
import org.hyperledger.besu.ethereum.p2p.discovery.internal.PeerDiscoveryController;
import org.hyperledger.besu.ethereum.p2p.discovery.internal.PeerRequirement;
import org.hyperledger.besu.ethereum.p2p.discovery.internal.PeerTable;
import org.hyperledger.besu.ethereum.p2p.discovery.internal.PingPacketData;
import org.hyperledger.besu.ethereum.p2p.discovery.internal.TimerUtil;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeURLImpl;
@ -73,6 +74,8 @@ public abstract class PeerDiscoveryAgent {
// The devp2p specification says only accept packets up to 1280, but some
// clients ignore that, so we add in a little extra padding.
private static final int MAX_PACKET_SIZE_BYTES = 1600;
private static final List<String> PING_PACKET_SOURCE_IGNORED =
List.of("127.0.0.1", "255.255.255.255");
protected final List<DiscoveryPeer> bootstrapPeers;
private final List<PeerRequirement> peerRequirements = new CopyOnWriteArrayList<>();
@ -81,6 +84,7 @@ public abstract class PeerDiscoveryAgent {
private final MetricsSystem metricsSystem;
private final RlpxAgent rlpxAgent;
private final ForkIdManager forkIdManager;
private final PeerTable peerTable;
/* The peer controller, which takes care of the state machine of peers. */
protected Optional<PeerDiscoveryController> controller = Optional.empty();
@ -109,7 +113,8 @@ public abstract class PeerDiscoveryAgent {
final MetricsSystem metricsSystem,
final StorageProvider storageProvider,
final ForkIdManager forkIdManager,
final RlpxAgent rlpxAgent) {
final RlpxAgent rlpxAgent,
final PeerTable peerTable) {
this.metricsSystem = metricsSystem;
checkArgument(nodeKey != null, "nodeKey cannot be null");
checkArgument(config != null, "provided configuration cannot be null");
@ -130,6 +135,7 @@ public abstract class PeerDiscoveryAgent {
this.forkIdManager = forkIdManager;
this.forkIdSupplier = () -> forkIdManager.getForkIdForChainHead().getForkIdAsBytesList();
this.rlpxAgent = rlpxAgent;
this.peerTable = peerTable;
}
protected abstract TimerUtil createTimer();
@ -263,9 +269,9 @@ public abstract class PeerDiscoveryAgent {
.peerRequirement(PeerRequirement.combine(peerRequirements))
.peerPermissions(peerPermissions)
.metricsSystem(metricsSystem)
.forkIdManager(forkIdManager)
.filterOnEnrForkId((config.isFilterOnEnrForkIdEnabled()))
.rlpxAgent(rlpxAgent)
.peerTable(peerTable)
.build();
}
@ -282,27 +288,7 @@ public abstract class PeerDiscoveryAgent {
.flatMap(Endpoint::getTcpPort)
.orElse(udpPort);
// If the host is present in the P2P PING packet itself, use that as the endpoint. If the P2P
// PING packet specifies 127.0.0.1 (the default if a custom value is not specified with
// --p2p-host or via a suitable --nat-method) we ignore it in favour of the UDP source address.
// The likelihood is that the UDP source will be 127.0.0.1 anyway, but this reduces the chance
// of an unexpected change in behaviour as a result of
// https://github.com/hyperledger/besu/issues/6224 being fixed.
final String host =
packet
.getPacketData(PingPacketData.class)
.flatMap(PingPacketData::getFrom)
.map(Endpoint::getHost)
.filter(abc -> !abc.equals("127.0.0.1"))
.stream()
.peek(
h ->
LOG.trace(
"Using \"From\" endpoint {} specified in ping packet. Ignoring UDP source host {}",
h,
sourceEndpoint.getHost()))
.findFirst()
.orElseGet(sourceEndpoint::getHost);
final String host = deriveHost(sourceEndpoint, packet);
// Notify the peer controller.
final DiscoveryPeer peer =
@ -317,6 +303,38 @@ public abstract class PeerDiscoveryAgent {
controller.ifPresent(c -> c.onMessage(packet, peer));
}
/**
* method to derive the host from the source endpoint and the P2P PING packet. If the host is
* present in the P2P PING packet itself, use that as the endpoint. If the P2P PING packet
* specifies 127.0.0.1 (the default if a custom value is not specified with --p2p-host or via a
* suitable --nat-method) we ignore it in favour of the UDP source address. Some implementations
* send 127.0.0.1 or 255.255.255.255 anyway, but this reduces the chance of an unexpected change
* in behaviour as a result of https://github.com/hyperledger/besu/issues/6224 being fixed.
*
* @param sourceEndpoint source endpoint of the packet
* @param packet P2P PING packet
* @return host address as string
*/
static String deriveHost(final Endpoint sourceEndpoint, final Packet packet) {
return packet
.getPacketData(PingPacketData.class)
.flatMap(PingPacketData::getFrom)
.map(Endpoint::getHost)
.filter(
fromAddr ->
(!PING_PACKET_SOURCE_IGNORED.contains(fromAddr)
&& InetAddresses.isInetAddress(fromAddr)))
.stream()
.peek(
h ->
LOG.trace(
"Using \"From\" endpoint {} specified in ping packet. Ignoring UDP source host {}",
h,
sourceEndpoint.getHost()))
.findFirst()
.orElseGet(sourceEndpoint::getHost);
}
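The javadoc above describes the fallback: prefer the "from" host carried in the PING packet, but ignore loopback/broadcast values (and, in the method itself, anything Guava's InetAddresses does not recognise as an address) in favour of the UDP source address. A standalone sketch of that selection using plain Optional rather than the Besu Endpoint/Packet types (names below are illustrative):

import java.util.List;
import java.util.Optional;

class HostDerivationSketch {
  private static final List<String> IGNORED = List.of("127.0.0.1", "255.255.255.255");

  static String deriveHost(final String udpSourceHost, final Optional<String> pingFromHost) {
    return pingFromHost
        .filter(h -> !IGNORED.contains(h)) // drop unusable "from" addresses
        .orElse(udpSourceHost);            // fall back to the UDP source host
  }

  public static void main(String[] args) {
    System.out.println(deriveHost("203.0.113.5", Optional.of("127.0.0.1")));    // 203.0.113.5
    System.out.println(deriveHost("203.0.113.5", Optional.of("198.51.100.7"))); // 198.51.100.7
  }
}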
/**
* Send a packet to the given recipient.
*

@ -23,6 +23,7 @@ import org.hyperledger.besu.ethereum.p2p.config.DiscoveryConfiguration;
import org.hyperledger.besu.ethereum.p2p.discovery.internal.Packet;
import org.hyperledger.besu.ethereum.p2p.discovery.internal.PeerDiscoveryController;
import org.hyperledger.besu.ethereum.p2p.discovery.internal.PeerDiscoveryController.AsyncExecutor;
import org.hyperledger.besu.ethereum.p2p.discovery.internal.PeerTable;
import org.hyperledger.besu.ethereum.p2p.discovery.internal.TimerUtil;
import org.hyperledger.besu.ethereum.p2p.discovery.internal.VertxTimerUtil;
import org.hyperledger.besu.ethereum.p2p.permissions.PeerPermissions;
@ -73,7 +74,8 @@ public class VertxPeerDiscoveryAgent extends PeerDiscoveryAgent {
final MetricsSystem metricsSystem,
final StorageProvider storageProvider,
final ForkIdManager forkIdManager,
final RlpxAgent rlpxAgent) {
final RlpxAgent rlpxAgent,
final PeerTable peerTable) {
super(
nodeKey,
config,
@ -82,7 +84,8 @@ public class VertxPeerDiscoveryAgent extends PeerDiscoveryAgent {
metricsSystem,
storageProvider,
forkIdManager,
rlpxAgent);
rlpxAgent,
peerTable);
checkArgument(vertx != null, "vertx instance cannot be null");
this.vertx = vertx;

@ -21,8 +21,6 @@ import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;
import org.hyperledger.besu.cryptoservices.NodeKey;
import org.hyperledger.besu.ethereum.forkid.ForkId;
import org.hyperledger.besu.ethereum.forkid.ForkIdManager;
import org.hyperledger.besu.ethereum.p2p.discovery.DiscoveryPeer;
import org.hyperledger.besu.ethereum.p2p.discovery.PeerDiscoveryStatus;
import org.hyperledger.besu.ethereum.p2p.peers.Peer;
@ -129,7 +127,6 @@ public class PeerDiscoveryController {
private final DiscoveryProtocolLogger discoveryProtocolLogger;
private final LabelledMetric<Counter> interactionCounter;
private final LabelledMetric<Counter> interactionRetryCounter;
private final ForkIdManager forkIdManager;
private final boolean filterOnEnrForkId;
private final RlpxAgent rlpxAgent;
@ -161,7 +158,6 @@ public class PeerDiscoveryController {
final PeerPermissions peerPermissions,
final MetricsSystem metricsSystem,
final Optional<Cache<Bytes, Packet>> maybeCacheForEnrRequests,
final ForkIdManager forkIdManager,
final boolean filterOnEnrForkId,
final RlpxAgent rlpxAgent) {
this.timerUtil = timerUtil;
@ -197,11 +193,11 @@ public class PeerDiscoveryController {
"discovery_interaction_retry_count",
"Total number of interaction retries performed",
"type");
this.cachedEnrRequests =
maybeCacheForEnrRequests.orElse(
CacheBuilder.newBuilder().maximumSize(50).expireAfterWrite(10, SECONDS).build());
this.forkIdManager = forkIdManager;
this.filterOnEnrForkId = filterOnEnrForkId;
}
@ -314,6 +310,7 @@ public class PeerDiscoveryController {
}
final DiscoveryPeer peer = resolvePeer(sender);
final Bytes peerId = peer.getId();
switch (packet.getType()) {
case PING:
if (peerPermissions.allowInboundBonding(peer)) {
@ -333,10 +330,10 @@ public class PeerDiscoveryController {
if (filterOnEnrForkId) {
requestENR(peer);
}
bondingPeers.invalidate(peer.getId());
bondingPeers.invalidate(peerId);
addToPeerTable(peer);
recursivePeerRefreshState.onBondingComplete(peer);
Optional.ofNullable(cachedEnrRequests.getIfPresent(peer.getId()))
Optional.ofNullable(cachedEnrRequests.getIfPresent(peerId))
.ifPresent(cachedEnrRequest -> processEnrRequest(peer, cachedEnrRequest));
});
break;
@ -360,12 +357,12 @@ public class PeerDiscoveryController {
if (PeerDiscoveryStatus.BONDED.equals(peer.getStatus())) {
processEnrRequest(peer, packet);
} else if (PeerDiscoveryStatus.BONDING.equals(peer.getStatus())) {
LOG.trace("ENR_REQUEST cached for bonding peer Id: {}", peer.getId());
LOG.trace("ENR_REQUEST cached for bonding peer Id: {}", peerId);
// Due to UDP, it may happen that we receive the ENR_REQUEST just before the PONG.
// Because peers want to send the ENR_REQUEST directly after the pong.
// If this happens we don't want to ignore the request but process when bonded.
// this cache allows to keep the request and to respond after having processed the PONG
cachedEnrRequests.put(peer.getId(), packet);
cachedEnrRequests.put(peerId, packet);
}
break;
case ENR_RESPONSE:
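The comment above explains why an ENR_REQUEST that races ahead of the PONG is cached rather than dropped: it is keyed by peer id and replayed once bonding completes. A minimal sketch of that cache-and-replay pattern using a Guava cache of the same shape as the controller's (the String keys and handler methods below are illustrative stand-ins, not Besu types):

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.Optional;
import java.util.concurrent.TimeUnit;

class EnrRequestCacheSketch {
  // Same bounds as the controller's cache: at most 50 entries, expiring after 10 seconds.
  private final Cache<String, String> pendingEnrRequests =
      CacheBuilder.newBuilder().maximumSize(50).expireAfterWrite(10, TimeUnit.SECONDS).build();

  void onEnrRequest(final String peerId, final String packet, final boolean bonded) {
    if (bonded) {
      respond(peerId, packet);
    } else {
      // Request arrived before the PONG: keep it until bonding completes.
      pendingEnrRequests.put(peerId, packet);
    }
  }

  void onBondingComplete(final String peerId) {
    Optional.ofNullable(pendingEnrRequests.getIfPresent(peerId))
        .ifPresent(
            packet -> {
              pendingEnrRequests.invalidate(peerId);
              respond(peerId, packet);
            });
  }

  private void respond(final String peerId, final String packet) {
    System.out.println("Responding to ENR_REQUEST from " + peerId);
  }
}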
@ -376,26 +373,6 @@ public class PeerDiscoveryController {
packet.getPacketData(ENRResponsePacketData.class);
final NodeRecord enr = packetData.get().getEnr();
peer.setNodeRecord(enr);
final Optional<ForkId> maybeForkId = peer.getForkId();
if (maybeForkId.isPresent()) {
if (forkIdManager.peerCheck(maybeForkId.get())) {
connectOnRlpxLayer(peer);
LOG.debug(
"Peer {} PASSED fork id check. ForkId received: {}",
sender.getId(),
maybeForkId.get());
} else {
LOG.debug(
"Peer {} FAILED fork id check. ForkId received: {}",
sender.getId(),
maybeForkId.get());
}
} else {
// if the peer hasn't sent the ForkId try to connect to it anyways
connectOnRlpxLayer(peer);
LOG.debug("No fork id sent by peer: {}", peer.getId());
}
});
break;
}
@ -431,9 +408,7 @@ public class PeerDiscoveryController {
if (peer.getStatus() != PeerDiscoveryStatus.BONDED) {
peer.setStatus(PeerDiscoveryStatus.BONDED);
if (!filterOnEnrForkId) {
connectOnRlpxLayer(peer);
}
connectOnRlpxLayer(peer);
}
final PeerTable.AddResult result = peerTable.tryAdd(peer);
@ -560,8 +535,6 @@ public class PeerDiscoveryController {
*/
@VisibleForTesting
void requestENR(final DiscoveryPeer peer) {
peer.setStatus(PeerDiscoveryStatus.ENR_REQUESTED);
final Consumer<PeerInteractionState> action =
interaction -> {
final ENRRequestPacketData data = ENRRequestPacketData.create();
@ -838,7 +811,6 @@ public class PeerDiscoveryController {
private Cache<Bytes, Packet> cachedEnrRequests =
CacheBuilder.newBuilder().maximumSize(50).expireAfterWrite(10, SECONDS).build();
private ForkIdManager forkIdManager;
private RlpxAgent rlpxAgent;
private Builder() {}
@ -846,10 +818,6 @@ public class PeerDiscoveryController {
public PeerDiscoveryController build() {
validate();
if (peerTable == null) {
peerTable = new PeerTable(this.nodeKey.getPublicKey().getEncodedBytes(), 16);
}
return new PeerDiscoveryController(
nodeKey,
localPeer,
@ -864,7 +832,6 @@ public class PeerDiscoveryController {
peerPermissions,
metricsSystem,
Optional.of(cachedEnrRequests),
forkIdManager,
filterOnEnrForkId,
rlpxAgent);
}
@ -875,8 +842,8 @@ public class PeerDiscoveryController {
validateRequiredDependency(timerUtil, "TimerUtil");
validateRequiredDependency(workerExecutor, "AsyncExecutor");
validateRequiredDependency(metricsSystem, "MetricsSystem");
validateRequiredDependency(forkIdManager, "ForkIdManager");
validateRequiredDependency(rlpxAgent, "RlpxAgent");
validateRequiredDependency(peerTable, "PeerTable");
}
private void validateRequiredDependency(final Object object, final String name) {
@ -970,11 +937,5 @@ public class PeerDiscoveryController {
this.rlpxAgent = rlpxAgent;
return this;
}
public Builder forkIdManager(final ForkIdManager forkIdManager) {
checkNotNull(forkIdManager);
this.forkIdManager = forkIdManager;
return this;
}
}
}

@ -56,26 +56,21 @@ public class PeerTable {
* Builds a new peer table, where distance is calculated using the provided nodeId as a baseline.
*
* @param nodeId The ID of the node where this peer table is stored.
* @param bucketSize The maximum length of each k-bucket.
*/
public PeerTable(final Bytes nodeId, final int bucketSize) {
public PeerTable(final Bytes nodeId) {
this.keccak256 = Hash.keccak256(nodeId);
this.table =
Stream.generate(() -> new Bucket(DEFAULT_BUCKET_SIZE))
.limit(N_BUCKETS + 1)
.toArray(Bucket[]::new);
this.distanceCache = new ConcurrentHashMap<>();
this.maxEntriesCnt = N_BUCKETS * bucketSize;
this.maxEntriesCnt = N_BUCKETS * DEFAULT_BUCKET_SIZE;
// A bloom filter with 4096 expected insertions of 64-byte keys with a 0.1% false positive
// probability yields a memory footprint of ~7.5kb.
buildBloomFilter();
}
public PeerTable(final Bytes nodeId) {
this(nodeId, DEFAULT_BUCKET_SIZE);
}
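The constructor comment sizes the id bloom filter at roughly 7.5 kB for 4096 expected 64-byte keys at a 0.1% false-positive rate. A quick sketch of that estimate, assuming Guava's BloomFilter (consistent with the idBloom.mightContain call shown in get() below; the class and array here are illustrative only):

import com.google.common.hash.BloomFilter;
import com.google.common.hash.Funnels;

class PeerIdBloomSketch {
  public static void main(String[] args) {
    // 4096 expected insertions at 0.1% FPP: -n*ln(p)/ln(2)^2 ≈ 59,000 bits ≈ 7.4 kB.
    final BloomFilter<byte[]> idBloom =
        BloomFilter.create(Funnels.byteArrayFunnel(), 4096, 0.001);

    final byte[] nodeId = new byte[64]; // a 64-byte peer id, as in the comment above
    idBloom.put(nodeId);
    System.out.println(idBloom.mightContain(nodeId));      // true
    System.out.println(idBloom.mightContain(new byte[64])); // also true: identical content
  }
}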
/**
* Returns the table's representation of a peer, if it exists.
*
@ -83,11 +78,12 @@ public class PeerTable {
* @return The stored representation.
*/
public Optional<DiscoveryPeer> get(final PeerId peer) {
if (!idBloom.mightContain(peer.getId())) {
final Bytes peerId = peer.getId();
if (!idBloom.mightContain(peerId)) {
return Optional.empty();
}
final int distance = distanceFrom(peer);
return table[distance].getAndTouch(peer.getId());
return table[distance].getAndTouch(peerId);
}
/**

@ -27,6 +27,7 @@ import org.hyperledger.besu.ethereum.p2p.discovery.DiscoveryPeer;
import org.hyperledger.besu.ethereum.p2p.discovery.PeerDiscoveryAgent;
import org.hyperledger.besu.ethereum.p2p.discovery.PeerDiscoveryStatus;
import org.hyperledger.besu.ethereum.p2p.discovery.VertxPeerDiscoveryAgent;
import org.hyperledger.besu.ethereum.p2p.discovery.internal.PeerTable;
import org.hyperledger.besu.ethereum.p2p.peers.DefaultPeerPrivileges;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeURLImpl;
import org.hyperledger.besu.ethereum.p2p.peers.LocalNode;
@ -383,11 +384,12 @@ public class DefaultP2PNetwork implements P2PNetwork {
@VisibleForTesting
void attemptPeerConnections() {
LOG.trace("Initiating connections to discovered peers.");
rlpxAgent.connect(
final Stream<DiscoveryPeer> toTry =
streamDiscoveredPeers()
.filter(peer -> peer.getStatus() == PeerDiscoveryStatus.BONDED)
.filter(peerDiscoveryAgent::checkForkId)
.sorted(Comparator.comparing(DiscoveryPeer::getLastAttemptedConnection)));
.sorted(Comparator.comparing(DiscoveryPeer::getLastAttemptedConnection));
toTry.forEach(rlpxAgent::connect);
}
@Override
@ -511,6 +513,7 @@ public class DefaultP2PNetwork implements P2PNetwork {
private Supplier<Stream<PeerConnection>> allConnectionsSupplier;
private Supplier<Stream<PeerConnection>> allActiveConnectionsSupplier;
private int peersLowerBound;
private PeerTable peerTable;
public P2PNetwork build() {
validate();
@ -528,6 +531,7 @@ public class DefaultP2PNetwork implements P2PNetwork {
final MutableLocalNode localNode =
MutableLocalNode.create(config.getRlpx().getClientId(), 5, supportedCapabilities);
final PeerPrivileges peerPrivileges = new DefaultPeerPrivileges(maintainedPeers);
peerTable = new PeerTable(nodeKey.getPublicKey().getEncodedBytes());
rlpxAgent = rlpxAgent == null ? createRlpxAgent(localNode, peerPrivileges) : rlpxAgent;
peerDiscoveryAgent = peerDiscoveryAgent == null ? createDiscoveryAgent() : peerDiscoveryAgent;
@ -572,7 +576,8 @@ public class DefaultP2PNetwork implements P2PNetwork {
metricsSystem,
storageProvider,
forkIdManager,
rlpxAgent);
rlpxAgent,
peerTable);
}
private RlpxAgent createRlpxAgent(
@ -589,6 +594,7 @@ public class DefaultP2PNetwork implements P2PNetwork {
.allConnectionsSupplier(allConnectionsSupplier)
.allActiveConnectionsSupplier(allActiveConnectionsSupplier)
.peersLowerBound(peersLowerBound)
.peerTable(peerTable)
.build();
}

@ -20,6 +20,7 @@ import static com.google.common.base.Preconditions.checkState;
import org.hyperledger.besu.cryptoservices.NodeKey;
import org.hyperledger.besu.ethereum.p2p.config.RlpxConfiguration;
import org.hyperledger.besu.ethereum.p2p.discovery.DiscoveryPeer;
import org.hyperledger.besu.ethereum.p2p.discovery.internal.PeerTable;
import org.hyperledger.besu.ethereum.p2p.peers.LocalNode;
import org.hyperledger.besu.ethereum.p2p.peers.Peer;
import org.hyperledger.besu.ethereum.p2p.peers.PeerPrivileges;
@ -162,13 +163,6 @@ public class RlpxAgent {
}
}
public void connect(final Stream<? extends Peer> peerStream) {
if (!localNode.isReady()) {
return;
}
peerStream.forEach(this::connect);
}
public void disconnect(final Bytes peerId, final DisconnectReason reason) {
try {
allActiveConnectionsSupplier
@ -206,6 +200,7 @@ public class RlpxAgent {
+ this.getClass().getSimpleName()
+ " has finished starting"));
}
// Check peer is valid
final EnodeURL enode = peer.getEnodeURL();
if (!enode.isListening()) {
@ -380,6 +375,7 @@ public class RlpxAgent {
private Supplier<Stream<PeerConnection>> allConnectionsSupplier;
private Supplier<Stream<PeerConnection>> allActiveConnectionsSupplier;
private int peersLowerBound;
private PeerTable peerTable;
private Builder() {}
@ -399,12 +395,13 @@ public class RlpxAgent {
localNode,
connectionEvents,
metricsSystem,
p2pTLSConfiguration.get());
p2pTLSConfiguration.get(),
peerTable);
} else {
LOG.debug("Using default NettyConnectionInitializer");
connectionInitializer =
new NettyConnectionInitializer(
nodeKey, config, localNode, connectionEvents, metricsSystem);
nodeKey, config, localNode, connectionEvents, metricsSystem, peerTable);
}
}
@ -499,5 +496,10 @@ public class RlpxAgent {
this.peersLowerBound = peersLowerBound;
return this;
}
public Builder peerTable(final PeerTable peerTable) {
this.peerTable = peerTable;
return this;
}
}
}

@ -14,6 +14,7 @@
*/
package org.hyperledger.besu.ethereum.p2p.rlpx.connections.netty;
import org.hyperledger.besu.ethereum.p2p.discovery.internal.PeerTable;
import org.hyperledger.besu.ethereum.p2p.peers.LocalNode;
import org.hyperledger.besu.ethereum.p2p.peers.Peer;
import org.hyperledger.besu.ethereum.p2p.rlpx.connections.PeerConnection;
@ -60,6 +61,7 @@ abstract class AbstractHandshakeHandler extends SimpleChannelInboundHandler<Byte
private final FramerProvider framerProvider;
private final boolean inboundInitiated;
private final PeerTable peerTable;
AbstractHandshakeHandler(
final List<SubProtocol> subProtocols,
@ -70,7 +72,8 @@ abstract class AbstractHandshakeHandler extends SimpleChannelInboundHandler<Byte
final MetricsSystem metricsSystem,
final HandshakerProvider handshakerProvider,
final FramerProvider framerProvider,
final boolean inboundInitiated) {
final boolean inboundInitiated,
final PeerTable peerTable) {
this.subProtocols = subProtocols;
this.localNode = localNode;
this.expectedPeer = expectedPeer;
@ -80,6 +83,7 @@ abstract class AbstractHandshakeHandler extends SimpleChannelInboundHandler<Byte
this.handshaker = handshakerProvider.buildInstance();
this.framerProvider = framerProvider;
this.inboundInitiated = inboundInitiated;
this.peerTable = peerTable;
}
/**
@ -97,47 +101,48 @@ abstract class AbstractHandshakeHandler extends SimpleChannelInboundHandler<Byte
ctx.writeAndFlush(nextMsg.get());
} else if (handshaker.getStatus() != Handshaker.HandshakeStatus.SUCCESS) {
LOG.debug("waiting for more bytes");
return;
}
} else {
final Bytes nodeId = handshaker.partyPubKey().getEncodedBytes();
if (!localNode.isReady()) {
// If we're handling a connection before the node is fully up, just disconnect
LOG.debug("Rejecting connection because local node is not ready {}", nodeId);
disconnect(ctx, DisconnectMessage.DisconnectReason.UNKNOWN);
return;
}
final Bytes nodeId = handshaker.partyPubKey().getEncodedBytes();
if (!localNode.isReady()) {
// If we're handling a connection before the node is fully up, just disconnect
LOG.debug("Rejecting connection because local node is not ready {}", nodeId);
disconnect(ctx, DisconnectMessage.DisconnectReason.UNKNOWN);
return;
LOG.trace("Sending framed hello");
// Exchange keys done
final Framer framer = this.framerProvider.buildFramer(handshaker.secrets());
final ByteToMessageDecoder deFramer =
new DeFramer(
framer,
subProtocols,
localNode,
expectedPeer,
connectionEventDispatcher,
connectionFuture,
metricsSystem,
inboundInitiated,
peerTable);
ctx.channel()
.pipeline()
.replace(this, "DeFramer", deFramer)
.addBefore("DeFramer", "validate", new ValidateFirstOutboundMessage(framer));
ctx.writeAndFlush(new OutboundMessage(null, HelloMessage.create(localNode.getPeerInfo())))
.addListener(
ff -> {
if (ff.isSuccess()) {
LOG.trace("Successfully wrote hello message");
}
});
msg.retain();
ctx.fireChannelRead(msg);
}
LOG.trace("Sending framed hello");
// Exchange keys done
final Framer framer = this.framerProvider.buildFramer(handshaker.secrets());
final ByteToMessageDecoder deFramer =
new DeFramer(
framer,
subProtocols,
localNode,
expectedPeer,
connectionEventDispatcher,
connectionFuture,
metricsSystem,
inboundInitiated);
ctx.channel()
.pipeline()
.replace(this, "DeFramer", deFramer)
.addBefore("DeFramer", "validate", new ValidateFirstOutboundMessage(framer));
ctx.writeAndFlush(new OutboundMessage(null, HelloMessage.create(localNode.getPeerInfo())))
.addListener(
ff -> {
if (ff.isSuccess()) {
LOG.trace("Successfully wrote hello message");
}
});
msg.retain();
ctx.fireChannelRead(msg);
}
private void disconnect(
