Update SLF4J version (#4587)

* Bump SLF4J version and replace helper for lambdas

Signed-off-by: Diego López León <dieguitoll@gmail.com>
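
A minimal sketch of the call-site migration this PR applies throughout the diff below: Besu's Slf4jLambdaHelper wrappers (debugLambda, traceLambda, warnLambda, infoLambda) are replaced by the fluent API that SLF4J 2.x exposes on Logger. The class name and describe() helper here are illustrative, not part of the PR:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class FluentLoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(FluentLoggingSketch.class);

  void logProposal(final Object payloadId, final Object block) {
    // Before: debugLambda(LOG, "New proposal for payloadId {} {}",
    //             payloadId::toString, () -> describe(block));
    // After: the SLF4J 2.x fluent builder. Supplier arguments are evaluated
    // only if DEBUG is enabled, so the laziness of the old helper is preserved.
    LOG.atDebug()
        .setMessage("New proposal for payloadId {} {}")
        .addArgument(payloadId) // toString() is applied lazily during formatting
        .addArgument(() -> describe(block)) // expensive formatting stays behind a Supplier
        .log();
  }

  private String describe(final Object block) {
    return String.valueOf(block);
  }
}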

* Remove unnecessary toString calls

Signed-off-by: Diego López León <dieguitoll@gmail.com>
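
SLF4J calls toString() on each {} argument only when the event is actually formatted, so passing payloadId::toString (or payloadId.toString()) gains nothing over passing the object itself. A short sketch under that assumption; the class name and parameter are illustrative:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class RedundantToStringSketch {
  private static final Logger LOG = LoggerFactory.getLogger(RedundantToStringSketch.class);

  void log(final Object payloadId) {
    // Redundant: the Supplier only wraps a toString() that SLF4J would perform anyway.
    LOG.atDebug().setMessage("payload {}").addArgument(payloadId::toString).log();

    // Equivalent and simpler; formatting (and thus toString()) still only runs
    // when DEBUG is enabled.
    LOG.atDebug().setMessage("payload {}").addArgument(payloadId).log();
  }
}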

* Replace unnecessary lambdas with constant references

Signed-off-by: Diego López León <dieguitoll@gmail.com>
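
When the argument is an already-computed local or field, wrapping it in a lambda defers no work, so the diff passes the value directly and keeps Suppliers only for arguments that are expensive to produce. A sketch of the two forms, mirroring the EngineGetPayloadBodiesByRangeV1 change; the class name is illustrative:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class ConstantArgumentSketch {
  private static final Logger LOG = LoggerFactory.getLogger(ConstantArgumentSketch.class);

  void log(final long startBlockNumber, final long count) {
    // Unnecessary lambdas: the values already exist, nothing is deferred.
    LOG.atTrace()
        .setMessage("start block number {} count {}")
        .addArgument(() -> startBlockNumber)
        .addArgument(() -> count)
        .log();

    // Constant references passed directly, as this commit does.
    LOG.atTrace()
        .setMessage("start block number {} count {}")
        .addArgument(startBlockNumber)
        .addArgument(count)
        .log();
  }
}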

---------

Signed-off-by: Diego López León <dieguitoll@gmail.com>
Diego López León, committed by GitHub
parent b850aa7b05
commit 66c757dfa3
  1. consensus/merge/src/main/java/org/hyperledger/besu/consensus/merge/PostMergeContext.java (31)
  2. consensus/merge/src/main/java/org/hyperledger/besu/consensus/merge/TransitionProtocolSchedule.java (37)
  3. consensus/merge/src/main/java/org/hyperledger/besu/consensus/merge/TransitionUtils.java (12)
  4. consensus/merge/src/main/java/org/hyperledger/besu/consensus/merge/blockcreation/MergeCoordinator.java (142)
  5. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/TraceCall.java (15)
  6. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/TraceCallMany.java (13)
  7. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/AbstractEngineForkchoiceUpdated.java (38)
  8. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/AbstractEngineGetPayload.java (22)
  9. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/AbstractEngineNewPayload.java (30)
  10. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/EngineExchangeCapabilities.java (7)
  11. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/EngineExchangeTransitionConfiguration.java (22)
  12. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/EngineGetPayloadBodiesByHashV1.java (8)
  13. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/EngineGetPayloadBodiesByRangeV1.java (14)
  14. ethereum/blockcreation/src/main/java/org/hyperledger/besu/ethereum/blockcreation/BlockTransactionSelector.java (62)
  15. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/bonsai/AbstractTrieLogManager.java (17)
  16. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/bonsai/BonsaiPersistedWorldState.java (6)
  17. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/bonsai/BonsaiSnapshotWorldStateKeyValueStorage.java (7)
  18. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/bonsai/LayeredTrieLogManager.java (12)
  19. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/bonsai/SnapshotTrieLogManager.java (12)
  20. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/mainnet/feemarket/CancunFeeMarket.java (12)
  21. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/manager/EthPeers.java (15)
  22. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/manager/task/AbstractGetHeadersFromPeerTask.java (11)
  23. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/manager/task/AbstractRetryingSwitchingPeerTask.java (48)
  24. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/manager/task/BufferedGetPooledTransactionsFromPeerFetcher.java (26)
  25. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/manager/task/RetryingGetBlockFromPeersTask.java (36)
  26. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/manager/task/RetryingGetBlocksFromPeersTask.java (26)
  27. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/BlockPropagationManager.java (143)
  28. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/ChainHeadTracker.java (17)
  29. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/DefaultSynchronizer.java (3)
  30. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/PipelineChainDownloader.java (13)
  31. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/backwardsync/BackwardChain.java (32)
  32. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/backwardsync/BackwardSyncContext.java (29)
  33. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/backwardsync/BackwardSyncStep.java (12)
  34. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/backwardsync/BackwardsSyncAlgorithm.java (19)
  35. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/backwardsync/ForwardSyncStep.java (42)
  36. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/backwardsync/ProcessKnownAncestorsStep.java (16)
  37. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/backwardsync/SyncStepStep.java (5)
  38. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastImportBlocksStep.java (7)
  39. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/FastSyncActions.java (9)
  40. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/fastsync/PivotSelectorFromSafeBlock.java (10)
  41. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/snapsync/DynamicPivotBlockManager.java (58)
  42. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/sync/tasks/PersistBlockTask.java (6)
  43. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/transactions/NewPooledTransactionHashesMessageProcessor.java (14)
  44. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/transactions/NewPooledTransactionHashesMessageSender.java (14)
  45. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/transactions/TransactionBroadcaster.java (20)
  46. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/transactions/TransactionPool.java (58)
  47. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/transactions/TransactionsMessageProcessor.java (20)
  48. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/transactions/TransactionsMessageSender.java (18)
  49. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/transactions/sorter/AbstractPendingTransactionsSorter.java (83)
  50. ethereum/eth/src/main/java/org/hyperledger/besu/ethereum/eth/transactions/sorter/BaseFeePendingTransactionsSorter.java (48)
  51. ethereum/p2p/src/main/java/org/hyperledger/besu/ethereum/p2p/discovery/VertxPeerDiscoveryAgent.java (40)
  52. ethereum/p2p/src/main/java/org/hyperledger/besu/ethereum/p2p/rlpx/RlpxAgent.java (7)
  53. ethereum/permissioning/src/main/java/org/hyperledger/besu/ethereum/permissioning/AllowlistPersistor.java (14)
  54. ethereum/stratum/src/main/java/org/hyperledger/besu/ethereum/stratum/GetWorkProtocol.java (4)
  55. gradle/verification-metadata.xml (120)
  56. gradle/versions.gradle (10)
  57. util/build.gradle (2)
  58. util/src/main/java/org/hyperledger/besu/util/Slf4jLambdaHelper.java (85)
  59. util/src/test/java/org/hyperledger/besu/util/Slf4jLambdaHelperTest.java (94)

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.consensus.merge;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.consensus.merge.blockcreation.PayloadIdentifier;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.ConsensusContext;
@ -239,12 +237,12 @@ public class PostMergeContext implements MergeContext {
maybeCurrBestBlock.ifPresentOrElse(
currBestBlock -> {
if (compareByGasUsedDesc.compare(newBlockWithReceipts, currBestBlock) < 0) {
debugLambda(
LOG,
"New proposal for payloadId {} {} is better than the previous one {}",
payloadId::toString,
() -> logBlockProposal(newBlockWithReceipts.getBlock()),
() -> logBlockProposal(currBestBlock.getBlock()));
LOG.atDebug()
.setMessage("New proposal for payloadId {} {} is better than the previous one {}")
.addArgument(payloadId)
.addArgument(() -> logBlockProposal(newBlockWithReceipts.getBlock()))
.addArgument(() -> logBlockProposal(currBestBlock.getBlock()))
.log();
blocksInProgress.removeAll(
retrieveTuplesById(payloadId).collect(Collectors.toUnmodifiableList()));
blocksInProgress.add(new PayloadTuple(payloadId, newBlockWithReceipts));
@ -252,14 +250,15 @@ public class PostMergeContext implements MergeContext {
},
() -> blocksInProgress.add(new PayloadTuple(payloadId, newBlockWithReceipts)));
debugLambda(
LOG,
"Current best proposal for payloadId {} {}",
payloadId::toString,
() ->
retrieveBlockById(payloadId)
.map(bb -> logBlockProposal(bb.getBlock()))
.orElse("N/A"));
LOG.atDebug()
.setMessage("Current best proposal for payloadId {} {}")
.addArgument(payloadId)
.addArgument(
() ->
retrieveBlockById(payloadId)
.map(bb -> logBlockProposal(bb.getBlock()))
.orElse("N/A"))
.log();
}
}

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.consensus.merge;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.config.GenesisConfigOptions;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.core.Difficulty;
@ -124,10 +122,10 @@ public class TransitionProtocolSchedule implements ProtocolSchedule {
// if head is not post-merge, return pre-merge schedule:
if (!mergeContext.isPostMerge()) {
debugLambda(
LOG,
"for {} returning a pre-merge schedule because we are not post-merge",
blockHeader::toLogString);
LOG.atDebug()
.setMessage("for {} returning a pre-merge schedule because we are not post-merge")
.addArgument(blockHeader::toLogString)
.log();
return getPreMergeSchedule().getByBlockNumber(blockHeader.getNumber());
}
@ -139,26 +137,29 @@ public class TransitionProtocolSchedule implements ProtocolSchedule {
.orElse(Difficulty.ZERO);
Difficulty thisDifficulty = parentDifficulty.add(blockHeader.getDifficulty());
Difficulty terminalDifficulty = mergeContext.getTerminalTotalDifficulty();
debugLambda(
LOG,
" block {} ttd is: {}, parent total diff is: {}, this total diff is: {}",
blockHeader::toLogString,
() -> terminalDifficulty,
() -> parentDifficulty,
() -> thisDifficulty);
LOG.atDebug()
.setMessage(" block {} ttd is: {}, parent total diff is: {}, this total diff is: {}")
.addArgument(blockHeader::toLogString)
.addArgument(terminalDifficulty)
.addArgument(parentDifficulty)
.addArgument(thisDifficulty)
.log();
// if this block is pre-merge or a TTD block
if (thisDifficulty.lessThan(terminalDifficulty)
|| TransitionUtils.isTerminalProofOfWorkBlock(blockHeader, protocolContext)) {
debugLambda(
LOG,
"returning a pre-merge schedule because block {} is pre-merge or TTD",
blockHeader::toLogString);
LOG.atDebug()
.setMessage("returning a pre-merge schedule because block {} is pre-merge or TTD")
.addArgument(blockHeader::toLogString)
.log();
return getPreMergeSchedule().getByBlockNumber(blockHeader.getNumber());
}
}
// else return post-merge schedule
debugLambda(LOG, " for {} returning a post-merge schedule", blockHeader::toLogString);
LOG.atDebug()
.setMessage(" for {} returning a post-merge schedule")
.addArgument(blockHeader::toLogString)
.log();
return getPostMergeSchedule().getByBlockNumber(blockHeader.getNumber());
}

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.consensus.merge;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.warnLambda;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.core.Difficulty;
import org.hyperledger.besu.ethereum.core.ProcessableBlockHeader;
@ -124,11 +122,11 @@ public class TransitionUtils<SwitchingObject> {
}
if (currentChainTotalDifficulty.isZero()) {
warnLambda(
LOG,
"unable to get total difficulty for {}, parent hash {} difficulty not found",
header::toLogString,
header::getParentHash);
LOG.atWarn()
.setMessage("unable to get total difficulty for {}, parent hash {} difficulty not found")
.addArgument(header::toLogString)
.addArgument(header::getParentHash)
.log();
}
Difficulty configuredTotalTerminalDifficulty = consensusContext.getTerminalTotalDifficulty();

@ -16,7 +16,6 @@ package org.hyperledger.besu.consensus.merge.blockcreation;
import static org.hyperledger.besu.consensus.merge.TransitionUtils.isTerminalProofOfWorkBlock;
import static org.hyperledger.besu.consensus.merge.blockcreation.MergeMiningCoordinator.ForkchoiceResult.Status.INVALID;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.consensus.merge.MergeContext;
import org.hyperledger.besu.datatypes.Address;
@ -274,11 +273,11 @@ public class MergeCoordinator implements MergeMiningCoordinator, BadChainListene
if (result.isSuccessful()) {
mergeContext.putPayloadById(
payloadIdentifier, new BlockWithReceipts(emptyBlock, result.getReceipts()));
debugLambda(
LOG,
"Built empty block proposal {} for payload {}",
emptyBlock::toLogString,
payloadIdentifier::toString);
LOG.atDebug()
.setMessage("Built empty block proposal {} for payload {}")
.addArgument(emptyBlock::toLogString)
.addArgument(payloadIdentifier)
.log();
} else {
LOG.warn(
"failed to validate empty block proposal {}, reason {}",
@ -326,11 +325,11 @@ public class MergeCoordinator implements MergeMiningCoordinator, BadChainListene
.whenComplete(
(unused, throwable) -> {
if (throwable != null) {
debugLambda(
LOG,
"Exception building block for payload id {}, reason {}",
payloadIdentifier::toString,
() -> logException(throwable));
LOG.atDebug()
.setMessage("Exception building block for payload id {}, reason {}")
.addArgument(payloadIdentifier)
.addArgument(() -> logException(throwable))
.log();
}
blockCreationTask.computeIfPresent(
payloadIdentifier,
@ -358,11 +357,11 @@ public class MergeCoordinator implements MergeMiningCoordinator, BadChainListene
Thread.sleep(waitBeforeRepetition);
}
} catch (final CancellationException | InterruptedException ce) {
debugLambda(
LOG,
"Block creation for payload id {} has been cancelled, reason {}",
payloadIdentifier::toString,
() -> logException(ce));
LOG.atDebug()
.setMessage("Block creation for payload id {} has been cancelled, reason {}")
.addArgument(payloadIdentifier)
.addArgument(() -> logException(ce))
.log();
return null;
} catch (final Throwable e) {
LOG.warn(
@ -384,11 +383,11 @@ public class MergeCoordinator implements MergeMiningCoordinator, BadChainListene
evaluateNewBlock(blockCreator.get().getBlock(), payloadIdentifier, startedAt);
} catch (final Throwable throwable) {
if (canRetryBlockCreation(throwable) && !isBlockCreationCancelled(payloadIdentifier)) {
debugLambda(
LOG,
"Retrying block creation for payload id {} after recoverable error {}",
payloadIdentifier::toString,
() -> logException(throwable));
LOG.atDebug()
.setMessage("Retrying block creation for payload id {} after recoverable error {}")
.addArgument(payloadIdentifier)
.addArgument(() -> logException(throwable))
.log();
recoverableBlockCreation(payloadIdentifier, blockCreator, startedAt);
} else {
throw throwable;
@ -408,13 +407,14 @@ public class MergeCoordinator implements MergeMiningCoordinator, BadChainListene
mergeContext.putPayloadById(
payloadIdentifier, new BlockWithReceipts(bestBlock, resultBest.getReceipts()));
debugLambda(
LOG,
"Successfully built block {} for proposal identified by {}, with {} transactions, in {}ms",
bestBlock::toLogString,
payloadIdentifier::toString,
bestBlock.getBody().getTransactions()::size,
() -> System.currentTimeMillis() - startedAt);
LOG.atDebug()
.setMessage(
"Successfully built block {} for proposal identified by {}, with {} transactions, in {}ms")
.addArgument(bestBlock::toLogString)
.addArgument(payloadIdentifier)
.addArgument(bestBlock.getBody().getTransactions()::size)
.addArgument(() -> System.currentTimeMillis() - startedAt)
.log();
} else {
LOG.warn(
"Block {} built for proposal identified by {}, is not valid reason {}",
@ -442,9 +442,15 @@ public class MergeCoordinator implements MergeMiningCoordinator, BadChainListene
final var maybeHeadHeader = chain.getBlockHeader(headHash);
if (maybeHeadHeader.isPresent()) {
debugLambda(LOG, "BlockHeader {} is already present", maybeHeadHeader.get()::toLogString);
LOG.atDebug()
.setMessage("BlockHeader {} is already present")
.addArgument(maybeHeadHeader.get()::toLogString)
.log();
} else {
debugLambda(LOG, "Appending new head block hash {} to backward sync", headHash::toHexString);
LOG.atDebug()
.setMessage("Appending new head block hash {} to backward sync")
.addArgument(headHash::toHexString)
.log();
backwardSyncContext.updateHead(headHash);
backwardSyncContext
.syncBackwardsUntil(headHash)
@ -459,7 +465,10 @@ public class MergeCoordinator implements MergeMiningCoordinator, BadChainListene
.map(BlockHeader::getHash)
.map(finalizedHash::equals)
.orElse(Boolean.FALSE)) {
LOG.debug("Finalized block already set to {}, nothing to do", finalizedHash);
LOG.atDebug()
.setMessage("Finalized block already set to {}, nothing to do")
.addArgument(finalizedHash)
.log();
return;
}
@ -468,8 +477,10 @@ public class MergeCoordinator implements MergeMiningCoordinator, BadChainListene
.getBlockHeader(finalizedHash)
.ifPresentOrElse(
finalizedHeader -> {
debugLambda(
LOG, "Setting finalized block header to {}", finalizedHeader::toLogString);
LOG.atDebug()
.setMessage("Setting finalized block header to {}")
.addArgument(finalizedHeader::toLogString)
.log();
mergeContext.setFinalized(finalizedHeader);
},
() ->
@ -512,7 +523,7 @@ public class MergeCoordinator implements MergeMiningCoordinator, BadChainListene
@Override
public BlockProcessingResult rememberBlock(final Block block) {
debugLambda(LOG, "Remember block {}", block::toLogString);
LOG.atDebug().setMessage("Remember block {}").addArgument(block::toLogString).log();
final var chain = protocolContext.getBlockchain();
final var validationResult = validateBlock(block);
validationResult
@ -531,7 +542,10 @@ public class MergeCoordinator implements MergeMiningCoordinator, BadChainListene
if (newHead.getNumber() < blockchain.getChainHeadBlockNumber()
&& isDescendantOf(newHead, blockchain.getChainHeadHeader())) {
debugLambda(LOG, "Ignoring update to old head {}", newHead::toLogString);
LOG.atDebug()
.setMessage("Ignoring update to old head {}")
.addArgument(newHead::toLogString)
.log();
return ForkchoiceResult.withIgnoreUpdateToOldHead(newHead);
}
@ -567,29 +581,36 @@ public class MergeCoordinator implements MergeMiningCoordinator, BadChainListene
private boolean setNewHead(final MutableBlockchain blockchain, final BlockHeader newHead) {
if (newHead.getHash().equals(blockchain.getChainHeadHash())) {
debugLambda(LOG, "Nothing to do new head {} is already chain head", newHead::toLogString);
LOG.atDebug()
.setMessage("Nothing to do new head {} is already chain head")
.addArgument(newHead::toLogString)
.log();
return true;
}
if (newHead.getParentHash().equals(blockchain.getChainHeadHash())) {
debugLambda(
LOG,
"Forwarding chain head to the block {} saved from a previous newPayload invocation",
newHead::toLogString);
LOG.atDebug()
.setMessage(
"Forwarding chain head to the block {} saved from a previous newPayload invocation")
.addArgument(newHead::toLogString)
.log();
if (forwardWorldStateTo(newHead)) {
// move chain head forward:
return blockchain.forwardToBlock(newHead);
} else {
debugLambda(
LOG,
"Failed to move the worldstate forward to hash {}, not moving chain head",
newHead::toLogString);
LOG.atDebug()
.setMessage("Failed to move the worldstate forward to hash {}, not moving chain head")
.addArgument(newHead::toLogString)
.log();
return false;
}
}
debugLambda(LOG, "New head {} is a chain reorg, rewind chain head to it", newHead::toLogString);
LOG.atDebug()
.setMessage("New head {} is a chain reorg, rewind chain head to it")
.addArgument(newHead::toLogString)
.log();
return blockchain.rewindToBlock(newHead.getHash());
}
@ -601,11 +622,12 @@ public class MergeCoordinator implements MergeMiningCoordinator, BadChainListene
newWorldState.ifPresentOrElse(
mutableWorldState ->
debugLambda(
LOG,
"World state for state root hash {} and block hash {} persisted successfully",
mutableWorldState::rootHash,
newHead::getHash),
LOG.atDebug()
.setMessage(
"World state for state root hash {} and block hash {} persisted successfully")
.addArgument(mutableWorldState::rootHash)
.addArgument(newHead::getHash)
.log(),
() ->
LOG.error(
"Could not persist world for root hash {} and block hash {}",
@ -769,11 +791,11 @@ public class MergeCoordinator implements MergeMiningCoordinator, BadChainListene
@Override
public boolean isDescendantOf(final BlockHeader ancestorBlock, final BlockHeader newBlock) {
debugLambda(
LOG,
"checking if block {} is ancestor of {}",
ancestorBlock::toLogString,
newBlock::toLogString);
LOG.atDebug()
.setMessage("checking if block {} is ancestor of {}")
.addArgument(ancestorBlock::toLogString)
.addArgument(newBlock::toLogString)
.log();
// start with self, because descending from yourself is valid
Optional<BlockHeader> parentOf = Optional.of(newBlock);
@ -789,11 +811,11 @@ public class MergeCoordinator implements MergeMiningCoordinator, BadChainListene
&& ancestorBlock.getBlockHash().equals(parentOf.get().getBlockHash())) {
return true;
} else {
debugLambda(
LOG,
"looped all the way back, did not find ancestor {} of child {}",
ancestorBlock::toLogString,
newBlock::toLogString);
LOG.atDebug()
.setMessage("looped all the way back, did not find ancestor {} of child {}")
.addArgument(ancestorBlock::toLogString)
.addArgument(newBlock::toLogString)
.log();
return false;
}
}

@ -16,7 +16,6 @@ package org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods;
import static org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcError.BLOCK_NOT_FOUND;
import static org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcError.INTERNAL_ERROR;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext;
@ -61,13 +60,13 @@ public class TraceCall extends AbstractTraceByBlock implements JsonRpcMethod {
final TraceTypeParameter traceTypeParameter =
requestContext.getRequiredParameter(1, TraceTypeParameter.class);
final String blockNumberString = String.valueOf(blockNumber);
traceLambda(
LOG,
"Received RPC rpcName={} callParams={} block={} traceTypes={}",
this::getName,
callParams::toString,
blockNumberString::toString,
traceTypeParameter::toString);
LOG.atTrace()
.setMessage("Received RPC rpcName={} callParams={} block={} traceTypes={}")
.addArgument(this::getName)
.addArgument(callParams)
.addArgument(blockNumberString)
.addArgument(traceTypeParameter)
.log();
final Optional<BlockHeader> maybeBlockHeader =
blockchainQueriesSupplier.get().getBlockHeaderByNumber(blockNumber);

@ -16,7 +16,6 @@ package org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods;
import static org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcError.BLOCK_NOT_FOUND;
import static org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcError.INTERNAL_ERROR;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext;
@ -88,12 +87,12 @@ public class TraceCallMany extends TraceCall implements JsonRpcMethod {
transactionsAndTraceTypeParameters =
requestContext.getRequiredParameter(0, TraceCallManyParameter[].class);
final String blockNumberString = String.valueOf(blockNumber);
traceLambda(
LOG,
"Received RPC rpcName={} trace_callManyParams={} block={}",
this::getName,
transactionsAndTraceTypeParameters::toString,
blockNumberString::toString);
LOG.atTrace()
.setMessage("Received RPC rpcName={} trace_callManyParams={} block={}")
.addArgument(this::getName)
.addArgument(transactionsAndTraceTypeParameters)
.addArgument(blockNumberString)
.log();
} catch (final Exception e) {
LOG.error("Error parsing trace_callMany parameters: {}", e.getLocalizedMessage());
return new JsonRpcErrorResponse(

@ -19,8 +19,6 @@ import static org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.Executi
import static org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.ExecutionEngineJsonRpcMethod.EngineStatus.SYNCING;
import static org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.ExecutionEngineJsonRpcMethod.EngineStatus.VALID;
import static org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.engine.WithdrawalsValidatorProvider.getWithdrawalsValidator;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.warnLambda;
import org.hyperledger.besu.consensus.merge.blockcreation.MergeMiningCoordinator;
import org.hyperledger.besu.consensus.merge.blockcreation.MergeMiningCoordinator.ForkchoiceResult;
@ -151,11 +149,14 @@ public abstract class AbstractEngineForkchoiceUpdated extends ExecutionEngineJso
if (maybePayloadAttributes.isPresent()
&& !isPayloadAttributesValid(maybePayloadAttributes.get(), withdrawals, newHead)) {
warnLambda(
LOG,
"Invalid payload attributes: {}",
() ->
maybePayloadAttributes.map(EnginePayloadAttributesParameter::serialize).orElse(null));
LOG.atWarn()
.setMessage("Invalid payload attributes: {}")
.addArgument(
() ->
maybePayloadAttributes
.map(EnginePayloadAttributesParameter::serialize)
.orElse(null))
.log();
return new JsonRpcErrorResponse(requestId, getInvalidPayloadError());
}
@ -181,11 +182,12 @@ public abstract class AbstractEngineForkchoiceUpdated extends ExecutionEngineJso
payloadId.ifPresent(
pid ->
debugLambda(
LOG,
"returning identifier {} for requested payload {}",
pid::toHexString,
() -> maybePayloadAttributes.map(EnginePayloadAttributesParameter::serialize)));
LOG.atDebug()
.setMessage("returning identifier {} for requested payload {}")
.addArgument(pid::toHexString)
.addArgument(
() -> maybePayloadAttributes.map(EnginePayloadAttributesParameter::serialize))
.log());
logForkchoiceUpdatedCall(VALID, forkChoice);
return new JsonRpcSuccessResponse(
@ -241,12 +243,12 @@ public abstract class AbstractEngineForkchoiceUpdated extends ExecutionEngineJso
}
private void logPayload(final EnginePayloadAttributesParameter payloadAttributes) {
debugLambda(
LOG,
"timestamp: {}, prevRandao: {}, suggestedFeeRecipient: {}",
payloadAttributes::getTimestamp,
() -> payloadAttributes.getPrevRandao().toHexString(),
() -> payloadAttributes.getSuggestedFeeRecipient().toHexString());
LOG.atDebug()
.setMessage("timestamp: {}, prevRandao: {}, suggestedFeeRecipient: {}")
.addArgument(payloadAttributes::getTimestamp)
.addArgument(() -> payloadAttributes.getPrevRandao().toHexString())
.addArgument(() -> payloadAttributes.getSuggestedFeeRecipient().toHexString())
.log();
}
private boolean isValidForkchoiceState(

@ -14,9 +14,6 @@
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.engine;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.infoLambda;
import org.hyperledger.besu.consensus.merge.blockcreation.MergeMiningCoordinator;
import org.hyperledger.besu.consensus.merge.blockcreation.PayloadIdentifier;
import org.hyperledger.besu.ethereum.ProtocolContext;
@ -62,15 +59,16 @@ public abstract class AbstractEngineGetPayload extends ExecutionEngineJsonRpcMet
if (blockWithReceipts.isPresent()) {
final var proposal = blockWithReceipts.get();
final var proposalHeader = proposal.getHeader();
infoLambda(
LOG,
"Fetch block proposal by identifier: {}, hash: {}, number: {}, coinbase: {}, transaction count: {}",
payloadId::toHexString,
proposalHeader::getHash,
proposalHeader::getNumber,
proposalHeader::getCoinbase,
() -> proposal.getBlock().getBody().getTransactions().size());
debugLambda(LOG, "assembledBlock {}", () -> proposal);
LOG.atInfo()
.setMessage(
"Fetch block proposal by identifier: {}, hash: {}, number: {}, coinbase: {}, transaction count: {}")
.addArgument(payloadId::toHexString)
.addArgument(proposalHeader::getHash)
.addArgument(proposalHeader::getNumber)
.addArgument(proposalHeader::getCoinbase)
.addArgument(() -> proposal.getBlock().getBody().getTransactions().size())
.log();
LOG.atDebug().setMessage("assembledBlock {}").addArgument(() -> proposal).log();
return createResponse(request, proposal);
}
return new JsonRpcErrorResponse(request.getRequest().getId(), JsonRpcError.UNKNOWN_PAYLOAD);

@ -22,8 +22,6 @@ import static org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.Executi
import static org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.ExecutionEngineJsonRpcMethod.EngineStatus.VALID;
import static org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.engine.WithdrawalsValidatorProvider.getWithdrawalsValidator;
import static org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcError.INVALID_PARAMS;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.consensus.merge.blockcreation.MergeMiningCoordinator;
import org.hyperledger.besu.datatypes.Hash;
@ -98,7 +96,10 @@ public abstract class AbstractEngineNewPayload extends ExecutionEngineJsonRpcMet
Object reqId = requestContext.getRequest().getId();
traceLambda(LOG, "blockparam: {}", () -> Json.encodePrettily(blockParam));
LOG.atTrace()
.setMessage("blockparam: {}")
.addArgument(() -> Json.encodePrettily(blockParam))
.log();
final Optional<List<Withdrawal>> maybeWithdrawals =
Optional.ofNullable(blockParam.getWithdrawals())
@ -203,8 +204,10 @@ public abstract class AbstractEngineNewPayload extends ExecutionEngineJsonRpcMet
newBlockHeader, new BlockBody(transactions, Collections.emptyList(), maybeWithdrawals));
if (parentHeader.isEmpty()) {
debugLambda(
LOG, "Parent of block {} is not present, append it to backward sync", block::toLogString);
LOG.atDebug()
.setMessage("Parent of block {} is not present, append it to backward sync")
.addArgument(block::toLogString)
.log();
mergeCoordinator.appendNewPayloadToSync(block);
return respondWith(reqId, blockParam, null, SYNCING);
@ -266,14 +269,15 @@ public abstract class AbstractEngineNewPayload extends ExecutionEngineJsonRpcMet
throw new IllegalArgumentException(
"Don't call respondWith() with invalid status of " + status.toString());
}
debugLambda(
LOG,
"New payload: number: {}, hash: {}, parentHash: {}, latestValidHash: {}, status: {}",
() -> param.getBlockNumber(),
() -> param.getBlockHash(),
() -> param.getParentHash(),
() -> latestValidHash == null ? null : latestValidHash.toHexString(),
status::name);
LOG.atDebug()
.setMessage(
"New payload: number: {}, hash: {}, parentHash: {}, latestValidHash: {}, status: {}")
.addArgument(param::getBlockNumber)
.addArgument(param::getBlockHash)
.addArgument(param::getParentHash)
.addArgument(() -> latestValidHash == null ? null : latestValidHash.toHexString())
.addArgument(status::name)
.log();
return new JsonRpcSuccessResponse(
requestId, new EnginePayloadStatusResult(status, latestValidHash, Optional.empty()));
}

@ -15,7 +15,6 @@
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.engine;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod.ENGINE_EXCHANGE_CAPABILITIES;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod;
@ -51,10 +50,12 @@ public class EngineExchangeCapabilities extends ExecutionEngineJsonRpcMethod {
public JsonRpcResponse syncResponse(final JsonRpcRequestContext requestContext) {
engineCallListener.executionEngineCalled();
final String[] remoteCapabilities = requestContext.getRequiredParameter(0, String[].class);
final Object reqId = requestContext.getRequest().getId();
traceLambda(LOG, "received remote capabilities: {}", () -> remoteCapabilities);
LOG.atTrace()
.setMessage("received remote capabilities: {}")
.addArgument(() -> requestContext.getRequiredParameter(0, String[].class))
.log();
final List<String> localCapabilities =
Stream.of(RpcMethod.values())

@ -15,7 +15,6 @@
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.engine;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod.ENGINE_EXCHANGE_TRANSITION_CONFIGURATION;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.consensus.merge.MergeContext;
import org.hyperledger.besu.datatypes.Hash;
@ -71,16 +70,17 @@ public class EngineExchangeTransitionConfiguration extends ExecutionEngineJsonRp
0, EngineExchangeTransitionConfigurationParameter.class);
final Object reqId = requestContext.getRequest().getId();
traceLambda(
LOG,
"received transitionConfiguration: {}",
() -> {
try {
return mapperSupplier.get().writeValueAsString(remoteTransitionConfiguration);
} catch (JsonProcessingException e) {
throw new RuntimeException(e);
}
});
LOG.atTrace()
.setMessage("received transitionConfiguration: {}")
.addArgument(
() -> {
try {
return mapperSupplier.get().writeValueAsString(remoteTransitionConfiguration);
} catch (JsonProcessingException e) {
throw new RuntimeException(e);
}
})
.log();
final Optional<BlockHeader> maybeTerminalPoWBlockHeader =
mergeContextOptional.flatMap(MergeContext::getTerminalPoWBlock);

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.engine;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod;
@ -64,7 +62,11 @@ public class EngineGetPayloadBodiesByHashV1 extends ExecutionEngineJsonRpcMethod
final Hash[] blockHashes = request.getRequiredParameter(0, Hash[].class);
traceLambda(LOG, "{} parameters: blockHashes {}", () -> getName(), () -> blockHashes);
LOG.atTrace()
.setMessage("{} parameters: blockHashes {}")
.addArgument(this::getName)
.addArgument(blockHashes)
.log();
if (blockHashes.length > getMaxRequestBlocks()) {
return new JsonRpcErrorResponse(reqId, JsonRpcError.INVALID_RANGE_REQUEST_TOO_LARGE);

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.engine;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext;
@ -64,12 +62,12 @@ public class EngineGetPayloadBodiesByRangeV1 extends ExecutionEngineJsonRpcMetho
final long count = request.getRequiredParameter(1, UnsignedLongParameter.class).getValue();
final Object reqId = request.getRequest().getId();
traceLambda(
LOG,
"{} parameters: start block number {} count {}",
() -> getName(),
() -> startBlockNumber,
() -> count);
LOG.atTrace()
.setMessage("{} parameters: start block number {} count {}")
.addArgument(this::getName)
.addArgument(startBlockNumber)
.addArgument(count)
.log();
if (startBlockNumber < 1 || count < 1) {
return new JsonRpcErrorResponse(reqId, JsonRpcError.INVALID_PARAMS);

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.ethereum.blockcreation;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.datatypes.Wei;
import org.hyperledger.besu.ethereum.GasLimitCalculator;
@ -136,13 +134,14 @@ public class BlockTransactionSelector {
receipts.add(receipt);
cumulativeGasUsed += gasUsed;
cumulativeDataGasUsed += dataGasUsed;
traceLambda(
LOG,
"New selected transaction {}, total transactions {}, cumulative gas used {}, cumulative data gas used {}",
transaction::toTraceLog,
transactions::size,
() -> cumulativeGasUsed,
() -> cumulativeDataGasUsed);
LOG.atTrace()
.setMessage(
"New selected transaction {}, total transactions {}, cumulative gas used {}, cumulative data gas used {}")
.addArgument(transaction::toTraceLog)
.addArgument(transactions::size)
.addArgument(cumulativeGasUsed)
.addArgument(cumulativeDataGasUsed)
.log();
}
private void updateWithInvalidTransaction(
@ -266,12 +265,16 @@ public class BlockTransactionSelector {
*/
public TransactionSelectionResults buildTransactionListForBlock() {
LOG.debug("Transaction pool size {}", pendingTransactions.size());
traceLambda(
LOG, "Transaction pool content {}", () -> pendingTransactions.toTraceLog(false, false));
LOG.atTrace()
.setMessage("Transaction pool content {}")
.addArgument(() -> pendingTransactions.toTraceLog(false, false))
.log();
pendingTransactions.selectTransactions(
pendingTransaction -> evaluateTransaction(pendingTransaction, false));
traceLambda(
LOG, "Transaction selection result result {}", transactionSelectionResult::toTraceLog);
LOG.atTrace()
.setMessage("Transaction selection result result {}")
.addArgument(transactionSelectionResult::toTraceLog)
.log();
return transactionSelectionResult;
}
@ -302,10 +305,12 @@ public class BlockTransactionSelector {
}
if (transactionTooLargeForBlock(transaction)) {
traceLambda(
LOG, "Transaction {} too large to select for block creation", transaction::toTraceLog);
LOG.atTrace()
.setMessage("Transaction {} too large to select for block creation")
.addArgument(transaction::toTraceLog)
.log();
if (blockOccupancyAboveThreshold()) {
traceLambda(LOG, "Block occupancy above threshold, completing operation");
LOG.trace("Block occupancy above threshold, completing operation");
return TransactionSelectionResult.COMPLETE_OPERATION;
} else {
return TransactionSelectionResult.CONTINUE;
@ -359,7 +364,10 @@ public class BlockTransactionSelector {
if (!effectiveResult.isInvalid()) {
worldStateUpdater.commit();
traceLambda(LOG, "Selected {} for block creation", transaction::toTraceLog);
LOG.atTrace()
.setMessage("Selected {} for block creation")
.addArgument(transaction::toTraceLog)
.log();
updateTransactionResultTracking(transaction, effectiveResult);
} else {
final boolean isIncorrectNonce = isIncorrectNonce(effectiveResult.getValidationResult());
@ -416,19 +424,19 @@ public class BlockTransactionSelector {
final TransactionInvalidReason invalidReason = invalidReasonValidationResult.getInvalidReason();
// If the invalid reason is transient, then leave the transaction in the pool and continue
if (isTransientValidationError(invalidReason)) {
traceLambda(
LOG,
"Transient validation error {} for transaction {} keeping it in the pool",
invalidReason::toString,
transaction::toTraceLog);
LOG.atTrace()
.setMessage("Transient validation error {} for transaction {} keeping it in the pool")
.addArgument(invalidReason)
.addArgument(transaction::toTraceLog)
.log();
return TransactionSelectionResult.CONTINUE;
}
// If the transaction was invalid for any other reason, delete it, and continue.
traceLambda(
LOG,
"Delete invalid transaction {}, reason {}",
transaction::toTraceLog,
invalidReason::toString);
LOG.atTrace()
.setMessage("Delete invalid transaction {}, reason {}")
.addArgument(transaction::toTraceLog)
.addArgument(invalidReason)
.log();
return TransactionSelectionResult.DELETE_TRANSACTION_AND_CONTINUE;
}

@ -15,8 +15,6 @@
*/
package org.hyperledger.besu.ethereum.bonsai;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.bonsai.BonsaiWorldStateKeyValueStorage.BonsaiUpdater;
import org.hyperledger.besu.ethereum.chain.Blockchain;
@ -102,7 +100,10 @@ public abstract class AbstractTrieLogManager<T extends MutableWorldState>
final BonsaiWorldStateUpdater localUpdater,
final BonsaiWorldStateArchive worldStateArchive,
final BonsaiPersistedWorldState forWorldState) {
debugLambda(LOG, "Adding layered world state for {}", forBlockHeader::toLogString);
LOG.atDebug()
.setMessage("Adding layered world state for {}")
.addArgument(forBlockHeader::toLogString)
.log();
final TrieLogLayer trieLog = localUpdater.generateTrieLog(forBlockHeader.getBlockHash());
trieLog.freeze();
addCachedLayer(
@ -140,11 +141,11 @@ public abstract class AbstractTrieLogManager<T extends MutableWorldState>
final Hash worldStateRootHash,
final TrieLogLayer trieLog,
final BonsaiUpdater stateUpdater) {
debugLambda(
LOG,
"Persisting trie log for block hash {} and world state root {}",
blockHeader::toLogString,
worldStateRootHash::toHexString);
LOG.atDebug()
.setMessage("Persisting trie log for block hash {} and world state root {}")
.addArgument(blockHeader::toLogString)
.addArgument(worldStateRootHash::toHexString)
.log();
final BytesValueRLPOutput rlpLog = new BytesValueRLPOutput();
trieLog.writeTo(rlpLog);
stateUpdater

@ -19,7 +19,6 @@ package org.hyperledger.besu.ethereum.bonsai;
import static org.hyperledger.besu.ethereum.bonsai.BonsaiAccount.fromRLP;
import static org.hyperledger.besu.ethereum.bonsai.BonsaiWorldStateKeyValueStorage.WORLD_BLOCK_HASH_KEY;
import static org.hyperledger.besu.ethereum.bonsai.BonsaiWorldStateKeyValueStorage.WORLD_ROOT_HASH_KEY;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.datatypes.Hash;
@ -299,7 +298,10 @@ public class BonsaiPersistedWorldState implements MutableWorldState, BonsaiWorld
@Override
public void persist(final BlockHeader blockHeader) {
final Optional<BlockHeader> maybeBlockHeader = Optional.ofNullable(blockHeader);
debugLambda(LOG, "Persist world state for block {}", maybeBlockHeader::toString);
LOG.atDebug()
.setMessage("Persist world state for block {}")
.addArgument(maybeBlockHeader)
.log();
boolean success = false;
final BonsaiWorldStateUpdater localUpdater = updater.copy();

@ -15,8 +15,6 @@
*/
package org.hyperledger.besu.ethereum.bonsai;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.warnLambda;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.bonsai.BonsaiWorldStateKeyValueStorage.BonsaiStorageSubscriber;
import org.hyperledger.besu.ethereum.trie.MerklePatriciaTrie;
@ -99,7 +97,10 @@ public class BonsaiSnapshotWorldStateKeyValueStorage extends BonsaiWorldStateKey
try {
tryClose();
} catch (Exception e) {
warnLambda(LOG, "exception while trying to close : {}", e::getMessage);
LOG.atWarn()
.setMessage("exception while trying to close : {}")
.addArgument(e::getMessage)
.log();
}
}

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.ethereum.bonsai;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.BlockHeader;
@ -62,11 +60,11 @@ public class LayeredTrieLogManager extends AbstractTrieLogManager<BonsaiLayeredW
blockHeader.getNumber(),
worldStateRootHash,
trieLog);
debugLambda(
LOG,
"adding layered world state for block {}, state root hash {}",
blockHeader::toLogString,
worldStateRootHash::toShortHexString);
LOG.atDebug()
.setMessage("adding layered world state for block {}, state root hash {}")
.addArgument(blockHeader::toLogString)
.addArgument(worldStateRootHash::toShortHexString)
.log();
cachedWorldStatesByHash.put(
blockHeader.getHash(), new LayeredWorldStateCache(bonsaiLayeredWorldState));
}

@ -15,8 +15,6 @@
*/
package org.hyperledger.besu.ethereum.bonsai;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.bonsai.BonsaiWorldStateKeyValueStorage.BonsaiStorageSubscriber;
import org.hyperledger.besu.ethereum.chain.Blockchain;
@ -60,11 +58,11 @@ public class SnapshotTrieLogManager extends AbstractTrieLogManager<BonsaiSnapsho
final BonsaiWorldStateArchive worldStateArchive,
final BonsaiPersistedWorldState worldState) {
debugLambda(
LOG,
"adding snapshot world state for block {}, state root hash {}",
blockHeader::toLogString,
worldStateRootHash::toShortHexString);
LOG.atDebug()
.setMessage("adding snapshot world state for block {}, state root hash {}")
.addArgument(blockHeader::toLogString)
.addArgument(worldStateRootHash::toShortHexString)
.log();
// TODO: add a generic param so we don't have to cast:
BonsaiSnapshotWorldState snapshotWorldState;

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.ethereum.mainnet.feemarket;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.datatypes.DataGas;
import org.hyperledger.besu.datatypes.Wei;
@ -46,11 +44,11 @@ public class CancunFeeMarket extends LondonFeeMarket {
Wei.of(
fakeExponential(
MIN_DATA_GAS_PRICE, excessDataGas.toBigInteger(), DATA_GAS_PRICE_UPDATE_FRACTION));
traceLambda(
LOG,
"parentExcessDataGas: {} dataGasPrice: {}",
excessDataGas::toShortHexString,
dataGasPrice::toHexString);
LOG.atTrace()
.setMessage("parentExcessDataGas: {} dataGasPrice: {}")
.addArgument(excessDataGas::toShortHexString)
.addArgument(dataGasPrice::toHexString)
.log();
return dataGasPrice;
}

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.ethereum.eth.manager;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.infoLambda;
import org.hyperledger.besu.ethereum.eth.manager.EthPeer.DisconnectCallback;
import org.hyperledger.besu.ethereum.eth.peervalidation.PeerValidator;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSpec;
@ -277,12 +275,13 @@ public class EthPeers {
.findFirst()
.ifPresent(
peer -> {
infoLambda(
LOG,
"disconnecting peer {}. Waiting for better peers. Current {} of max {}",
peer::toString,
this::peerCount,
this::getMaxPeers);
LOG.atInfo()
.setMessage(
"disconnecting peer {}. Waiting for better peers. Current {} of max {}")
.addArgument(peer)
.addArgument(this::peerCount)
.addArgument(this::getMaxPeers)
.log();
peer.disconnect(DisconnectMessage.DisconnectReason.USELESS_PEER);
});
}

@ -15,7 +15,6 @@
package org.hyperledger.besu.ethereum.eth.manager.task;
import static com.google.common.base.Preconditions.checkArgument;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.eth.manager.EthContext;
@ -126,11 +125,11 @@ public abstract class AbstractGetHeadersFromPeerTask
private void updatePeerChainState(final EthPeer peer, final BlockHeader blockHeader) {
if (blockHeader.getNumber() > peer.chainState().getEstimatedHeight()) {
traceLambda(
LOG,
"Updating chain state for peer {} to block header {}",
peer::getShortNodeId,
blockHeader::toLogString);
LOG.atTrace()
.setMessage("Updating chain state for peer {} to block header {}")
.addArgument(peer::getShortNodeId)
.addArgument(blockHeader::toLogString)
.log();
peer.chainState().update(blockHeader);
}
LOG.trace("Peer chain state {}", peer.chainState());

@ -14,9 +14,6 @@
*/
package org.hyperledger.besu.ethereum.eth.manager.task;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.ethereum.eth.manager.EthContext;
import org.hyperledger.besu.ethereum.eth.manager.EthPeer;
import org.hyperledger.besu.ethereum.eth.manager.EthPeers;
@ -69,11 +66,11 @@ public abstract class AbstractRetryingSwitchingPeerTask<T> extends AbstractRetry
.orElseGet(this::selectNextPeer); // otherwise, select a new one from the pool
if (maybePeer.isEmpty()) {
traceLambda(
LOG,
"No peer found to try to execute task at attempt {}, tried peers {}",
this::getRetryCount,
triedPeers::toString);
LOG.atTrace()
.setMessage("No peer found to try to execute task at attempt {}, tried peers {}")
.addArgument(this::getRetryCount)
.addArgument(triedPeers)
.log();
final var ex = new NoAvailablePeersException();
return CompletableFuture.failedFuture(ex);
}
@ -81,21 +78,21 @@ public abstract class AbstractRetryingSwitchingPeerTask<T> extends AbstractRetry
final EthPeer peerToUse = maybePeer.get();
assignPeer(peerToUse);
traceLambda(
LOG,
"Trying to execute task on peer {}, attempt {}",
this::getAssignedPeer,
this::getRetryCount);
LOG.atTrace()
.setMessage("Trying to execute task on peer {}, attempt {}")
.addArgument(this::getAssignedPeer)
.addArgument(this::getRetryCount)
.log();
return executeTaskOnCurrentPeer(peerToUse)
.thenApply(
peerResult -> {
traceLambda(
LOG,
"Got result {} from peer {}, attempt {}",
peerResult::toString,
peerToUse::toString,
this::getRetryCount);
LOG.atTrace()
.setMessage("Got result {} from peer {}, attempt {}")
.addArgument(peerResult)
.addArgument(peerToUse)
.addArgument(this::getRetryCount)
.log();
result.complete(peerResult);
return peerResult;
});
@ -146,12 +143,13 @@ public abstract class AbstractRetryingSwitchingPeerTask<T> extends AbstractRetry
.or(() -> peers.streamAvailablePeers().sorted(peers.getBestChainComparator()).findFirst())
.ifPresent(
peer -> {
debugLambda(
LOG,
"Refresh peers disconnecting peer {}. Waiting for better peers. Current {} of max {}",
peer::toString,
peers::peerCount,
peers::getMaxPeers);
LOG.atDebug()
.setMessage(
"Refresh peers disconnecting peer {}. Waiting for better peers. Current {} of max {}")
.addArgument(peer)
.addArgument(peers::peerCount)
.addArgument(peers::getMaxPeers)
.log();
peer.disconnect(DisconnectReason.USELESS_PEER);
});
}

@ -15,7 +15,6 @@
package org.hyperledger.besu.ethereum.eth.manager.task;
import static org.hyperledger.besu.ethereum.eth.transactions.TransactionPoolConfiguration.MAX_PENDING_TRANSACTIONS;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.core.Transaction;
@ -97,12 +96,12 @@ public class BufferedGetPooledTransactionsFromPeerFetcher {
List<Transaction> retrievedTransactions = result.getResult();
transactionTracker.markTransactionsAsSeen(peer, retrievedTransactions);
traceLambda(
LOG,
"Got {} transactions of {} hashes requested from peer {}",
retrievedTransactions::size,
task.getTransactionHashes()::size,
peer::toString);
LOG.atTrace()
.setMessage("Got {} transactions of {} hashes requested from peer {}")
.addArgument(retrievedTransactions::size)
.addArgument(task.getTransactionHashes()::size)
.addArgument(peer)
.log();
transactionPool.addRemoteTransactions(retrievedTransactions);
});
@ -127,12 +126,13 @@ public class BufferedGetPooledTransactionsFromPeerFetcher {
final int alreadySeenCount = discarded;
alreadySeenTransactionsCounter.inc(alreadySeenCount);
traceLambda(
LOG,
"Transaction hashes to request from peer {}, fresh count {}, already seen count {}",
peer::toString,
toRetrieve::size,
() -> alreadySeenCount);
LOG.atTrace()
.setMessage(
"Transaction hashes to request from peer {}, fresh count {}, already seen count {}")
.addArgument(peer)
.addArgument(toRetrieve::size)
.addArgument(alreadySeenCount)
.log();
return toRetrieve;
}

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.ethereum.eth.manager.task;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.core.Block;
import org.hyperledger.besu.ethereum.eth.manager.EthContext;
@ -76,12 +74,12 @@ public class RetryingGetBlockFromPeersTask
return executeSubTask(getBlockTask::run)
.thenApply(
peerResult -> {
debugLambda(
LOG,
"Got block {} from peer {}, attempt {}",
peerResult.getResult()::toLogString,
peerResult.getPeer()::toString,
this::getRetryCount);
LOG.atDebug()
.setMessage("Got block {} from peer {}, attempt {}")
.addArgument(peerResult.getResult()::toLogString)
.addArgument(peerResult.getPeer())
.addArgument(this::getRetryCount)
.log();
result.complete(peerResult);
return peerResult;
});
@ -95,18 +93,18 @@ public class RetryingGetBlockFromPeersTask
@Override
protected void handleTaskError(final Throwable error) {
if (getRetryCount() < getMaxRetries()) {
debugLambda(
LOG,
"Failed to get block {} from peer {}, attempt {}, retrying later",
this::logBlockNumberMaybeHash,
this::getAssignedPeer,
this::getRetryCount);
LOG.atDebug()
.setMessage("Failed to get block {} from peer {}, attempt {}, retrying later")
.addArgument(this::logBlockNumberMaybeHash)
.addArgument(this::getAssignedPeer)
.addArgument(this::getRetryCount)
.log();
} else {
debugLambda(
LOG,
"Failed to get block {} after {} retries",
this::logBlockNumberMaybeHash,
this::getRetryCount);
LOG.atDebug()
.setMessage("Failed to get block {} after {} retries")
.addArgument(this::logBlockNumberMaybeHash)
.addArgument(this::getRetryCount)
.log();
}
super.handleTaskError(error);
}

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.ethereum.eth.manager.task;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.ethereum.core.Block;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.eth.manager.EthContext;
@ -72,12 +70,12 @@ public class RetryingGetBlocksFromPeersTask
return executeSubTask(getBodiesTask::run)
.thenApply(
peerResult -> {
debugLambda(
LOG,
"Got {} blocks from peer {}, attempt {}",
peerResult.getResult()::size,
peerResult.getPeer()::toString,
this::getRetryCount);
LOG.atDebug()
.setMessage("Got {} blocks from peer {}, attempt {}")
.addArgument(peerResult.getResult()::size)
.addArgument(peerResult.getPeer())
.addArgument(this::getRetryCount)
.log();
if (peerResult.getResult().isEmpty()) {
currentPeer.recordUselessResponse("GetBodiesFromPeerTask");
@ -98,12 +96,12 @@ public class RetryingGetBlocksFromPeersTask
@Override
protected void handleTaskError(final Throwable error) {
if (getRetryCount() < getMaxRetries()) {
debugLambda(
LOG,
"Failed to get {} blocks from peer {}, attempt {}, retrying later",
headers::size,
this::getAssignedPeer,
this::getRetryCount);
LOG.atDebug()
.setMessage("Failed to get {} blocks from peer {}, attempt {}, retrying later")
.addArgument(headers::size)
.addArgument(this::getAssignedPeer)
.addArgument(this::getRetryCount)
.log();
} else {
LOG.debug("Failed to get {} blocks after {} retries", headers.size(), getRetryCount());
}

@ -15,8 +15,6 @@
package org.hyperledger.besu.ethereum.eth.sync;
import static org.hyperledger.besu.util.FutureUtils.exceptionallyCompose;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.consensus.merge.ForkchoiceEvent;
import org.hyperledger.besu.consensus.merge.UnverifiedForkchoiceListener;
@ -185,18 +183,19 @@ public class BlockPropagationManager implements UnverifiedForkchoiceListener {
private void onBlockAdded(final BlockAddedEvent blockAddedEvent) {
// Check to see if any of our pending blocks are now ready for import
final Block newBlock = blockAddedEvent.getBlock();
traceLambda(
LOG,
"Block added event type {} for block {}. Current status {}",
blockAddedEvent::getEventType,
newBlock::toLogString,
() -> this);
LOG.atTrace()
.setMessage("Block added event type {} for block {}. Current status {}")
.addArgument(blockAddedEvent::getEventType)
.addArgument(newBlock::toLogString)
.addArgument(this)
.log();
// If there is no children to process, maybe try non announced blocks
if (!maybeProcessPendingChildrenBlocks(newBlock)) {
traceLambda(
LOG, "There are no pending blocks ready to import for block {}", newBlock::toLogString);
LOG.atTrace()
.setMessage("There are no pending blocks ready to import for block {}")
.addArgument(newBlock::toLogString)
.log();
maybeProcessNonAnnouncedBlocks(newBlock);
}
@ -224,11 +223,14 @@ public class BlockPropagationManager implements UnverifiedForkchoiceListener {
}
if (!readyForImport.isEmpty()) {
traceLambda(
LOG,
"Ready to import pending blocks found [{}] for block {}",
() -> readyForImport.stream().map(Block::toLogString).collect(Collectors.joining(", ")),
block::toLogString);
LOG.atTrace()
.setMessage("Ready to import pending blocks found [{}] for block {}")
.addArgument(
() ->
readyForImport.stream().map(Block::toLogString).collect(Collectors.joining(", ")))
.addArgument(block::toLogString)
.log();
final Supplier<CompletableFuture<List<Block>>> importBlocksTask =
PersistBlockTask.forUnorderedBlocks(
@ -290,12 +292,12 @@ public class BlockPropagationManager implements UnverifiedForkchoiceListener {
final NewBlockMessage newBlockMessage = NewBlockMessage.readFrom(message.getData());
try {
final Block block = newBlockMessage.block(protocolSchedule);
traceLambda(
LOG,
"New block from network {} from peer {}. Current status {}",
block::toLogString,
message::getPeer,
() -> this);
LOG.atTrace()
.setMessage("New block from network {} from peer {}. Current status {}")
.addArgument(block::toLogString)
.addArgument(message::getPeer)
.addArgument(this)
.log();
final Difficulty totalDifficulty = newBlockMessage.totalDifficulty(protocolSchedule);
@ -306,20 +308,27 @@ public class BlockPropagationManager implements UnverifiedForkchoiceListener {
final long bestChainHeight = syncState.bestChainHeight(localChainHeight);
if (!shouldImportBlockAtHeight(
block.getHeader().getNumber(), localChainHeight, bestChainHeight)) {
traceLambda(
LOG,
"Do not import new block from network {}, current chain heights are: local {}, best {}",
block::toLogString,
() -> localChainHeight,
() -> bestChainHeight);
LOG.atTrace()
.setMessage(
"Do not import new block from network {}, current chain heights are: local {}, best {}")
.addArgument(block::toLogString)
.addArgument(localChainHeight)
.addArgument(bestChainHeight)
.log();
return;
}
if (pendingBlocksManager.contains(block.getHash())) {
traceLambda(LOG, "New block from network {} is already pending", block::toLogString);
LOG.atTrace()
.setMessage("New block from network {} is already pending")
.addArgument(block::toLogString)
.log();
return;
}
if (blockchain.contains(block.getHash())) {
traceLambda(LOG, "New block from network {} is already present", block::toLogString);
LOG.atTrace()
.setMessage("New block from network {} is already present")
.addArgument(block::toLogString)
.log();
return;
}
@ -341,12 +350,12 @@ public class BlockPropagationManager implements UnverifiedForkchoiceListener {
// Register announced blocks
final List<NewBlockHash> announcedBlocks =
Lists.newArrayList(newBlockHashesMessage.getNewHashes());
traceLambda(
LOG,
"New block hashes from network {} from peer {}. Current status {}",
() -> toLogString(announcedBlocks),
message::getPeer,
() -> this);
LOG.atTrace()
.setMessage("New block hashes from network {} from peer {}. Current status {}")
.addArgument(() -> toLogString(announcedBlocks))
.addArgument(message::getPeer)
.addArgument(this)
.log();
for (final NewBlockHash announcedBlock : announcedBlocks) {
message.getPeer().registerKnownBlock(announcedBlock.hash());
@ -432,7 +441,10 @@ public class BlockPropagationManager implements UnverifiedForkchoiceListener {
.whenComplete(
(block, throwable) -> {
if (block != null) {
debugLambda(LOG, "Successfully retrieved block {}", block::toLogString);
LOG.atDebug()
.setMessage("Successfully retrieved block {}")
.addArgument(block::toLogString)
.log();
processingBlocksManager.registerReceivedBlock(block);
} else {
if (throwable != null) {
@ -443,10 +455,10 @@ public class BlockPropagationManager implements UnverifiedForkchoiceListener {
} else {
// this could happen if we give up at some point since we find that it makes no
// sense to retry
debugLambda(
LOG,
"Block {} not retrieved",
() -> logBlockNumberMaybeHash(blockNumber, maybeBlockHash));
LOG.atDebug()
.setMessage("Block {} not retrieved")
.addArgument(() -> logBlockNumberMaybeHash(blockNumber, maybeBlockHash))
.log();
}
processingBlocksManager.registerFailedGetBlock(blockNumber, maybeBlockHash);
}
@ -466,11 +478,11 @@ public class BlockPropagationManager implements UnverifiedForkchoiceListener {
private Function<Throwable, CompletionStage<Block>> handleGetBlockErrors(
final long blockNumber, final Optional<Hash> maybeBlockHash) {
return throwable -> {
debugLambda(
LOG,
"Temporary failure retrieving block {} from peers with error {}",
() -> logBlockNumberMaybeHash(blockNumber, maybeBlockHash),
throwable::toString);
LOG.atDebug()
.setMessage("Temporary failure retrieving block {} from peers with error {}")
.addArgument(() -> logBlockNumberMaybeHash(blockNumber, maybeBlockHash))
.addArgument(throwable)
.log();
return CompletableFuture.completedFuture(null);
};
}
@ -486,26 +498,28 @@ public class BlockPropagationManager implements UnverifiedForkchoiceListener {
// check if we got this block by other means
if (maybeBlock.isPresent()) {
final Block block = maybeBlock.get();
debugLambda(
LOG, "No need to retry to get block {} since it is already present", block::toLogString);
LOG.atDebug()
.setMessage("No need to retry to get block {} since it is already present")
.addArgument(block::toLogString)
.log();
return CompletableFuture.completedFuture(block);
}
final long localChainHeight = blockchain.getChainHeadBlockNumber();
final long bestChainHeight = syncState.bestChainHeight(localChainHeight);
if (!shouldImportBlockAtHeight(blockNumber, localChainHeight, bestChainHeight)) {
debugLambda(
LOG,
"Not retrying to get block {} since we are too far from local chain head {}",
() -> logBlockNumberMaybeHash(blockNumber, maybeBlockHash),
blockchain.getChainHead()::toLogString);
LOG.atDebug()
.setMessage("Not retrying to get block {} since we are too far from local chain head {}")
.addArgument(() -> logBlockNumberMaybeHash(blockNumber, maybeBlockHash))
.addArgument(blockchain.getChainHead()::toLogString)
.log();
return CompletableFuture.completedFuture(null);
}
debugLambda(
LOG,
"Retrying to get block {}",
() -> logBlockNumberMaybeHash(blockNumber, maybeBlockHash));
LOG.atDebug()
.setMessage("Retrying to get block {}")
.addArgument(() -> logBlockNumberMaybeHash(blockNumber, maybeBlockHash))
.log();
return ethContext
.getScheduler()
@ -553,7 +567,10 @@ public class BlockPropagationManager implements UnverifiedForkchoiceListener {
// Synchronize to avoid race condition where block import event fires after the
// blockchain.contains() check and before the block is registered, causing onBlockAdded() to be
// invoked for the parent of this block before we are able to register it.
traceLambda(LOG, "Import or save pending block {}", block::toLogString);
LOG.atTrace()
.setMessage("Import or save pending block {}")
.addArgument(block::toLogString)
.log();
if (!protocolContext.getBlockchain().contains(block.getHeader().getParentHash())) {
// Block isn't connected to local chain, save it to pending blocks collection
@ -565,12 +582,18 @@ public class BlockPropagationManager implements UnverifiedForkchoiceListener {
}
if (!processingBlocksManager.addImportingBlock(block.getHash())) {
traceLambda(LOG, "We're already importing this block {}", block::toLogString);
LOG.atTrace()
.setMessage("We're already importing this block {}")
.addArgument(block::toLogString)
.log();
return CompletableFuture.completedFuture(block);
}
if (protocolContext.getBlockchain().contains(block.getHash())) {
traceLambda(LOG, "We've already imported this block {}", block::toLogString);
LOG.atTrace()
.setMessage("We've already imported this block {}")
.addArgument(block::toLogString)
.log();
processingBlocksManager.registerBlockImportDone(block.getHash());
return CompletableFuture.completedFuture(block);
}

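Editor's note, another hedged sketch: several hunks above (for example the "Ready to import pending blocks found" trace) wrap only the expensive argument in a lambda and pass cheap, already-computed values directly. The snippet below illustrates that split; the class, message and values are hypothetical.

import java.util.List;
import java.util.stream.Collectors;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LazyArgumentSketch {
  private static final Logger LOG = LoggerFactory.getLogger(LazyArgumentSketch.class);

  void traceReadyBlocks(final List<String> readyForImport, final String blockId) {
    LOG.atTrace()
        .setMessage("Ready to import pending blocks found [{}] for block {}")
        // The join over potentially many elements runs only if TRACE is enabled.
        .addArgument(() -> readyForImport.stream().collect(Collectors.joining(", ")))
        // A value that is already in hand is passed as-is; deferring it would save nothing.
        .addArgument(blockId)
        .log();
  }

  public static void main(final String[] args) {
    new LazyArgumentSketch().traceReadyBlocks(List.of("block-1", "block-2"), "block-3");
  }
}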
@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.ethereum.eth.sync;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.BlockHeader;
@ -83,11 +81,16 @@ public class ChainHeadTracker implements ConnectCallback {
final BlockHeader chainHeadHeader = peerResult.getResult().get(0);
peer.chainState().update(chainHeadHeader);
trailingPeerLimiter.enforceTrailingPeerLimit();
debugLambda(
LOG,
"Retrieved chain head info {} from {}",
() -> chainHeadHeader.getNumber() + " (" + chainHeadHeader.getBlockHash() + ")",
() -> peer);
LOG.atDebug()
.setMessage("Retrieved chain head info {} from {}")
.addArgument(
() ->
chainHeadHeader.getNumber()
+ " ("
+ chainHeadHeader.getBlockHash()
+ ")")
.addArgument(peer)
.log();
} else {
LOG.debug("Failed to retrieve chain head info. Disconnecting {}", peer, error);
peer.disconnect(DisconnectReason.USELESS_PEER);

@ -15,7 +15,6 @@
package org.hyperledger.besu.ethereum.eth.sync;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.infoLambda;
import org.hyperledger.besu.consensus.merge.ForkchoiceEvent;
import org.hyperledger.besu.consensus.merge.UnverifiedForkchoiceListener;
@ -317,7 +316,7 @@ public class DefaultSynchronizer implements Synchronizer, UnverifiedForkchoiceLi
lines.add("Besu has identified a problem with its worldstate database.");
lines.add("Your node will fetch the correct data from peers to repair the problem.");
lines.add("Starting the sync pipeline...");
infoLambda(LOG, FramedLogMessage.generate(lines));
LOG.atInfo().setMessage(FramedLogMessage.generate(lines)).log();
this.syncState.markInitialSyncRestart();
this.syncState.markResyncNeeded();

@ -16,7 +16,6 @@ package org.hyperledger.besu.ethereum.eth.sync;
import static java.util.concurrent.CompletableFuture.completedFuture;
import static org.hyperledger.besu.util.FutureUtils.exceptionallyCompose;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.ethereum.eth.manager.EthScheduler;
import org.hyperledger.besu.ethereum.eth.manager.exceptions.EthTaskException;
@ -159,12 +158,12 @@ public class PipelineChainDownloader implements ChainDownloader {
}
syncState.setSyncTarget(target.peer(), target.commonAncestor());
debugLambda(
LOG,
"Starting download pipeline for sync target {}, common ancestor {} ({})",
() -> target,
() -> target.commonAncestor().getNumber(),
() -> target.commonAncestor().getBlockHash());
LOG.atDebug()
.setMessage("Starting download pipeline for sync target {}, common ancestor {} ({})")
.addArgument(target)
.addArgument(() -> target.commonAncestor().getNumber())
.addArgument(() -> target.commonAncestor().getBlockHash())
.log();
currentDownloadPipeline = downloadPipelineFactory.createDownloadPipelineForSyncTarget(target);
return downloadPipelineFactory.startPipeline(
scheduler, syncState, target, currentDownloadPipeline);

@ -15,7 +15,6 @@
package org.hyperledger.besu.ethereum.eth.sync.backwardsync;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import static org.slf4j.LoggerFactory.getLogger;
import org.hyperledger.besu.datatypes.Hash;
@ -97,12 +96,12 @@ public class BackwardChain {
headers.put(blockHeader.getHash(), blockHeader);
chainStorage.put(blockHeader.getHash(), firstHeader.getHash());
firstStoredAncestor = Optional.of(blockHeader);
debugLambda(
LOG,
"Added header {} to backward chain led by pivot {} on height {}",
blockHeader::toLogString,
() -> lastStoredPivot.orElseThrow().toLogString(),
firstHeader::getNumber);
LOG.atDebug()
.setMessage("Added header {} to backward chain led by pivot {} on height {}")
.addArgument(blockHeader::toLogString)
.addArgument(() -> lastStoredPivot.orElseThrow().toLogString())
.addArgument(firstHeader::getNumber)
.log();
}
public synchronized Optional<Block> getPivot() {
@ -126,23 +125,26 @@ public class BackwardChain {
}
public synchronized void appendTrustedBlock(final Block newPivot) {
debugLambda(LOG, "Appending trusted block {}", newPivot::toLogString);
LOG.atDebug().setMessage("Appending trusted block {}").addArgument(newPivot::toLogString).log();
headers.put(newPivot.getHash(), newPivot.getHeader());
blocks.put(newPivot.getHash(), newPivot);
if (lastStoredPivot.isEmpty()) {
firstStoredAncestor = Optional.of(newPivot.getHeader());
} else {
if (newPivot.getHeader().getParentHash().equals(lastStoredPivot.get().getHash())) {
debugLambda(
LOG,
"Added block {} to backward chain led by pivot {} on height {}",
newPivot::toLogString,
lastStoredPivot.get()::toLogString,
firstStoredAncestor.get()::getNumber);
LOG.atDebug()
.setMessage("Added block {} to backward chain led by pivot {} on height {}")
.addArgument(newPivot::toLogString)
.addArgument(lastStoredPivot.get()::toLogString)
.addArgument(firstStoredAncestor.get()::getNumber)
.log();
chainStorage.put(lastStoredPivot.get().getHash(), newPivot.getHash());
} else {
firstStoredAncestor = Optional.of(newPivot.getHeader());
debugLambda(LOG, "Re-pivoting to new target block {}", newPivot::toLogString);
LOG.atDebug()
.setMessage("Re-pivoting to new target block {}")
.addArgument(newPivot::toLogString)
.log();
}
}
lastStoredPivot = Optional.of(newPivot.getHeader());

@ -15,8 +15,6 @@
package org.hyperledger.besu.ethereum.eth.sync.backwardsync;
import static org.hyperledger.besu.util.FutureUtils.exceptionallyCompose;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.BlockValidator;
@ -153,10 +151,11 @@ public class BackwardSyncContext {
private boolean isTrusted(final Hash hash) {
if (backwardChain.isTrusted(hash)) {
debugLambda(
LOG,
"not fetching or appending hash {} to backwards sync since it is present in successors",
hash::toHexString);
LOG.atDebug()
.setMessage(
"not fetching or appending hash {} to backwards sync since it is present in successors")
.addArgument(hash::toHexString)
.log();
return true;
}
return false;
@ -206,8 +205,10 @@ public class BackwardSyncContext {
ethContext.getEthPeers().peerCount(),
millisBetweenRetries);
} else {
debugLambda(
LOG, "Not recoverable backward sync exception {}", throwable::getMessage);
LOG.atDebug()
.setMessage("Not recoverable backward sync exception {}")
.addArgument(throwable::getMessage)
.log();
throw backwardSyncException;
}
},
@ -296,7 +297,7 @@ public class BackwardSyncContext {
}
protected Void saveBlock(final Block block) {
traceLambda(LOG, "Going to validate block {}", block::toLogString);
LOG.atTrace().setMessage("Going to validate block {}").addArgument(block::toLogString).log();
var optResult =
this.getBlockValidatorForBlock(block)
.validateAndProcessBlock(
@ -305,7 +306,10 @@ public class BackwardSyncContext {
HeaderValidationMode.FULL,
HeaderValidationMode.NONE);
if (optResult.isSuccessful()) {
traceLambda(LOG, "Block {} was validated, going to import it", block::toLogString);
LOG.atTrace()
.setMessage("Block {} was validated, going to import it")
.addArgument(block::toLogString)
.log();
optResult.getYield().get().getWorldState().persist(block.getHeader());
this.getProtocolContext()
.getBlockchain()
@ -344,7 +348,10 @@ public class BackwardSyncContext {
return;
}
debugLambda(LOG, "Rewinding head to last saved block {}", lastSavedBlock::toLogString);
LOG.atDebug()
.setMessage("Rewinding head to last saved block {}")
.addArgument(lastSavedBlock::toLogString)
.log();
blockchain.rewindToBlock(lastSavedBlock.getHash());
}

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.ethereum.eth.sync.backwardsync;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.eth.manager.task.RetryingGetHeadersEndingAtFromPeerByHashTask;
@ -86,11 +84,11 @@ public class BackwardSyncStep {
.scheduleSyncWorkerTask(retryingGetHeadersEndingAtFromPeerByHashTask::run)
.thenApply(
blockHeaders -> {
debugLambda(
LOG,
"Got headers {} -> {}",
blockHeaders.get(0)::getNumber,
blockHeaders.get(blockHeaders.size() - 1)::getNumber);
LOG.atDebug()
.setMessage("Got headers {} -> {}")
.addArgument(blockHeaders.get(0)::getNumber)
.addArgument(blockHeaders.get(blockHeaders.size() - 1)::getNumber)
.log();
return blockHeaders;
});
}

@ -16,7 +16,6 @@
*/
package org.hyperledger.besu.ethereum.eth.sync.backwardsync;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import static org.slf4j.LoggerFactory.getLogger;
import org.hyperledger.besu.datatypes.Hash;
@ -91,18 +90,18 @@ public class BackwardsSyncAlgorithm implements BesuEvents.InitialSyncCompletionL
}
if (chainHeader.getNumber() > firstAncestorHeader.getNumber()) {
debugLambda(
LOG,
"Backward reached below current chain head {} : {}",
() -> blockchain.getChainHead().toLogString(),
firstAncestorHeader::toLogString);
LOG.atDebug()
.setMessage("Backward reached below current chain head {} : {}")
.addArgument(() -> blockchain.getChainHead().toLogString())
.addArgument(firstAncestorHeader::toLogString)
.log();
}
if (finalBlockConfirmation.ancestorHeaderReached(firstAncestorHeader)) {
debugLambda(
LOG,
"Backward sync reached ancestor header with {}, starting forward sync",
firstAncestorHeader::toLogString);
LOG.atDebug()
.setMessage("Backward sync reached ancestor header with {}, starting forward sync")
.addArgument(firstAncestorHeader::toLogString)
.log();
return executeForwardAsync();
}

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.ethereum.eth.sync.backwardsync;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.ethereum.core.Block;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.eth.manager.task.AbstractPeerTask;
@ -52,24 +50,25 @@ public class ForwardSyncStep {
if (blockHeaders.isEmpty()) {
return CompletableFuture.completedFuture(null);
} else {
debugLambda(
LOG,
"Requesting {} blocks {}->{} ({})",
blockHeaders::size,
() -> blockHeaders.get(0).getNumber(),
() -> blockHeaders.get(blockHeaders.size() - 1).getNumber(),
() -> blockHeaders.get(0).getHash().toHexString());
LOG.atDebug()
.setMessage("Requesting {} blocks {}->{} ({})")
.addArgument(blockHeaders::size)
.addArgument(() -> blockHeaders.get(0).getNumber())
.addArgument(() -> blockHeaders.get(blockHeaders.size() - 1).getNumber())
.addArgument(() -> blockHeaders.get(0).getHash().toHexString())
.log();
return requestBodies(blockHeaders)
.thenApply(this::saveBlocks)
.exceptionally(
throwable -> {
context.halveBatchSize();
debugLambda(
LOG,
"Getting {} blocks from peers failed with reason {}, reducing batch size to {}",
blockHeaders::size,
throwable::getMessage,
context::getBatchSize);
LOG.atDebug()
.setMessage(
"Getting {} blocks from peers failed with reason {}, reducing batch size to {}")
.addArgument(blockHeaders::size)
.addArgument(throwable::getMessage)
.addArgument(context::getBatchSize)
.log();
return null;
});
}
@ -113,12 +112,13 @@ public class ForwardSyncStep {
if (parent.isEmpty()) {
context.halveBatchSize();
debugLambda(
LOG,
"Parent block {} not found, while saving block {}, reducing batch size to {}",
block.getHeader().getParentHash()::toString,
block::toLogString,
context::getBatchSize);
LOG.atDebug()
.setMessage(
"Parent block {} not found, while saving block {}, reducing batch size to {}")
.addArgument(block.getHeader().getParentHash())
.addArgument(block::toLogString)
.addArgument(context::getBatchSize)
.log();
return null;
} else {
context.saveBlock(block);

@ -17,7 +17,6 @@
package org.hyperledger.besu.ethereum.eth.sync.backwardsync;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import static org.slf4j.LoggerFactory.getLogger;
import org.hyperledger.besu.ethereum.core.Block;
@ -54,10 +53,10 @@ public class ProcessKnownAncestorsStep {
boolean isFirstUnProcessedHeader = true;
if (context.getProtocolContext().getBlockchain().contains(header.getHash())
&& header.getNumber() <= chainHeadBlockNumber) {
debugLambda(
LOG,
"Block {} is already imported, we can ignore it for the sync process",
header::toLogString);
LOG.atDebug()
.setMessage("Block {} is already imported, we can ignore it for the sync process")
.addArgument(header::toLogString)
.log();
backwardChain.dropFirstHeader();
isFirstUnProcessedHeader = false;
} else if (context.getProtocolContext().getBlockchain().contains(header.getParentHash())) {
@ -67,7 +66,7 @@ public class ProcessKnownAncestorsStep {
? Optional.of(backwardChain.getTrustedBlock(header.getHash()))
: context.getProtocolContext().getBlockchain().getBlockByHash(header.getHash());
if (block.isPresent()) {
debugLambda(LOG, "Importing block {}", header::toLogString);
LOG.atDebug().setMessage("Importing block {}").addArgument(header::toLogString).log();
context.saveBlock(block.get());
if (isTrustedBlock) {
backwardChain.dropFirstHeader();
@ -76,7 +75,10 @@ public class ProcessKnownAncestorsStep {
}
}
if (isFirstUnProcessedHeader) {
debugLambda(LOG, "First unprocessed header is {}", header::toLogString);
LOG.atDebug()
.setMessage("First unprocessed header is {}")
.addArgument(header::toLogString)
.log();
return;
}
}

@ -17,7 +17,6 @@
package org.hyperledger.besu.ethereum.eth.sync.backwardsync;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import static org.slf4j.LoggerFactory.getLogger;
import org.hyperledger.besu.datatypes.Hash;
@ -49,7 +48,7 @@ public class SyncStepStep {
}
private CompletableFuture<Block> requestBlock(final Hash targetHash) {
debugLambda(LOG, "Fetching block by hash {} from peers", targetHash::toString);
LOG.atDebug().setMessage("Fetching block by hash {} from peers").addArgument(targetHash).log();
final RetryingGetBlockFromPeersTask getBlockTask =
RetryingGetBlockFromPeersTask.create(
context.getProtocolSchedule(),
@ -66,7 +65,7 @@ public class SyncStepStep {
}
private Block saveBlock(final Block block) {
debugLambda(LOG, "Appending fetched block {}", block::toLogString);
LOG.atDebug().setMessage("Appending fetched block {}").addArgument(block::toLogString).log();
backwardChain.appendTrustedBlock(block);
return block;
}

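Editor's note, a hedged sketch of why the hunks above add no isDebugEnabled()/isTraceEnabled() guards: when the level is disabled, atDebug() returns a no-op builder, so the chained calls are cheap and Supplier arguments are never evaluated. The class below is hypothetical and assumes only slf4j-api 2.x, with no binding or with DEBUG left off.

import java.util.concurrent.atomic.AtomicInteger;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DisabledLevelSketch {
  private static final Logger LOG = LoggerFactory.getLogger(DisabledLevelSketch.class);

  public static void main(final String[] args) {
    final AtomicInteger evaluations = new AtomicInteger();
    LOG.atDebug()
        .setMessage("expensive value {}")
        // With DEBUG disabled, atDebug() hands back a no-op builder and this supplier
        // is never invoked, so the counter stays at zero.
        .addArgument(() -> "value-" + evaluations.incrementAndGet())
        .log();
    System.out.println("supplier evaluations: " + evaluations.get());
  }
}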
@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.ethereum.eth.sync.fastsync;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.BlockImporter;
@ -73,7 +71,10 @@ public class FastImportBlocksStep implements Consumer<List<BlockWithReceipts>> {
blockWithReceipts.getHeader().getNumber(),
blockWithReceipts.getHash());
}
traceLambda(LOG, "Imported block {}", blockWithReceipts.getBlock()::toLogString);
LOG.atTrace()
.setMessage("Imported block {}")
.addArgument(blockWithReceipts.getBlock()::toLogString)
.log();
}
if (logStartBlock.isEmpty()) {
logStartBlock = OptionalLong.of(blocksWithReceipts.get(0).getNumber());

@ -15,7 +15,6 @@
package org.hyperledger.besu.ethereum.eth.sync.fastsync;
import static java.util.concurrent.CompletableFuture.completedFuture;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.ProtocolContext;
@ -182,10 +181,10 @@ public class FastSyncActions {
if (throwable != null) {
LOG.debug("Error downloading block header by hash {}", hash);
} else {
debugLambda(
LOG,
"Successfully downloaded pivot block header by hash {}",
blockHeader::toLogString);
LOG.atDebug()
.setMessage("Successfully downloaded pivot block header by hash {}")
.addArgument(blockHeader::toLogString)
.log();
}
})
.thenApply(FastSyncState::new);

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.ethereum.eth.sync.fastsync;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.config.GenesisConfigOptions;
import org.hyperledger.besu.consensus.merge.ForkchoiceEvent;
import org.hyperledger.besu.datatypes.Hash;
@ -144,10 +142,10 @@ public class PivotSelectorFromSafeBlock implements PivotBlockSelector {
if (throwable != null) {
LOG.debug("Error downloading block header by hash {}", hash);
} else {
debugLambda(
LOG,
"Successfully downloaded pivot block header by hash {}",
blockHeader::toLogString);
LOG.atDebug()
.setMessage("Successfully downloaded pivot block header by hash {}")
.addArgument(blockHeader::toLogString)
.log();
}
});
}

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.ethereum.eth.sync.snapsync;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.ProcessableBlockHeader;
import org.hyperledger.besu.ethereum.eth.manager.EthContext;
@ -83,13 +81,14 @@ public class DynamicPivotBlockManager {
final CompletableFuture<Void> searchForNewPivot;
if (distanceNextPivotBlock > pivotBlockDistanceBeforeCaching) {
debugLambda(
LOG,
"Searching for a new pivot: current pivot {} best chain height {} distance next pivot {} last pivot block found {}",
() -> currentPivotBlockNumber,
() -> bestChainHeight,
() -> distanceNextPivotBlock,
this::logLastPivotBlockFound);
LOG.atDebug()
.setMessage(
"Searching for a new pivot: current pivot {} best chain height {} distance next pivot {} last pivot block found {}")
.addArgument(currentPivotBlockNumber)
.addArgument(bestChainHeight)
.addArgument(distanceNextPivotBlock)
.addArgument(this::logLastPivotBlockFound)
.log();
searchForNewPivot =
CompletableFuture.completedFuture(FastSyncState.EMPTY_SYNC_STATE)
@ -97,11 +96,12 @@ public class DynamicPivotBlockManager {
.thenCompose(
fss -> {
if (isSamePivotBlock(fss)) {
debugLambda(
LOG,
"New pivot {} is equal to last found {}, nothing to do",
fss::getPivotBlockHash,
this::logLastPivotBlockFound);
LOG.atDebug()
.setMessage(
"New pivot {} is equal to last found {}, nothing to do")
.addArgument(fss::getPivotBlockHash)
.addArgument(this::logLastPivotBlockFound)
.log();
return CompletableFuture.completedFuture(null);
}
return downloadNewPivotBlock(fss);
@ -122,13 +122,14 @@ public class DynamicPivotBlockManager {
() -> {
final long distance = bestChainHeight - currentPivotBlockNumber;
if (distance > pivotBlockWindowValidity) {
debugLambda(
LOG,
"Switch to new pivot: current pivot {} is distant {} from current best chain height {} last pivot block found {}",
() -> currentPivotBlockNumber,
() -> distance,
() -> bestChainHeight,
this::logLastPivotBlockFound);
LOG.atDebug()
.setMessage(
"Switch to new pivot: current pivot {} is distant {} from current best chain height {} last pivot block found {}")
.addArgument(currentPivotBlockNumber)
.addArgument(distance)
.addArgument(bestChainHeight)
.addArgument(this::logLastPivotBlockFound)
.log();
switchToNewPivotBlock(onNewPivotBlock);
}
// delay next check only if we are successful
@ -150,7 +151,10 @@ public class DynamicPivotBlockManager {
.thenAccept(
fssWithHeader -> {
lastPivotBlockFound = fssWithHeader.getPivotBlockHeader();
debugLambda(LOG, "Found new pivot block {}", this::logLastPivotBlockFound);
LOG.atDebug()
.setMessage("Found new pivot block {}")
.addArgument(this::logLastPivotBlockFound)
.log();
})
.orTimeout(5, TimeUnit.MINUTES);
}
@ -180,11 +184,11 @@ public class DynamicPivotBlockManager {
lastPivotBlockFound.ifPresentOrElse(
blockHeader -> {
if (syncState.getPivotBlockHeader().filter(blockHeader::equals).isEmpty()) {
debugLambda(
LOG,
"Setting new pivot block {} with state root {}",
blockHeader::toLogString,
blockHeader.getStateRoot()::toString);
LOG.atDebug()
.setMessage("Setting new pivot block {} with state root {}")
.addArgument(blockHeader::toLogString)
.addArgument(blockHeader.getStateRoot())
.log();
syncState.setCurrentHeader(blockHeader);
lastPivotBlockFound = Optional.empty();
}

@ -15,7 +15,6 @@
package org.hyperledger.besu.ethereum.eth.sync.tasks;
import static com.google.common.base.Preconditions.checkArgument;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.core.Block;
@ -198,7 +197,10 @@ public class PersistBlockTask extends AbstractEthTask<Block> {
final ProtocolSpec protocolSpec =
protocolSchedule.getByBlockNumber(block.getHeader().getNumber());
final BlockImporter blockImporter = protocolSpec.getBlockImporter();
debugLambda(LOG, "Running import task for block {}", block::toLogString);
LOG.atDebug()
.setMessage("Running import task for block {}")
.addArgument(block::toLogString)
.log();
blockImportResult = blockImporter.importBlock(protocolContext, block, validateHeaders);
if (!blockImportResult.isImported()) {
result.completeExceptionally(

@ -15,7 +15,6 @@
package org.hyperledger.besu.ethereum.eth.transactions;
import static java.time.Instant.now;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.eth.manager.EthContext;
@ -100,12 +99,13 @@ public class NewPooledTransactionHashesMessageProcessor {
try {
final List<Hash> incomingTransactionHashes = transactionsMessage.pendingTransactionHashes();
traceLambda(
LOG,
"Received pooled transaction hashes message from {}, incoming hashes {}, incoming list {}",
peer::toString,
incomingTransactionHashes::size,
incomingTransactionHashes::toString);
LOG.atTrace()
.setMessage(
"Received pooled transaction hashes message from {}, incoming hashes {}, incoming list {}")
.addArgument(peer)
.addArgument(incomingTransactionHashes::size)
.addArgument(incomingTransactionHashes)
.log();
final BufferedGetPooledTransactionsFromPeerFetcher bufferedTask =
scheduledTasks.computeIfAbsent(

@ -15,7 +15,6 @@
package org.hyperledger.besu.ethereum.eth.transactions;
import static org.hyperledger.besu.ethereum.core.Transaction.toHashList;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.core.Transaction;
@ -49,12 +48,13 @@ class NewPooledTransactionHashesMessageSender {
transactionTracker.claimTransactionsToSendToPeer(peer), MAX_TRANSACTIONS_HASHES)) {
try {
final List<Hash> txHashes = toHashList(txBatch);
traceLambda(
LOG,
"Sending transaction hashes to peer {}, transaction hashes count {}, list {}",
peer::toString,
txHashes::size,
txHashes::toString);
LOG.atTrace()
.setMessage(
"Sending transaction hashes to peer {}, transaction hashes count {}, list {}")
.addArgument(peer)
.addArgument(txHashes::size)
.addArgument(txHashes)
.log();
final NewPooledTransactionHashesMessage message =
NewPooledTransactionHashesMessage.create(txBatch, capability);

@ -16,7 +16,6 @@ package org.hyperledger.besu.ethereum.eth.transactions;
import static org.hyperledger.besu.ethereum.eth.transactions.PendingTransaction.toTransactionList;
import static org.hyperledger.besu.plugin.data.TransactionType.BLOB;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.ethereum.core.Transaction;
import org.hyperledger.besu.ethereum.eth.manager.EthContext;
@ -119,15 +118,16 @@ public class TransactionBroadcaster implements TransactionBatchAddedListener {
movePeersBetweenLists(sendOnlyHashPeers, sendMixedPeers, delta);
}
traceLambda(
LOG,
"Sending full transactions to {} peers, transaction hashes only to {} peers and mixed to {} peers."
+ " Peers w/o eth/65 {}, peers with eth/65 {}",
sendOnlyFullTransactionPeers::size,
sendOnlyHashPeers::size,
sendMixedPeers::size,
sendOnlyFullTransactionPeers::toString,
() -> sendOnlyHashPeers.toString() + sendMixedPeers.toString());
LOG.atTrace()
.setMessage(
"Sending full transactions to {} peers, transaction hashes only to {} peers and mixed to {} peers."
+ " Peers w/o eth/65 {}, peers with eth/65 {}")
.addArgument(sendOnlyFullTransactionPeers::size)
.addArgument(sendOnlyHashPeers::size)
.addArgument(sendMixedPeers::size)
.addArgument(sendOnlyFullTransactionPeers)
.addArgument(() -> sendOnlyHashPeers.toString() + sendMixedPeers.toString())
.log();
sendToFullTransactionsPeers(
transactionByBroadcastMode.get(FULL_BROADCAST), sendOnlyFullTransactionPeers);

@ -21,7 +21,6 @@ import static org.hyperledger.besu.ethereum.eth.transactions.TransactionAddedSta
import static org.hyperledger.besu.ethereum.transaction.TransactionInvalidReason.CHAIN_HEAD_NOT_AVAILABLE;
import static org.hyperledger.besu.ethereum.transaction.TransactionInvalidReason.CHAIN_HEAD_WORLD_STATE_NOT_AVAILABLE;
import static org.hyperledger.besu.ethereum.transaction.TransactionInvalidReason.INTERNAL_ERROR;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.datatypes.Wei;
@ -162,7 +161,10 @@ public class TransactionPool implements BlockAddedObserver {
for (final Transaction transaction : transactions) {
if (pendingTransactions.containsTransaction(transaction.getHash())) {
traceLambda(LOG, "Discard already present transaction {}", transaction::toTraceLog);
LOG.atTrace()
.setMessage("Discard already present transaction {}")
.addArgument(transaction::toTraceLog)
.log();
// We already have this transaction, don't even validate it.
duplicateTransactionCounter.labels(REMOTE).inc();
continue;
@ -175,22 +177,28 @@ public class TransactionPool implements BlockAddedObserver {
pendingTransactions.addRemoteTransaction(transaction, validationResult.maybeAccount);
switch (status) {
case ADDED:
traceLambda(LOG, "Added remote transaction {}", transaction::toTraceLog);
LOG.atTrace()
.setMessage("Added remote transaction {}")
.addArgument(transaction::toTraceLog)
.log();
addedTransactions.add(transaction);
break;
case ALREADY_KNOWN:
traceLambda(LOG, "Duplicate remote transaction {}", transaction::toTraceLog);
LOG.atTrace()
.setMessage("Duplicate remote transaction {}")
.addArgument(transaction::toTraceLog)
.log();
duplicateTransactionCounter.labels(REMOTE).inc();
break;
default:
traceLambda(LOG, "Transaction added status {}", status::name);
LOG.atTrace().setMessage("Transaction added status {}").addArgument(status::name).log();
}
} else {
traceLambda(
LOG,
"Discard invalid transaction {}, reason {}",
transaction::toTraceLog,
validationResult.result::getInvalidReason);
LOG.atTrace()
.setMessage("Discard invalid transaction {}, reason {}")
.addArgument(transaction::toTraceLog)
.addArgument(validationResult.result::getInvalidReason)
.log();
pendingTransactions
.signalInvalidAndGetDependentTransactions(transaction)
.forEach(pendingTransactions::removeTransaction);
@ -199,12 +207,12 @@ public class TransactionPool implements BlockAddedObserver {
if (!addedTransactions.isEmpty()) {
transactionBroadcaster.onTransactionsAdded(addedTransactions);
traceLambda(
LOG,
"Added {} transactions to the pool, current pool size {}, content {}",
addedTransactions::size,
pendingTransactions::size,
() -> pendingTransactions.toTraceLog(true, true));
LOG.atTrace()
.setMessage("Added {} transactions to the pool, current pool size {}, content {}")
.addArgument(addedTransactions::size)
.addArgument(pendingTransactions::size)
.addArgument(() -> pendingTransactions.toTraceLog(true, true))
.log();
}
}
@ -277,10 +285,10 @@ public class TransactionPool implements BlockAddedObserver {
final BlockHeader chainHeadBlockHeader = getChainHeadBlockHeader().orElse(null);
if (chainHeadBlockHeader == null) {
traceLambda(
LOG,
"rejecting transaction {} due to chain head not available yet",
transaction::getHash);
LOG.atTrace()
.setMessage("rejecting transaction {} due to chain head not available yet")
.addArgument(transaction::getHash)
.log();
return ValidationResultAndAccount.invalid(CHAIN_HEAD_NOT_AVAILABLE);
}
@ -378,11 +386,11 @@ public class TransactionPool implements BlockAddedObserver {
}
} else {
if (isMaxGasPriceBelowConfiguredMinGasPrice(transaction)) {
traceLambda(
LOG,
"Discard transaction {} below min gas price {}",
transaction::toTraceLog,
miningParameters::getMinTransactionGasPrice);
LOG.atTrace()
.setMessage("Discard transaction {} below min gas price {}")
.addArgument(transaction::toTraceLog)
.addArgument(miningParameters::getMinTransactionGasPrice)
.log();
return TransactionInvalidReason.GAS_PRICE_TOO_LOW;
}
}

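Editor's note, one more hedged sketch: in many hunks above, method references such as peer::toString and lambdas such as () -> this are replaced by the plain object (addArgument(peer), addArgument(this)). Since formatting happens only when the statement is actually logged, toString() is already deferred for an object that is in hand, so wrapping it in a Supplier adds nothing. The example below is hypothetical.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ConstantReferenceSketch {
  private static final Logger LOG = LoggerFactory.getLogger(ConstantReferenceSketch.class);

  void traceSend(final Object peer, final int hashCount) {
    LOG.atTrace()
        .setMessage("Sending transaction hashes to peer {}, transaction hashes count {}")
        .addArgument(peer) // rendered via toString() only if TRACE is enabled
        .addArgument(hashCount)
        .log();
  }

  public static void main(final String[] args) {
    new ConstantReferenceSketch().traceSend("peer-1", 2);
  }
}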
@ -16,7 +16,6 @@ package org.hyperledger.besu.ethereum.eth.transactions;
import static java.time.Instant.now;
import static org.hyperledger.besu.ethereum.core.Transaction.toHashList;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.ethereum.core.Transaction;
import org.hyperledger.besu.ethereum.eth.manager.EthPeer;
@ -98,15 +97,16 @@ class TransactionsMessageProcessor {
alreadySeenTransactionsCounter.inc(
(long) incomingTransactions.size() - freshTransactions.size());
traceLambda(
LOG,
"Received transactions message from {}, incoming transactions {}, incoming list {}"
+ ", fresh transactions {}, fresh list {}",
peer::toString,
incomingTransactions::size,
() -> toHashList(incomingTransactions),
freshTransactions::size,
() -> toHashList(freshTransactions));
LOG.atTrace()
.setMessage(
"Received transactions message from {}, incoming transactions {}, incoming list {}"
+ ", fresh transactions {}, fresh list {}")
.addArgument(peer)
.addArgument(incomingTransactions::size)
.addArgument(() -> toHashList(incomingTransactions))
.addArgument(freshTransactions::size)
.addArgument(() -> toHashList(freshTransactions))
.log();
transactionPool.addRemoteTransactions(freshTransactions);

@ -15,7 +15,6 @@
package org.hyperledger.besu.ethereum.eth.transactions;
import static org.hyperledger.besu.ethereum.core.Transaction.toHashList;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.ethereum.core.Transaction;
import org.hyperledger.besu.ethereum.eth.manager.EthPeer;
@ -50,14 +49,15 @@ class TransactionsMessageSender {
LimitedTransactionsMessages.createLimited(allTxToSend);
final Set<Transaction> includedTransactions =
limitedTransactionsMessages.getIncludedTransactions();
traceLambda(
LOG,
"Sending transactions to peer {} all transactions count {}, "
+ "single message transactions {}, single message list {}",
peer::toString,
allTxToSend::size,
includedTransactions::size,
() -> toHashList(includedTransactions));
LOG.atTrace()
.setMessage(
"Sending transactions to peer {} all transactions count {}, "
+ "single message transactions {}, single message list {}")
.addArgument(peer)
.addArgument(allTxToSend::size)
.addArgument(includedTransactions::size)
.addArgument(() -> toHashList(includedTransactions))
.log();
allTxToSend.removeAll(limitedTransactionsMessages.getIncludedTransactions());
try {
peer.send(limitedTransactionsMessages.getTransactionsMessage());

@ -18,8 +18,6 @@ import static org.hyperledger.besu.ethereum.eth.transactions.TransactionAddedSta
import static org.hyperledger.besu.ethereum.eth.transactions.TransactionAddedStatus.LOWER_NONCE_INVALID_TRANSACTION_KNOWN;
import static org.hyperledger.besu.ethereum.eth.transactions.TransactionAddedStatus.NONCE_TOO_FAR_IN_FUTURE_FOR_SENDER;
import static org.hyperledger.besu.ethereum.eth.transactions.TransactionAddedStatus.REJECTED_UNDERPRICED_REPLACEMENT;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.datatypes.Hash;
@ -151,7 +149,10 @@ public abstract class AbstractPendingTransactionsSorter implements PendingTransa
.filter(transaction -> transaction.getAddedToPoolAt().isBefore(removeTransactionsBefore))
.forEach(
transactionInfo -> {
traceLambda(LOG, "Evicted {} due to age", transactionInfo::toTraceLog);
LOG.atTrace()
.setMessage("Evicted {} due to age")
.addArgument(transactionInfo::toTraceLog)
.log();
removeTransaction(transactionInfo.getTransaction());
});
}
@ -169,10 +170,11 @@ public abstract class AbstractPendingTransactionsSorter implements PendingTransa
final Transaction transaction, final Optional<Account> maybeSenderAccount) {
if (lowestInvalidKnownNonceCache.hasInvalidLowerNonce(transaction)) {
debugLambda(
LOG,
"Dropping transaction {} since the sender has an invalid transaction with lower nonce",
transaction::toTraceLog);
LOG.atDebug()
.setMessage(
"Dropping transaction {} since the sender has an invalid transaction with lower nonce")
.addArgument(transaction::toTraceLog)
.log();
return LOWER_NONCE_INVALID_TRANSACTION_KNOWN;
}
@ -286,15 +288,17 @@ public abstract class AbstractPendingTransactionsSorter implements PendingTransa
if (existingPendingTx != null) {
if (!transactionReplacementHandler.shouldReplace(
existingPendingTx, pendingTransaction, chainHeadHeaderSupplier.get())) {
traceLambda(
LOG, "Reject underpriced transaction replacement {}", pendingTransaction::toTraceLog);
LOG.atTrace()
.setMessage("Reject underpriced transaction replacement {}")
.addArgument(pendingTransaction::toTraceLog)
.log();
return REJECTED_UNDERPRICED_REPLACEMENT;
}
traceLambda(
LOG,
"Replace existing transaction {}, with new transaction {}",
existingPendingTx::toTraceLog,
pendingTransaction::toTraceLog);
LOG.atTrace()
.setMessage("Replace existing transaction {}, with new transaction {}")
.addArgument(existingPendingTx::toTraceLog)
.addArgument(pendingTransaction::toTraceLog)
.log();
maybeReplacedTransaction = Optional.of(existingPendingTx.getTransaction());
} else {
maybeReplacedTransaction = Optional.empty();
@ -302,7 +306,10 @@ public abstract class AbstractPendingTransactionsSorter implements PendingTransa
pendingTxsForSender.updateSenderAccount(maybeSenderAccount);
pendingTxsForSender.trackPendingTransaction(pendingTransaction);
traceLambda(LOG, "Tracked transaction by sender {}", pendingTxsForSender::toTraceLog);
LOG.atTrace()
.setMessage("Tracked transaction by sender {}")
.addArgument(pendingTxsForSender::toTraceLog)
.log();
maybeReplacedTransaction.ifPresent(this::removeTransaction);
return ADDED;
}
@ -320,11 +327,11 @@ public abstract class AbstractPendingTransactionsSorter implements PendingTransa
transaction.getSender());
transactionsBySender.remove(transaction.getSender());
} else {
traceLambda(
LOG,
"Tracked transaction by sender {} after the removal of {}",
pendingTxsForSender::toTraceLog,
transaction::toTraceLog);
LOG.atTrace()
.setMessage("Tracked transaction by sender {} after the removal of {}")
.addArgument(pendingTxsForSender::toTraceLog)
.addArgument(transaction::toTraceLog)
.log();
}
});
}
@ -418,21 +425,26 @@ public abstract class AbstractPendingTransactionsSorter implements PendingTransa
final Transaction transaction = pendingTransaction.getTransaction();
synchronized (lock) {
if (pendingTransactions.containsKey(pendingTransaction.getHash())) {
traceLambda(LOG, "Already known transaction {}", pendingTransaction::toTraceLog);
LOG.atTrace()
.setMessage("Already known transaction {}")
.addArgument(pendingTransaction::toTraceLog)
.log();
return TransactionAddedStatus.ALREADY_KNOWN;
}
if (transaction.getNonce() - maybeSenderAccount.map(AccountState::getNonce).orElse(0L)
>= poolConfig.getTxPoolMaxFutureTransactionByAccount()) {
traceLambda(
LOG,
"Transaction {} not added because nonce too far in the future for sender {}",
transaction::toTraceLog,
() ->
maybeSenderAccount
.map(Account::getAddress)
.map(Address::toString)
.orElse("unknown"));
LOG.atTrace()
.setMessage(
"Transaction {} not added because nonce too far in the future for sender {}")
.addArgument(transaction::toTraceLog)
.addArgument(
() ->
maybeSenderAccount
.map(Account::getAddress)
.map(Address::toString)
.orElse("unknown"))
.log();
return NONCE_TOO_FAR_IN_FUTURE_FOR_SENDER;
}
@ -524,11 +536,12 @@ public abstract class AbstractPendingTransactionsSorter implements PendingTransa
.filter(pendingTx -> pendingTx.getTransaction().getNonce() > invalidNonce)
.peek(
pendingTx ->
traceLambda(
LOG,
"Transaction {} invalid since there is a lower invalid nonce {} for the sender",
pendingTx::toTraceLog,
() -> invalidNonce))
LOG.atTrace()
.setMessage(
"Transaction {} invalid since there is a lower invalid nonce {} for the sender")
.addArgument(pendingTx::toTraceLog)
.addArgument(invalidNonce)
.log())
.map(PendingTransaction::getTransaction)
.collect(Collectors.toList());
}

@ -16,7 +16,6 @@ package org.hyperledger.besu.ethereum.eth.transactions.sorter;
import static java.util.Comparator.comparing;
import static java.util.stream.Collectors.toUnmodifiableList;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.datatypes.Wei;
import org.hyperledger.besu.ethereum.core.Block;
@ -108,7 +107,10 @@ public class BaseFeePendingTransactionsSorter extends AbstractPendingTransaction
@Override
protected void removePrioritizedTransaction(final PendingTransaction removedPendingTx) {
if (prioritizedTransactionsDynamicRange.remove(removedPendingTx)) {
traceLambda(LOG, "Removed dynamic range transaction {}", removedPendingTx::toTraceLog);
LOG.atTrace()
.setMessage("Removed dynamic range transaction {}")
.addArgument(removedPendingTx::toTraceLog)
.log();
} else {
removedPendingTx
.getTransaction()
@ -116,8 +118,10 @@ public class BaseFeePendingTransactionsSorter extends AbstractPendingTransaction
.ifPresent(
__ -> {
if (prioritizedTransactionsStaticRange.remove(removedPendingTx)) {
traceLambda(
LOG, "Removed static range transaction {}", removedPendingTx::toTraceLog);
LOG.atTrace()
.setMessage("Removed static range transaction {}")
.addArgument(removedPendingTx::toTraceLog)
.log();
}
});
}
@ -201,11 +205,11 @@ public class BaseFeePendingTransactionsSorter extends AbstractPendingTransaction
kind = "dynamic";
prioritizedTransactionsDynamicRange.add(pendingTransaction);
}
traceLambda(
LOG,
"Adding {} to pending transactions, range type {}",
pendingTransaction::toTraceLog,
kind::toString);
LOG.atTrace()
.setMessage("Adding {} to pending transactions, range type {}")
.addArgument(pendingTransaction::toTraceLog)
.addArgument(kind)
.log();
}
@Override
@ -247,11 +251,11 @@ public class BaseFeePendingTransactionsSorter extends AbstractPendingTransaction
}
public void updateBaseFee(final Wei newBaseFee) {
traceLambda(
LOG,
"Updating base fee from {} to {}",
this.baseFee::toString,
newBaseFee::toShortHexString);
LOG.atTrace()
.setMessage("Updating base fee from {} to {}")
.addArgument(this.baseFee)
.addArgument(newBaseFee::toShortHexString)
.log();
if (this.baseFee.orElse(Wei.ZERO).equals(newBaseFee)) {
return;
}
@ -268,10 +272,10 @@ public class BaseFeePendingTransactionsSorter extends AbstractPendingTransaction
.collect(toUnmodifiableList())
.forEach(
pendingTx -> {
traceLambda(
LOG,
"Moving {} from static to dynamic gas fee paradigm",
pendingTx::toTraceLog);
LOG.atTrace()
.setMessage("Moving {} from static to dynamic gas fee paradigm")
.addArgument(pendingTx::toTraceLog)
.log();
prioritizedTransactionsStaticRange.remove(pendingTx);
prioritizedTransactionsDynamicRange.add(pendingTx);
});
@ -285,10 +289,10 @@ public class BaseFeePendingTransactionsSorter extends AbstractPendingTransaction
.collect(toUnmodifiableList())
.forEach(
pendingTx -> {
traceLambda(
LOG,
"Moving {} from dynamic to static gas fee paradigm",
pendingTx::toTraceLog);
LOG.atTrace()
.setMessage("Moving {} from dynamic to static gas fee paradigm")
.addArgument(pendingTx::toTraceLog)
.log();
prioritizedTransactionsDynamicRange.remove(pendingTx);
prioritizedTransactionsStaticRange.add(pendingTx);
});

@ -16,8 +16,6 @@ package org.hyperledger.besu.ethereum.p2p.discovery;
import static com.google.common.base.Preconditions.checkArgument;
import static org.apache.tuweni.bytes.Bytes.wrapBuffer;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.crypto.NodeKey;
import org.hyperledger.besu.ethereum.forkid.ForkIdManager;
@ -210,13 +208,13 @@ public class VertxPeerDiscoveryAgent extends PeerDiscoveryAgent {
if (err instanceof NativeIoException) {
final var nativeErr = (NativeIoException) err;
if (nativeErr.expectedErr() == Errors.ERROR_ENETUNREACH_NEGATIVE) {
debugLambda(
LOG,
"Peer {} is unreachable, native error code {}, packet: {}, stacktrace: {}",
peer::toString,
nativeErr::expectedErr,
() -> wrapBuffer(packet.encode()),
err::toString);
LOG.atDebug()
.setMessage("Peer {} is unreachable, native error code {}, packet: {}, stacktrace: {}")
.addArgument(peer)
.addArgument(nativeErr::expectedErr)
.addArgument(() -> wrapBuffer(packet.encode()))
.addArgument(err)
.log();
} else {
LOG.warn(
"Sending to peer {} failed, native error code {}, packet: {}, stacktrace: {}",
@ -226,12 +224,12 @@ public class VertxPeerDiscoveryAgent extends PeerDiscoveryAgent {
err);
}
} else if (err instanceof SocketException && err.getMessage().contains("unreachable")) {
debugLambda(
LOG,
"Peer {} is unreachable, packet: {}",
peer::toString,
() -> wrapBuffer(packet.encode()),
err::toString);
LOG.atDebug()
.setMessage("Peer {} is unreachable, packet: {}")
.addArgument(peer)
.addArgument(() -> wrapBuffer(packet.encode()))
.addArgument(err)
.log();
} else if (err instanceof SocketException
&& err.getMessage().contentEquals("Operation not permitted")) {
LOG.debug(
@ -243,12 +241,12 @@ public class VertxPeerDiscoveryAgent extends PeerDiscoveryAgent {
"Unsupported address type exception when connecting to peer {}, this is likely due to ipv6 not being enabled at runtime. "
+ "Set logging level to TRACE to see full stacktrace",
peer);
traceLambda(
LOG,
"Sending to peer {} failed, packet: {}, stacktrace: {}",
peer::toString,
() -> wrapBuffer(packet.encode()),
err::toString);
LOG.atTrace()
.setMessage("Sending to peer {} failed, packet: {}, stacktrace: {}")
.addArgument(peer)
.addArgument(() -> wrapBuffer(packet.encode()))
.addArgument(err)
.log();
} else {
LOG.warn(
"Sending to peer {} failed, packet: {}, stacktrace: {}",

@ -17,7 +17,6 @@ package org.hyperledger.besu.ethereum.p2p.rlpx;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static java.util.Objects.isNull;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import org.hyperledger.besu.crypto.NodeKey;
import org.hyperledger.besu.crypto.SECPPublicKey;
@ -270,7 +269,7 @@ public class RlpxAgent {
}
});
traceLambda(LOG, "{}", this::logConnectionsByIdToString);
LOG.atTrace().setMessage("{}").addArgument(this::logConnectionsByIdToString).log();
return connectionFuture.get();
}
@ -285,7 +284,7 @@ public class RlpxAgent {
final PeerConnection peerConnection,
final DisconnectReason disconnectReason,
final boolean initiatedByPeer) {
traceLambda(LOG, "{}", this::logConnectionsByIdToString);
LOG.atTrace().setMessage("{}").addArgument(this::logConnectionsByIdToString).log();
cleanUpPeerConnection(peerConnection.getPeer().getId());
}
@ -431,7 +430,7 @@ public class RlpxAgent {
// Check remote connections again to control for race conditions
enforceRemoteConnectionLimits();
enforceConnectionLimits();
traceLambda(LOG, "{}", this::logConnectionsByIdToString);
LOG.atTrace().setMessage("{}").addArgument(this::logConnectionsByIdToString).log();
}
private boolean shouldLimitRemoteConnections() {

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.ethereum.permissioning;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
@ -74,12 +72,12 @@ public class AllowlistPersistor {
: Collections.emptyList();
if (!existingValues.containsAll(checkLists)) {
debugLambda(
LOG,
"\n LISTS DO NOT MATCH configFile::",
existingValues::toString,
configurationFilePath::toString);
debugLambda(LOG, "\nLISTS DO NOT MATCH in-memory ::", checkLists::toString);
LOG.atDebug()
.setMessage("\n LISTS DO NOT MATCH configFile::")
.addArgument(existingValues)
.addArgument(configurationFilePath)
.log();
LOG.atDebug().setMessage("\nLISTS DO NOT MATCH in-memory ::").addArgument(checkLists).log();
throw new AllowlistFileSyncException();
}
return true;

@ -14,8 +14,6 @@
*/
package org.hyperledger.besu.ethereum.stratum;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcSuccessResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.results.Quantity;
@ -158,7 +156,7 @@ public class GetWorkProtocol implements StratumProtocol {
@Override
public void setCurrentWorkTask(final PoWSolverInputs input) {
debugLambda(LOG, "setting current stratum work task {}", input::toString);
LOG.atDebug().setMessage("setting current stratum work task {}").addArgument(input).log();
currentInput = input;
final byte[] dagSeed =
DirectAcyclicGraphSeed.dagSeed(currentInput.getBlockNumber(), epochCalculator);

@ -1615,6 +1615,11 @@
<sha256 value="24150c7dbdc8a5bb987cbeb4898ee1515d2893d3f6224acfc563da1a7fd5c283" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="io.fabric8" name="kubernetes-client-bom" version="5.12.2">
<artifact name="kubernetes-client-bom-5.12.2.pom">
<sha256 value="eaa03c16956568d54a6ba437d49d40cbf0dd8e93977f984318242576b650bc3b" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="io.github.java-diff-utils" name="java-diff-utils" version="4.0">
<artifact name="java-diff-utils-4.0.jar">
<sha256 value="810232374e76a954949f0e2185cd7d9515addb918cf3da3481f77e07c356b49a" origin="Generated by Gradle"/>
@ -1851,6 +1856,11 @@
<sha256 value="8cdddf6430392501ad572d673894f7c18da8fbd399e2cc38c40d4730f86665e2" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="io.netty" name="netty-bom" version="4.1.86.Final">
<artifact name="netty-bom-4.1.86.Final.pom">
<sha256 value="12716c1fe64cf5bdaa7044d37d13aae3a88821b91d4798420c46a74769178afd" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="io.netty" name="netty-buffer" version="4.1.74.Final">
<artifact name="netty-buffer-4.1.74.Final.jar">
<sha256 value="755bd3ed50ae48c2f06471e7e92baebf49220ee8966a5e60e3f262edf42d4647" origin="Generated by Gradle"/>
@ -2833,6 +2843,16 @@
<sha256 value="500fadc547247e8950c5d213b7e106a12a3aa05d65b07fdd430219e76c21c3d8" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="jakarta.platform" name="jakarta.jakartaee-bom" version="9.0.0">
<artifact name="jakarta.jakartaee-bom-9.0.0.pom">
<sha256 value="91903d0dd876dec67f8b923f1332ba72bf295b06bd397d18f3af19307ce1a2ce" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="jakarta.platform" name="jakartaee-api-parent" version="9.0.0">
<artifact name="jakartaee-api-parent-9.0.0.pom">
<sha256 value="f65dcf14b6e1d9148e198a390fafe155faca093253dde900307f3e0ea82cad3b" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="javax.annotation" name="javax.annotation-api" version="1.3.2">
<artifact name="javax.annotation-api-1.3.2.jar">
<sha256 value="e04ba5195bcd555dc95650f7cc614d151e4bcd52d29a10b8aa2197f3ab89ab9b" origin="Generated by Gradle"/>
@ -3377,41 +3397,56 @@
<sha256 value="dc7630cf82cb31f4d476216054221affd525746ee96541240f452f7323703022" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.apache.logging.log4j" name="log4j" version="2.17.2">
<artifact name="log4j-2.17.2.pom">
<sha256 value="f4a7e1d387c1fb9b365d4cb36d270ef8634ba49ad205558c4e1cda7d41b523f5" origin="Generated by Gradle"/>
<component group="org.apache.logging" name="logging-parent" version="7">
<artifact name="logging-parent-7.pom">
<sha256 value="e58911dc9fc6b173a10e5aa9edb93c7994ad06602705dd067edcfc6e1e9e172b" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.apache.logging.log4j" name="log4j-api" version="2.17.2">
<artifact name="log4j-api-2.17.2.jar">
<sha256 value="09351b5a03828f369cdcff76f4ed39e6a6fc20f24f046935d0b28ef5152f8ce4" origin="Generated by Gradle"/>
<component group="org.apache.logging.log4j" name="log4j" version="2.19.0">
<artifact name="log4j-2.19.0.pom">
<sha256 value="15624ba1a56dbf864606074532533618fa32b4642f72851fcbe92e136beaec94" origin="Generated by Gradle"/>
</artifact>
<artifact name="log4j-api-2.17.2.pom">
<sha256 value="2b8f3f6d471df17969921a7f0ffda83e0628ab3defc7e5bca2aefed1667629ef" origin="Generated by Gradle"/>
</component>
<component group="org.apache.logging.log4j" name="log4j" version="2.20.0">
<artifact name="log4j-2.20.0.pom">
<sha256 value="9a37b4a8f67e8d41bc2473717a3021633d6a3c3f310579db9ad0be3f24659c69" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.apache.logging.log4j" name="log4j-core" version="2.17.2">
<artifact name="log4j-core-2.17.2.jar">
<sha256 value="5adb34ff4197cd16a8d24f63035856a933cb59562a6888dde86e9450fcfef646" origin="Generated by Gradle"/>
<component group="org.apache.logging.log4j" name="log4j-api" version="2.20.0">
<artifact name="log4j-api-2.20.0.jar">
<sha256 value="2f43eea679ea66f14ca0f13fec2a8600ac124f5a5231dcb4df8393eddcb97550" origin="Generated by Gradle"/>
</artifact>
<artifact name="log4j-core-2.17.2.pom">
<sha256 value="a3891841fac9be34b1b213ab95be30b53646516dcb4bcfa9cc68bec57547da53" origin="Generated by Gradle"/>
<artifact name="log4j-api-2.20.0.pom">
<sha256 value="cd45832a3d6cd2194435c0d63ca015f194968f2fbea4f291553bf7afb84e1637" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.apache.logging.log4j" name="log4j-jul" version="2.17.2">
<artifact name="log4j-jul-2.17.2.jar">
<sha256 value="cec8d204efa87dfc759faf43d6affde1d18fa25b7c6d98ea5ef0737a56ff195c" origin="Generated by Gradle"/>
<component group="org.apache.logging.log4j" name="log4j-bom" version="2.20.0">
<artifact name="log4j-bom-2.20.0.pom">
<sha256 value="f8bb692e95a6b7bda601e87100958e83d0061b7f123250b68128943a195e9da1" origin="Generated by Gradle"/>
</artifact>
<artifact name="log4j-jul-2.17.2.pom">
<sha256 value="d28189a1d0c3e7955147c1ad98365fcede52b7c1cc9a1a336d176b393ccad592" origin="Generated by Gradle"/>
</component>
<component group="org.apache.logging.log4j" name="log4j-core" version="2.20.0">
<artifact name="log4j-core-2.20.0.jar">
<sha256 value="6137df848cdaed9f4d5076f75513c6c85da80b953f4e7acca38098b770763f55" origin="Generated by Gradle"/>
</artifact>
<artifact name="log4j-core-2.20.0.pom">
<sha256 value="de71ac100551f4a077aecad077bd153e71df7aa69c3042d76516cc5cce0871ee" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.apache.logging.log4j" name="log4j-slf4j-impl" version="2.17.2">
<artifact name="log4j-slf4j-impl-2.17.2.jar">
<sha256 value="77912d47190a5d25d583728e048496a92a2cb32308b71d3439931d7719996637" origin="Generated by Gradle"/>
<component group="org.apache.logging.log4j" name="log4j-jul" version="2.20.0">
<artifact name="log4j-jul-2.20.0.jar">
<sha256 value="c9b33dffb40bd00d4889ea4700f79d87a2e4d9f92911a3a008ae18c0bb3fb167" origin="Generated by Gradle"/>
</artifact>
<artifact name="log4j-slf4j-impl-2.17.2.pom">
<sha256 value="77ddd977abdceda90326c578a1ee40be0c7b3cdf8758f859522cf48b3b11c106" origin="Generated by Gradle"/>
<artifact name="log4j-jul-2.20.0.pom">
<sha256 value="d6665964717795bbba970a4db0923a5ccada5b71d4355443a46bdbcb98216e64" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.apache.logging.log4j" name="log4j-slf4j2-impl" version="2.20.0">
<artifact name="log4j-slf4j2-impl-2.20.0.jar">
<sha256 value="b8dd3e4ea9cffa18db5f301cd8c539158662e691efd4701aa87b4d09961bd8b0" origin="Generated by Gradle"/>
</artifact>
<artifact name="log4j-slf4j2-impl-2.20.0.pom">
<sha256 value="b70c7601ec6d248ca508ea233728c862a062da583476821674072fabb38b914e" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.apache.maven" name="maven" version="3.0.4">
@@ -3749,6 +3784,11 @@
<sha256 value="4982adf4a6c6c50567b0be3cf494825e6c2a27d8b3871d9e18822a74272b79aa" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.codehaus.groovy" name="groovy-bom" version="3.0.14">
<artifact name="groovy-bom-3.0.14.pom">
<sha256 value="24e0e9b738de7118de9e73560ffd06034bb5cf07ca13a7e03459e88ba9d1ae27" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.codehaus.groovy" name="groovy-xml" version="3.0.10">
<artifact name="groovy-xml-3.0.10.jar">
<sha256 value="bc2ad5113596a3a9bb650da6471f1b891505421782174a7fe60d790edb629e85" origin="Generated by Gradle"/>
@@ -3853,6 +3893,11 @@
<sha256 value="5e2bc0e35180467530c55a67aafabe5ef98e060eeb698e9af6d0b0dcddba95cd" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.eclipse.ee4j" name="project" version="1.0.6">
<artifact name="project-1.0.6.pom">
<sha256 value="4e7d8329d8da7dcf30779d824241be145f27108932f5a5a24eb907677bc8d72d" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.eclipse.jetty" name="jetty-alpn-client" version="9.4.46.v20220331">
<artifact name="jetty-alpn-client-9.4.46.v20220331.jar">
<sha256 value="6aa084d161ace87b908829332ae0ca3b1ed2f077c73621b189fdadd21e475141" origin="Generated by Gradle"/>
@@ -3911,6 +3956,11 @@
<sha256 value="4b03bdb6992570667728caf93f8493790ed2010549086428b76371c67fd2ab6e" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.eclipse.jetty" name="jetty-bom" version="9.4.50.v20221201">
<artifact name="jetty-bom-9.4.50.v20221201.pom">
<sha256 value="4cde6e533d601eaf8b65acee956b7706c18192f2755d023d7e8d19bb7d5b3943" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.eclipse.jetty" name="jetty-client" version="9.4.46.v20220331">
<artifact name="jetty-client-9.4.46.v20220331.jar">
<sha256 value="19f92ba9896c4d29a83f26a50d9b4e93af988b5c919466f1ad78819bcc40ceee" origin="Generated by Gradle"/>
@@ -5292,12 +5342,12 @@
<sha256 value="7e0747751e9b67e19dcb5206f04ea22cc03d250c422426402eadd03513f2c314" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.slf4j" name="slf4j-api" version="1.7.36">
<artifact name="slf4j-api-1.7.36.jar">
<sha256 value="d3ef575e3e4979678dc01bf1dcce51021493b4d11fb7f1be8ad982877c16a1c0" origin="Generated by Gradle"/>
<component group="org.slf4j" name="slf4j-api" version="2.0.6">
<artifact name="slf4j-api-2.0.6.jar">
<sha256 value="2f2a92d410b268139d7d63b75ed25e21995cfe4100c19bf23577cfdbc8077bda" origin="Generated by Gradle"/>
</artifact>
<artifact name="slf4j-api-1.7.36.pom">
<sha256 value="fb046a9c229437928bb11c2d27c8b5d773eb8a25e60cbd253d985210dedc2684" origin="Generated by Gradle"/>
<artifact name="slf4j-api-2.0.6.pom">
<sha256 value="8b4e86c53d2783608f1aea213d906c5bac5c0433e00b19239bb16764b9fa3736" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.slf4j" name="slf4j-parent" version="1.7.30">
@@ -5305,9 +5355,9 @@
<sha256 value="11647956e48a0c5bfb3ac33f6da7e83f341002b6857efd335a505b687be34b75" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.slf4j" name="slf4j-parent" version="1.7.36">
<artifact name="slf4j-parent-1.7.36.pom">
<sha256 value="bb388d37fbcdd3cde64c3cede21838693218dc451f04040c5df360a78ed7e812" origin="Generated by Gradle"/>
<component group="org.slf4j" name="slf4j-parent" version="2.0.6">
<artifact name="slf4j-parent-2.0.6.pom">
<sha256 value="1482650cbe31e408c1dc89021cbae1d3044ad4a01bf8f62ba360b6a813a13124" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.sonatype.forge" name="forge-parent" version="4">
@@ -5367,6 +5417,14 @@
<sha256 value="f31a8b6f59b6a0180e38c9c129ba0607bae7a0d4a10b95375770c814ab4cc753" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.springframework" name="spring-framework-bom" version="5.3.24">
<artifact name="spring-framework-bom-5.3.24.module">
<sha256 value="1996e1f617cb03fa76ea11850fe2a1e20b0e30a1046ba6d5db3bdbbf44113fce" origin="Generated by Gradle"/>
</artifact>
<artifact name="spring-framework-bom-5.3.24.pom">
<sha256 value="535213566bbbefe2636a0d4e8dd1a73ade612e2430c8ffd310dcc2a3b3b9ba41" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="org.springframework" name="spring-parent" version="3.1.3.RELEASE">
<artifact name="spring-parent-3.1.3.RELEASE.pom">
<sha256 value="64e9110118f82a142759a316d09d3d8d8d717d5d95079d7f6b388ee479fa782f" origin="Generated by Gradle"/>

@@ -125,10 +125,10 @@ dependencyManagement {
dependency 'org.apache.commons:commons-lang3:3.12.0'
dependency 'org.apache.commons:commons-text:1.10.0'
dependency 'org.apache.logging.log4j:log4j-api:2.17.2'
dependency 'org.apache.logging.log4j:log4j-core:2.17.2'
dependency 'org.apache.logging.log4j:log4j-jul:2.17.2'
dependency 'org.apache.logging.log4j:log4j-slf4j-impl:2.17.2'
dependency 'org.apache.logging.log4j:log4j-api:2.20.0'
dependency 'org.apache.logging.log4j:log4j-core:2.20.0'
dependency 'org.apache.logging.log4j:log4j-jul:2.20.0'
dependency 'org.apache.logging.log4j:log4j-slf4j2-impl:2.20.0'
dependency 'org.apache.tuweni:tuweni-bytes:2.3.1'
dependency 'org.apache.tuweni:tuweni-config:2.3.1'
@@ -192,7 +192,7 @@ dependencyManagement {
dependency 'org.rocksdb:rocksdbjni:7.7.3'
dependency 'org.slf4j:slf4j-api:1.7.36'
dependency 'org.slf4j:slf4j-api:2.0.6'
dependency 'org.springframework.security:spring-security-crypto:5.7.2'

@@ -34,7 +34,7 @@ dependencies {
implementation 'com.google.guava:guava'
implementation 'org.apache.commons:commons-lang3'
implementation 'org.apache.logging.log4j:log4j-core'
implementation 'org.apache.logging.log4j:log4j-slf4j-impl'
implementation 'org.apache.logging.log4j:log4j-slf4j2-impl'
implementation 'org.xerial.snappy:snappy-java'
testImplementation 'junit:junit'

@@ -1,85 +0,0 @@
/*
 * Copyright contributors to Hyperledger Besu.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
package org.hyperledger.besu.util;

import java.util.Arrays;
import java.util.function.Supplier;

import org.slf4j.Logger;

/**
 * Static helper class to shim SLF4J with lambda parameter suppliers until the final release of
 * SLF4J 2.0.
 */
public class Slf4jLambdaHelper {

  private Slf4jLambdaHelper() {}

  /**
   * Warn lambda.
   *
   * @param log the log
   * @param message the message
   * @param params the params
   */
  public static void warnLambda(
      final Logger log, final String message, final Supplier<?>... params) {
    if (log.isWarnEnabled()) {
      log.warn(message, Arrays.stream(params).map(Supplier::get).toArray());
    }
  }

  /**
   * Info lambda.
   *
   * @param log the log
   * @param message the message
   * @param params the params
   */
  public static void infoLambda(
      final Logger log, final String message, final Supplier<?>... params) {
    if (log.isInfoEnabled()) {
      log.info(message, Arrays.stream(params).map(Supplier::get).toArray());
    }
  }

  /**
   * Debug lambda.
   *
   * @param log the log
   * @param message the message
   * @param params the params
   */
  public static void debugLambda(
      final Logger log, final String message, final Supplier<?>... params) {
    if (log.isDebugEnabled()) {
      log.debug(message, Arrays.stream(params).map(Supplier::get).toArray());
    }
  }

  /**
   * Trace lambda.
   *
   * @param log the log
   * @param message the message
   * @param params the params
   */
  public static void traceLambda(
      final Logger log, final String message, final Supplier<?>... params) {
    if (log.isTraceEnabled()) {
      log.trace(message, Arrays.stream(params).map(Supplier::get).toArray());
    }
  }
}
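For reference, the lazy-evaluation pattern this helper provided is available directly in SLF4J 2.x through the fluent logging API, which is why the class can be removed once the dependency is bumped. The snippet below is an illustrative sketch only (the class, method, and parameter names are invented for the example and are not part of this change); it assumes an SLF4J 2.x Logger on the classpath and shows arguments supplied lazily so they are evaluated only when the level is enabled.

// Illustrative sketch, not part of this diff: hypothetical class showing the
// SLF4J 2.x fluent API that supersedes the removed Slf4jLambdaHelper.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class FluentLoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(FluentLoggingSketch.class);

  void logImport(final Object block, final long importTimeMs) {
    // atDebug() returns a no-op builder when DEBUG is disabled, so neither the
    // plain argument nor the supplier is ever formatted or evaluated in that case.
    LOG.atDebug()
        .setMessage("Imported block {} in {} ms")
        .addArgument(block) // recorded as a parameter; only formatted if the event is logged
        .addArgument(() -> importTimeMs) // Supplier: evaluated only when DEBUG is enabled
        .log();
  }
}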

@@ -1,94 +0,0 @@
/*
 * Copyright contributors to Hyperledger Besu.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
package org.hyperledger.besu.util;

import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.traceLambda;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.warnLambda;

import java.util.ArrayDeque;
import java.util.function.Supplier;

import org.apache.logging.log4j.Level;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Slf4jLambdaHelperTest {
  private static final Logger LOG = LoggerFactory.getLogger(Slf4jLambdaHelperTest.class);
  private static final ArrayDeque<String> paramStack = new ArrayDeque<>();

  @Before
  public void paramSetup() {
    paramStack.push("stuff");
    paramStack.push("more stuff");
    paramStack.push("last stuff");
  }

  @Test
  public void smokeDebugLambda() {
    Log4j2ConfiguratorUtil.setLevel(LOG.getName(), Level.WARN);
    debugLambda(
        LOG,
        "blah",
        (Supplier<String>)
            () -> {
              throw new RuntimeException("should not evaluate");
            });

    Log4j2ConfiguratorUtil.setLevelDebug(LOG.getName());
    assertThat(paramStack.size()).isEqualTo(3);
    debugLambda(LOG, "blah {}", paramStack::pop);
    assertThat(paramStack.size()).isEqualTo(2);
    debugLambda(LOG, "blah {} {}", paramStack::pop, paramStack::pop);
    assertThat(paramStack.size()).isZero();
  }

  @Test
  public void smokeTraceLambda() {
    traceLambda(
        LOG,
        "blah",
        (Supplier<String>)
            () -> {
              throw new RuntimeException("should not evaluate");
            });

    Log4j2ConfiguratorUtil.setLevel(LOG.getName(), Level.TRACE);
    assertThat(paramStack.size()).isEqualTo(3);
    traceLambda(LOG, "blah {}", paramStack::pop);
    assertThat(paramStack.size()).isEqualTo(2);
    traceLambda(LOG, "blah {} {}", paramStack::pop, paramStack::pop);
    assertThat(paramStack.size()).isZero();
  }

  @Test
  public void smokeWarnLambda() {
    Log4j2ConfiguratorUtil.setLevel(LOG.getName(), Level.OFF);
    traceLambda(
        LOG,
        "blah",
        (Supplier<String>)
            () -> {
              throw new RuntimeException("should not evaluate");
            });

    Log4j2ConfiguratorUtil.setLevel(LOG.getName(), Level.WARN);
    assertThat(paramStack.size()).isEqualTo(3);
    warnLambda(LOG, "blah {}", paramStack::pop);
    assertThat(paramStack.size()).isEqualTo(2);
    warnLambda(LOG, "blah {} {}", paramStack::pop, paramStack::pop);
    assertThat(paramStack.size()).isZero();
  }
}
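The lazy-evaluation guarantee this smoke test covered can still be exercised against the fluent API once the helper is gone. The test below is a hypothetical sketch, not part of this diff: it assumes the same org.hyperledger.besu.util package and the existing Log4j2ConfiguratorUtil used above, and asserts that an argument supplier never runs while the corresponding level is disabled.

// Hypothetical sketch, not part of this diff: equivalent lazy-evaluation check
// written against the SLF4J 2.x fluent API instead of the removed helper.
package org.hyperledger.besu.util;

import static org.assertj.core.api.AssertionsForClassTypes.assertThat;

import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.logging.log4j.Level;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class FluentApiLazyEvaluationTest {
  private static final Logger LOG = LoggerFactory.getLogger(FluentApiLazyEvaluationTest.class);

  @Test
  public void supplierIsNotEvaluatedWhenLevelDisabled() {
    Log4j2ConfiguratorUtil.setLevel(LOG.getName(), Level.WARN);
    final AtomicBoolean evaluated = new AtomicBoolean(false);

    // DEBUG is disabled, so the argument supplier must never run.
    LOG.atDebug()
        .setMessage("blah {}")
        .addArgument(
            () -> {
              evaluated.set(true);
              return "should not evaluate";
            })
        .log();

    assertThat(evaluated.get()).isFalse();
  }
}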