When on PoS the head can only be updated by ForkchoiceUpdate (#4013)

* When executing a newPayload do not move the chain head or update the world state
* When proposing a block, use a lightweight validation, without storing
* forwardToBlock moves the head to the block and triggers a head-advanced event
* Do not persist prepared blocks
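
A minimal sketch of the resulting flow, using the methods this change introduces (`rememberBlock`, `updateForkChoice`). The surrounding class and method names are hypothetical, and the real Engine API plumbing and error handling are omitted:

```java
import java.util.Optional;

import org.hyperledger.besu.consensus.merge.blockcreation.MergeMiningCoordinator;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.BlockValidator;
import org.hyperledger.besu.ethereum.core.Block;

// Hypothetical illustration only: shows which calls move the head after this change.
class PosHeadUpdateSketch {

  // engine_newPayload: validate and store the block; the chain head and the
  // persisted world state are left untouched.
  BlockValidator.Result onNewPayload(final MergeMiningCoordinator coordinator, final Block block) {
    return coordinator.rememberBlock(block);
  }

  // engine_forkchoiceUpdated: the only place the head moves. A child of the current
  // head is forwarded (forwardToBlock); anything else is handled as a reorg/rewind.
  void onForkchoiceUpdated(
      final MergeMiningCoordinator coordinator,
      final Block newHead,
      final Hash finalizedBlockHash,
      final Hash safeBlockHash) {
    coordinator.updateForkChoice(
        newHead.getHeader(), finalizedBlockHash, safeBlockHash, Optional.empty());
  }
}
```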
Signed-off-by: Fabio Di Fabio <fabio.difabio@consensys.net>
Co-authored-by: garyschulte <garyschulte@gmail.com>
Co-authored-by: Jiri Peinlich <jiri.peinlich@gmail.com>
Co-authored-by: Karim TAAM <karim.t2am@gmail.com>
Fabio Di Fabio 2 years ago committed by GitHub
parent b5fa62c0bf
commit 90f891b78c
  1. CHANGELOG.md (3)
  2. consensus/merge/src/main/java/org/hyperledger/besu/consensus/merge/blockcreation/MergeCoordinator.java (69)
  3. consensus/merge/src/main/java/org/hyperledger/besu/consensus/merge/blockcreation/MergeMiningCoordinator.java (4)
  4. consensus/merge/src/main/java/org/hyperledger/besu/consensus/merge/blockcreation/TransitionCoordinator.java (9)
  5. consensus/merge/src/test/java/org/hyperledger/besu/consensus/merge/blockcreation/MergeCoordinatorTest.java (102)
  6. consensus/merge/src/test/java/org/hyperledger/besu/consensus/merge/blockcreation/MergeReorgTest.java (25)
  7. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/EngineForkchoiceUpdated.java (4)
  8. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/EngineNewPayload.java (2)
  9. ethereum/api/src/test/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/methods/engine/EngineNewPayloadTest.java (4)
  10. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/BlockValidator.java (7)
  11. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/MainnetBlockValidator.java (14)
  12. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/bonsai/BonsaiInMemoryWorldState.java (19)
  13. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/bonsai/BonsaiLayeredWorldState.java (1)
  14. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/bonsai/BonsaiPersistedWorldState.java (97)
  15. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/bonsai/BonsaiWorldStateArchive.java (51)
  16. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/bonsai/BonsaiWorldStateKeyValueStorage.java (1)
  17. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/chain/BlockAddedEvent.java (36)
  18. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/chain/DefaultBlockchain.java (103)
  19. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/chain/MutableBlockchain.java (24)
  20. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/mainnet/AbstractBlockProcessor.java (3)
  21. ethereum/core/src/main/java/org/hyperledger/besu/ethereum/worldstate/WorldStateArchive.java (1)
  22. ethereum/core/src/test/java/org/hyperledger/besu/ethereum/bonsai/BonsaiWorldStateArchiveTest.java (26)

@ -3,6 +3,8 @@
## 22.7.1
### Additions and Improvements
- Do not require a minimum block height when downloading headers or blocks [#3911](https://github.com/hyperledger/besu/pull/3911)
- When on PoS the head can only be updated by ForkchoiceUpdate [#3994](https://github.com/hyperledger/besu/pull/3994)
- Version information available in metrics [#3997](https://github.com/hyperledger/besu/pull/3997)
- Add TTD and DNS to Sepolia config [#4024](https://github.com/hyperledger/besu/pull/4024)
- Return `type` with value `0x0` when serializing legacy transactions [#4027](https://github.com/hyperledger/besu/pull/4027)
@ -18,7 +20,6 @@
- Support `finalized` and `safe` as tags for the block parameter in RPC APIs [#3950](https://github.com/hyperledger/besu/pull/3950)
- Added verification of payload attributes in ForkchoiceUpdated [#3837](https://github.com/hyperledger/besu/pull/3837)
- Add support for Gray Glacier hardfork [#3961](https://github.com/hyperledger/besu/issues/3961)
- Do not require a minimum block height when downloading headers or blocks [#3911](https://github.com/hyperledger/besu/pull/3911)
### Bug Fixes
- alias engine-rpc-port parameter with the former rpc param name [#3958](https://github.com/hyperledger/besu/pull/3958)

@ -174,7 +174,7 @@ public class MergeCoordinator implements MergeMiningCoordinator {
final Block emptyBlock =
mergeBlockCreator.createBlock(Optional.of(Collections.emptyList()), random, timestamp);
Result result = executeBlock(emptyBlock);
Result result = validateBlock(emptyBlock);
if (result.blockProcessingOutputs.isPresent()) {
mergeContext.putPayloadById(payloadIdentifier, emptyBlock);
} else {
@ -193,7 +193,7 @@ public class MergeCoordinator implements MergeMiningCoordinator {
if (throwable != null) {
LOG.warn("something went wrong creating block", throwable);
} else {
final var resultBest = executeBlock(bestBlock);
final var resultBest = validateBlock(bestBlock);
if (resultBest.blockProcessingOutputs.isPresent()) {
mergeContext.putPayloadById(payloadIdentifier, bestBlock);
} else {
@ -239,7 +239,7 @@ public class MergeCoordinator implements MergeMiningCoordinator {
}
@Override
public Result executeBlock(final Block block) {
public Result validateBlock(final Block block) {
final var chain = protocolContext.getBlockchain();
chain
@ -254,11 +254,14 @@ public class MergeCoordinator implements MergeMiningCoordinator {
.getByBlockNumber(block.getHeader().getNumber())
.getBlockValidator()
.validateAndProcessBlock(
protocolContext, block, HeaderValidationMode.FULL, HeaderValidationMode.NONE);
protocolContext,
block,
HeaderValidationMode.FULL,
HeaderValidationMode.NONE,
false);
validationResult.blockProcessingOutputs.ifPresentOrElse(
result -> chain.appendBlock(block, result.receipts),
() ->
validationResult.errorMessage.ifPresent(
errMsg ->
protocolSchedule
.getByBlockNumber(chain.getChainHeadBlockNumber())
.getBadBlocksManager()
@ -267,6 +270,16 @@ public class MergeCoordinator implements MergeMiningCoordinator {
return validationResult;
}
@Override
public Result rememberBlock(final Block block) {
debugLambda(LOG, "Remember block {}", block::toLogString);
final var chain = protocolContext.getBlockchain();
final var validationResult = validateBlock(block);
validationResult.blockProcessingOutputs.ifPresent(
result -> chain.storeBlock(block, result.receipts));
return validationResult;
}
@Override
public ForkchoiceResult updateForkChoice(
final BlockHeader newHead,
@ -284,8 +297,8 @@ public class MergeCoordinator implements MergeMiningCoordinator {
return ForkchoiceResult.withFailure(
INVALID, "new head timestamp not greater than parent", latestValid);
}
// set the new head
blockchain.rewindToBlock(newHead.getHash());
setNewHead(blockchain, newHead);
// set and persist the new finalized block if it is present
newFinalized.ifPresent(
@ -310,6 +323,44 @@ public class MergeCoordinator implements MergeMiningCoordinator {
return ForkchoiceResult.withResult(newFinalized, Optional.of(newHead));
}
private boolean setNewHead(final MutableBlockchain blockchain, final BlockHeader newHead) {
if (newHead.getHash().equals(blockchain.getChainHeadHash())) {
debugLambda(LOG, "Nothing to do new head {} is already chain head", newHead::toLogString);
return true;
}
if (newHead.getParentHash().equals(blockchain.getChainHeadHash())) {
debugLambda(
LOG,
"Forwarding chain head to the block {} saved from a previous newPayload invocation",
newHead::toLogString);
forwardWorldStateTo(newHead);
return blockchain.forwardToBlock(newHead);
}
debugLambda(LOG, "New head {} is a chain reorg, rewind chain head to it", newHead::toLogString);
return blockchain.rewindToBlock(newHead.getHash());
}
private void forwardWorldStateTo(final BlockHeader newHead) {
protocolContext
.getWorldStateArchive()
.getMutable(newHead.getStateRoot(), newHead.getHash())
.ifPresentOrElse(
mutableWorldState ->
debugLambda(
LOG,
"World state for state root hash {} and block hash {} persisted successfully",
mutableWorldState::rootHash,
newHead::getHash),
() ->
LOG.error(
"Could not persist world for root hash {} and block hash {}",
newHead.getStateRoot(),
newHead.getHash()));
}
@Override
public boolean latestValidAncestorDescendsFromTerminal(final BlockHeader blockHeader) {
if (blockHeader.getNumber() <= 1L) {

@ -35,7 +35,9 @@ public interface MergeMiningCoordinator extends MiningCoordinator {
final Bytes32 random,
final Address feeRecipient);
Result executeBlock(final Block block);
Result rememberBlock(final Block block);
Result validateBlock(final Block block);
ForkchoiceResult updateForkChoice(
final BlockHeader newHead,

@ -133,8 +133,13 @@ public class TransitionCoordinator extends TransitionUtils<MiningCoordinator>
}
@Override
public Result executeBlock(final Block block) {
return mergeCoordinator.executeBlock(block);
public Result rememberBlock(final Block block) {
return mergeCoordinator.rememberBlock(block);
}
@Override
public Result validateBlock(final Block block) {
return mergeCoordinator.validateBlock(block);
}
@Override

@ -128,18 +128,17 @@ public class MergeCoordinatorTest implements MergeGenesisConfigHelper {
@Test
public void childTimestampExceedsParentsFails() {
BlockHeader terminalHeader = terminalPowBlock();
coordinator.executeBlock(new Block(terminalHeader, BlockBody.empty()));
sendNewPayloadAndForkchoiceUpdate(
new Block(terminalHeader, BlockBody.empty()), Optional.empty(), Hash.ZERO);
BlockHeader parentHeader = nextBlockHeader(terminalHeader);
Block parent = new Block(parentHeader, BlockBody.empty());
coordinator.executeBlock(parent);
sendNewPayloadAndForkchoiceUpdate(parent, Optional.empty(), terminalHeader.getHash());
BlockHeader childHeader = nextBlockHeader(parentHeader, parentHeader.getTimestamp());
Block child = new Block(childHeader, BlockBody.empty());
coordinator.executeBlock(child);
coordinator.rememberBlock(child);
ForkchoiceResult result =
coordinator.updateForkChoice(
@ -161,42 +160,39 @@ public class MergeCoordinatorTest implements MergeGenesisConfigHelper {
@Test
public void latestValidAncestorDescendsFromTerminal() {
BlockHeader terminalHeader = terminalPowBlock();
coordinator.executeBlock(new Block(terminalHeader, BlockBody.empty()));
sendNewPayloadAndForkchoiceUpdate(
new Block(terminalHeader, BlockBody.empty()), Optional.empty(), Hash.ZERO);
BlockHeader parentHeader = nextBlockHeader(terminalHeader);
Block parent = new Block(parentHeader, BlockBody.empty());
coordinator.executeBlock(parent);
sendNewPayloadAndForkchoiceUpdate(parent, Optional.empty(), terminalHeader.getHash());
BlockHeader childHeader = nextBlockHeader(parentHeader);
Block child = new Block(childHeader, BlockBody.empty());
coordinator.executeBlock(child);
coordinator.validateBlock(child);
assertThat(this.coordinator.latestValidAncestorDescendsFromTerminal(child.getHeader()))
.isTrue();
}
@Test
public void latestValidAncestorDescendsFromFinalizedBlock() {
BlockHeader terminalHeader = terminalPowBlock();
coordinator.executeBlock(new Block(terminalHeader, BlockBody.empty()));
sendNewPayloadAndForkchoiceUpdate(
new Block(terminalHeader, BlockBody.empty()), Optional.empty(), Hash.ZERO);
BlockHeader grandParentHeader = nextBlockHeader(terminalHeader);
Block grandParent = new Block(grandParentHeader, BlockBody.empty());
coordinator.executeBlock(grandParent);
when(mergeContext.getFinalized()).thenReturn(Optional.of(grandParentHeader));
sendNewPayloadAndForkchoiceUpdate(grandParent, Optional.empty(), terminalHeader.getHash());
BlockHeader parentHeader = nextBlockHeader(grandParentHeader);
Block parent = new Block(parentHeader, BlockBody.empty());
coordinator.executeBlock(parent);
sendNewPayloadAndForkchoiceUpdate(
parent, Optional.of(grandParentHeader), grandParentHeader.getHash());
BlockHeader childHeader = nextBlockHeader(parentHeader);
Block child = new Block(childHeader, BlockBody.empty());
coordinator.executeBlock(child);
coordinator.validateBlock(child);
assertThat(this.coordinator.latestValidAncestorDescendsFromTerminal(child.getHeader()))
.isTrue();
@ -205,25 +201,19 @@ public class MergeCoordinatorTest implements MergeGenesisConfigHelper {
@Test
public void updateForkChoiceShouldPersistFirstFinalizedBlockHash() {
when(mergeContext.getFinalized()).thenReturn(Optional.empty());
BlockHeader terminalHeader = terminalPowBlock();
coordinator.executeBlock(new Block(terminalHeader, BlockBody.empty()));
sendNewPayloadAndForkchoiceUpdate(
new Block(terminalHeader, BlockBody.empty()), Optional.empty(), Hash.ZERO);
BlockHeader firstFinalizedHeader = nextBlockHeader(terminalHeader);
Block firstFinalizedBlock = new Block(firstFinalizedHeader, BlockBody.empty());
coordinator.executeBlock(firstFinalizedBlock);
sendNewPayloadAndForkchoiceUpdate(
firstFinalizedBlock, Optional.empty(), terminalHeader.getHash());
BlockHeader headBlockHeader = nextBlockHeader(firstFinalizedHeader);
Block headBlock = new Block(headBlockHeader, BlockBody.empty());
coordinator.executeBlock(headBlock);
coordinator.updateForkChoice(
headBlockHeader,
firstFinalizedBlock.getHash(),
firstFinalizedBlock.getHash(),
Optional.empty());
sendNewPayloadAndForkchoiceUpdate(
headBlock, Optional.of(firstFinalizedHeader), firstFinalizedHeader.getHash());
verify(blockchain).setFinalized(firstFinalizedBlock.getHash());
verify(mergeContext).setFinalized(firstFinalizedHeader);
@ -234,27 +224,23 @@ public class MergeCoordinatorTest implements MergeGenesisConfigHelper {
@Test
public void updateForkChoiceShouldPersistLastFinalizedBlockHash() {
BlockHeader terminalHeader = terminalPowBlock();
coordinator.executeBlock(new Block(terminalHeader, BlockBody.empty()));
sendNewPayloadAndForkchoiceUpdate(
new Block(terminalHeader, BlockBody.empty()), Optional.empty(), Hash.ZERO);
BlockHeader prevFinalizedHeader = nextBlockHeader(terminalHeader);
Block prevFinalizedBlock = new Block(prevFinalizedHeader, BlockBody.empty());
coordinator.executeBlock(prevFinalizedBlock);
when(mergeContext.getFinalized()).thenReturn(Optional.of(prevFinalizedHeader));
sendNewPayloadAndForkchoiceUpdate(
prevFinalizedBlock, Optional.empty(), terminalHeader.getHash());
BlockHeader lastFinalizedHeader = nextBlockHeader(prevFinalizedHeader);
Block lastFinalizedBlock = new Block(lastFinalizedHeader, BlockBody.empty());
coordinator.executeBlock(lastFinalizedBlock);
sendNewPayloadAndForkchoiceUpdate(
lastFinalizedBlock, Optional.of(prevFinalizedHeader), prevFinalizedHeader.getHash());
BlockHeader headBlockHeader = nextBlockHeader(lastFinalizedHeader);
Block headBlock = new Block(headBlockHeader, BlockBody.empty());
coordinator.executeBlock(headBlock);
coordinator.updateForkChoice(
headBlockHeader,
lastFinalizedBlock.getHash(),
lastFinalizedBlock.getHash(),
Optional.empty());
sendNewPayloadAndForkchoiceUpdate(
headBlock, Optional.of(lastFinalizedHeader), lastFinalizedHeader.getHash());
verify(blockchain).setFinalized(lastFinalizedBlock.getHash());
verify(mergeContext).setFinalized(lastFinalizedHeader);
@ -427,21 +413,23 @@ public class MergeCoordinatorTest implements MergeGenesisConfigHelper {
@Test
public void invalidPayloadShouldReturnErrorAndUpdateForkchoiceState() {
BlockHeader terminalHeader = terminalPowBlock();
coordinator.executeBlock(new Block(terminalHeader, BlockBody.empty()));
sendNewPayloadAndForkchoiceUpdate(
new Block(terminalHeader, BlockBody.empty()), Optional.empty(), Hash.ZERO);
BlockHeader prevFinalizedHeader = nextBlockHeader(terminalHeader);
Block prevFinalizedBlock = new Block(prevFinalizedHeader, BlockBody.empty());
coordinator.executeBlock(prevFinalizedBlock);
when(mergeContext.getFinalized()).thenReturn(Optional.of(prevFinalizedHeader));
sendNewPayloadAndForkchoiceUpdate(
prevFinalizedBlock, Optional.empty(), terminalHeader.getHash());
BlockHeader lastFinalizedHeader = nextBlockHeader(prevFinalizedHeader);
Block lastFinalizedBlock = new Block(lastFinalizedHeader, BlockBody.empty());
coordinator.executeBlock(lastFinalizedBlock);
sendNewPayloadAndForkchoiceUpdate(
lastFinalizedBlock, Optional.of(prevFinalizedHeader), prevFinalizedHeader.getHash());
BlockHeader headBlockHeader = nextBlockHeader(lastFinalizedHeader);
Block headBlock = new Block(headBlockHeader, BlockBody.empty());
coordinator.executeBlock(headBlock);
assertThat(coordinator.rememberBlock(headBlock).blockProcessingOutputs).isPresent();
var res =
coordinator.updateForkChoice(
@ -461,6 +449,23 @@ public class MergeCoordinatorTest implements MergeGenesisConfigHelper {
verify(mergeContext).setSafeBlock(lastFinalizedHeader);
}
private void sendNewPayloadAndForkchoiceUpdate(
final Block block, final Optional<BlockHeader> finalizedHeader, final Hash safeHash) {
assertThat(coordinator.rememberBlock(block).blockProcessingOutputs).isPresent();
assertThat(
coordinator
.updateForkChoice(
block.getHeader(),
finalizedHeader.map(BlockHeader::getHash).orElse(Hash.ZERO),
safeHash,
Optional.empty())
.isValid())
.isTrue();
when(mergeContext.getFinalized()).thenReturn(finalizedHeader);
}
private BlockHeader terminalPowBlock() {
return headerGenerator
.difficulty(Difficulty.MAX_VALUE)
@ -472,6 +477,7 @@ public class MergeCoordinatorTest implements MergeGenesisConfigHelper {
genesisState.getBlock().getHeader().getBaseFee().orElse(Wei.of(0x3b9aca00)),
0,
15000000l))
.timestamp(1)
.gasLimit(genesisState.getBlock().getHeader().getGasLimit())
.stateRoot(genesisState.getBlock().getHeader().getStateRoot())
.buildHeader();

@ -24,6 +24,7 @@ import org.hyperledger.besu.consensus.merge.MergeContext;
import org.hyperledger.besu.consensus.merge.PostMergeContext;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.datatypes.Wei;
import org.hyperledger.besu.ethereum.BlockValidator.Result;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.chain.GenesisState;
import org.hyperledger.besu.ethereum.chain.MutableBlockchain;
@ -101,20 +102,19 @@ public class MergeReorgTest implements MergeGenesisConfigHelper {
then you can and should be able to re-org to a different pre-TTD block
say there is viable TTD block A and B, then we can have a PoS chain build on A for a while
and then see another PoS chain build on B that has a higher fork choice weight and causes a re-org
once any post-merge PoS chain is finalied though, you'd never re-org any PoW blocks in the tree ever again */
once any post-merge PoS chain is finalized though, you'd never re-org any PoW blocks in the tree ever again */
@Test
public void reorgsAcrossTDDToDifferentTargetsWhenNotFinal() {
// Add N blocks to chain from genesis, where total diff is < TTD
Log4j2ConfiguratorUtil.setLevelDebug(BlockHeaderValidator.class.getName());
List<Block> endOfWork = subChain(genesisState.getBlock().getHeader(), 10, Difficulty.of(100L));
endOfWork.stream().forEach(coordinator::executeBlock);
endOfWork.stream().forEach(this::appendBlock);
assertThat(blockchain.getChainHead().getHeight()).isEqualTo(10L);
BlockHeader tddPenultimate = this.blockchain.getChainHeadHeader();
// Add TTD block A to chain as child of N.
Block ttdA = new Block(terminalPowBlock(tddPenultimate, Difficulty.ONE), BlockBody.empty());
boolean worked = coordinator.executeBlock(ttdA).blockProcessingOutputs.isPresent();
assertThat(worked).isTrue();
appendBlock(ttdA);
assertThat(blockchain.getChainHead().getHeight()).isEqualTo(11L);
assertThat(blockchain.getTotalDifficultyByHash(ttdA.getHash())).isPresent();
Difficulty tdd = blockchain.getTotalDifficultyByHash(ttdA.getHash()).get();
@ -127,16 +127,15 @@ public class MergeReorgTest implements MergeGenesisConfigHelper {
.toBigInteger());
assertThat(mergeContext.isPostMerge()).isTrue();
List<Block> builtOnTTDA = subChain(ttdA.getHeader(), 5, Difficulty.of(0L));
builtOnTTDA.stream().forEach(coordinator::executeBlock);
builtOnTTDA.stream().forEach(this::appendBlock);
assertThat(blockchain.getChainHead().getHeight()).isEqualTo(16);
assertThat(blockchain.getChainHead().getHash())
.isEqualTo(builtOnTTDA.get(builtOnTTDA.size() - 1).getHash());
Block ttdB = new Block(terminalPowBlock(tddPenultimate, Difficulty.of(2L)), BlockBody.empty());
worked = coordinator.executeBlock(ttdB).blockProcessingOutputs.isPresent();
assertThat(worked).isTrue();
appendBlock(ttdB);
List<Block> builtOnTTDB = subChain(ttdB.getHeader(), 10, Difficulty.of(0L));
builtOnTTDB.stream().forEach(coordinator::executeBlock);
builtOnTTDB.stream().forEach(this::appendBlock);
assertThat(blockchain.getChainHead().getHeight()).isEqualTo(21);
assertThat(blockchain.getChainHead().getHash())
.isEqualTo(builtOnTTDB.get(builtOnTTDB.size() - 1).getHash());
@ -145,6 +144,16 @@ public class MergeReorgTest implements MergeGenesisConfigHelper {
}
private void appendBlock(final Block block) {
final Result result = coordinator.validateBlock(block);
result.blockProcessingOutputs.ifPresentOrElse(
outputs -> blockchain.appendBlock(block, outputs.receipts),
() -> {
throw new RuntimeException(result.errorMessage.get());
});
}
private List<Block> subChain(
final BlockHeader parentHeader, final long length, final Difficulty each) {
BlockHeader newParent = parentHeader;

@ -87,9 +87,9 @@ public class EngineForkchoiceUpdated extends ExecutionEngineJsonRpcMethod {
Optional.ofNullable(forkChoice.getHeadBlockHash())
.filter(hash -> !hash.equals(Hash.ZERO))
.ifPresent(
blockhash ->
blockHash ->
mergeCoordinator.getOrSyncHeaderByHash(
blockhash, forkChoice.getFinalizedBlockHash()));
blockHash, forkChoice.getFinalizedBlockHash()));
return syncingResponse(requestId);
}

@ -176,7 +176,7 @@ public class EngineNewPayload extends ExecutionEngineJsonRpcMethod {
}
// execute block and return result response
final BlockValidator.Result executionResult = mergeCoordinator.executeBlock(block);
final BlockValidator.Result executionResult = mergeCoordinator.rememberBlock(block);
if (executionResult.errorMessage.isEmpty()) {
return respondWith(reqId, newBlockHeader.getHash(), VALID);

@ -96,7 +96,7 @@ public class EngineNewPayloadTest {
.thenReturn(true);
when(mergeCoordinator.getOrSyncHeaderByHash(any(Hash.class)))
.thenReturn(Optional.of(mockHeader));
when(mergeCoordinator.executeBlock(any()))
when(mergeCoordinator.rememberBlock(any()))
.thenReturn(new Result(new BlockProcessingOutputs(null, List.of())));
var resp = resp(mockPayload(mockHeader, Collections.emptyList()));
@ -117,7 +117,7 @@ public class EngineNewPayloadTest {
.thenReturn(true);
when(mergeCoordinator.getOrSyncHeaderByHash(any(Hash.class)))
.thenReturn(Optional.of(mockHeader));
when(mergeCoordinator.executeBlock(any())).thenReturn(new Result("error 42"));
when(mergeCoordinator.rememberBlock(any())).thenReturn(new Result("error 42"));
var resp = resp(mockPayload(mockHeader, Collections.emptyList()));

@ -56,6 +56,13 @@ public interface BlockValidator {
final HeaderValidationMode headerValidationMode,
final HeaderValidationMode ommerValidationMode);
Result validateAndProcessBlock(
final ProtocolContext context,
final Block block,
final HeaderValidationMode headerValidationMode,
final HeaderValidationMode ommerValidationMode,
final boolean shouldPersist);
boolean fastBlockValidation(
final ProtocolContext context,
final Block block,

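For orientation, a hedged usage sketch of the new overload as it appears in `MergeCoordinator.validateBlock` above; `protocolSchedule`, `protocolContext` and `block` are assumed to be in scope, as in that class:

```java
// shouldPersist = false: the block is processed against a copy of the parent world
// state, so validation leaves the persisted state (and the chain) untouched.
final BlockValidator.Result result =
    protocolSchedule
        .getByBlockNumber(block.getHeader().getNumber())
        .getBlockValidator()
        .validateAndProcessBlock(
            protocolContext,
            block,
            HeaderValidationMode.FULL,
            HeaderValidationMode.NONE,
            false);
```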
@ -68,6 +68,16 @@ public class MainnetBlockValidator implements BlockValidator {
final Block block,
final HeaderValidationMode headerValidationMode,
final HeaderValidationMode ommerValidationMode) {
return validateAndProcessBlock(context, block, headerValidationMode, ommerValidationMode, true);
}
@Override
public BlockValidator.Result validateAndProcessBlock(
final ProtocolContext context,
final Block block,
final HeaderValidationMode headerValidationMode,
final HeaderValidationMode ommerValidationMode,
final boolean shouldPersist) {
final BlockHeader header = block.getHeader();
@ -88,6 +98,7 @@ public class MainnetBlockValidator implements BlockValidator {
context
.getWorldStateArchive()
.getMutable(parentHeader.getStateRoot(), parentHeader.getHash());
if (maybeWorldState.isEmpty()) {
return handleAndReportFailure(
block,
@ -95,7 +106,8 @@ public class MainnetBlockValidator implements BlockValidator {
+ parentHeader.getStateRoot()
+ " is not available");
}
final MutableWorldState worldState = maybeWorldState.get();
final MutableWorldState worldState =
shouldPersist ? maybeWorldState.get() : maybeWorldState.get().copy();
final BlockProcessor.Result result = processBlock(context, worldState, block);
if (result.isFailed()) {

@ -30,13 +30,24 @@ public class BonsaiInMemoryWorldState extends BonsaiPersistedWorldState {
@Override
public Hash rootHash() {
final BonsaiWorldStateKeyValueStorage.Updater updater = worldStateStorage.updater();
final Hash calculatedRootHash = calculateRootHash(updater);
updater.rollback();
return Hash.wrap(calculatedRootHash);
try {
final Hash calculatedRootHash = calculateRootHash(updater);
return Hash.wrap(calculatedRootHash);
} finally {
updater.rollback();
}
}
@Override
public void persist(final BlockHeader blockHeader) {
throw new UnsupportedOperationException("In Memory worldState can not be persisted.");
final BonsaiWorldStateUpdater localUpdater = updater.copy();
try {
final Hash newWorldStateRootHash = rootHash();
prepareTrieLog(blockHeader, localUpdater, newWorldStateRootHash);
worldStateBlockHash = blockHeader.getHash();
worldStateRootHash = newWorldStateRootHash;
} finally {
localUpdater.reset();
}
}
}

@ -37,7 +37,6 @@ import org.apache.tuweni.units.bigints.UInt256;
/** A World State backed first by trie log layer and then by another world state. */
public class BonsaiLayeredWorldState implements MutableWorldState, BonsaiWorldView, WorldState {
private Optional<BonsaiWorldView> nextWorldView;
protected final long height;
protected final TrieLogLayer trieLog;

@ -19,6 +19,7 @@ package org.hyperledger.besu.ethereum.bonsai;
import static org.hyperledger.besu.ethereum.bonsai.BonsaiAccount.fromRLP;
import static org.hyperledger.besu.ethereum.bonsai.BonsaiWorldStateKeyValueStorage.WORLD_BLOCK_HASH_KEY;
import static org.hyperledger.besu.ethereum.bonsai.BonsaiWorldStateKeyValueStorage.WORLD_ROOT_HASH_KEY;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.datatypes.Hash;
@ -50,11 +51,11 @@ public class BonsaiPersistedWorldState implements MutableWorldState, BonsaiWorld
protected final BonsaiWorldStateKeyValueStorage worldStateStorage;
private final BonsaiWorldStateArchive archive;
private final BonsaiWorldStateUpdater updater;
protected final BonsaiWorldStateArchive archive;
protected final BonsaiWorldStateUpdater updater;
private Hash worldStateRootHash;
private Hash worldStateBlockHash;
protected Hash worldStateRootHash;
protected Hash worldStateBlockHash;
public BonsaiPersistedWorldState(
final BonsaiWorldStateArchive archive,
@ -238,52 +239,32 @@ public class BonsaiPersistedWorldState implements MutableWorldState, BonsaiWorld
@Override
public void persist(final BlockHeader blockHeader) {
final Optional<BlockHeader> maybeBlockHeader = Optional.ofNullable(blockHeader);
debugLambda(LOG, "Persist world state for block {}", maybeBlockHeader::toString);
boolean success = false;
final BonsaiWorldStateUpdater localUpdater = updater.copy();
final Hash originalBlockHash = worldStateBlockHash;
final Hash originalRootHash = worldStateRootHash;
final BonsaiWorldStateKeyValueStorage.Updater stateUpdater = worldStateStorage.updater();
try {
worldStateRootHash = calculateRootHash(stateUpdater, localUpdater);
stateUpdater
.getTrieBranchStorageTransaction()
.put(WORLD_ROOT_HASH_KEY, worldStateRootHash.toArrayUnsafe());
final Hash newWorldStateRootHash = calculateRootHash(stateUpdater, localUpdater);
// if we are persisted with a block header, and the prior state is the parent
// then persist the TrieLog for that transition. If specified but not a direct
// descendant simply store the new block hash.
// then persist the TrieLog for that transition.
// If specified but not a direct descendant simply store the new block hash.
if (blockHeader != null) {
if (!worldStateRootHash.equals(blockHeader.getStateRoot())) {
throw new RuntimeException(
"World State Root does not match expected value, header "
+ blockHeader.getStateRoot().toHexString()
+ " calculated "
+ worldStateRootHash.toHexString());
}
worldStateBlockHash = Hash.fromPlugin(blockHeader.getBlockHash());
stateUpdater
.getTrieBranchStorageTransaction()
.put(WORLD_BLOCK_HASH_KEY, worldStateBlockHash.toArrayUnsafe());
if (originalBlockHash.equals(blockHeader.getParentHash())) {
LOG.debug("Writing Trie Log for {}", worldStateBlockHash);
final TrieLogLayer trieLog = localUpdater.generateTrieLog(worldStateBlockHash);
trieLog.freeze();
archive.addLayeredWorldState(this, blockHeader, worldStateRootHash, trieLog);
final BytesValueRLPOutput rlpLog = new BytesValueRLPOutput();
trieLog.writeTo(rlpLog);
stateUpdater
.getTrieLogStorageTransaction()
.put(worldStateBlockHash.toArrayUnsafe(), rlpLog.encoded().toArrayUnsafe());
}
final TrieLogLayer trieLog =
prepareTrieLog(blockHeader, localUpdater, newWorldStateRootHash);
persistTrieLog(blockHeader, newWorldStateRootHash, trieLog, stateUpdater);
worldStateBlockHash = blockHeader.getHash();
} else {
stateUpdater.getTrieBranchStorageTransaction().remove(WORLD_BLOCK_HASH_KEY);
worldStateBlockHash = null;
}
stateUpdater
.getTrieBranchStorageTransaction()
.put(WORLD_ROOT_HASH_KEY, newWorldStateRootHash.toArrayUnsafe());
worldStateRootHash = newWorldStateRootHash;
success = true;
} finally {
if (success) {
@ -292,8 +273,6 @@ public class BonsaiPersistedWorldState implements MutableWorldState, BonsaiWorld
} else {
stateUpdater.rollback();
updater.reset();
worldStateBlockHash = originalBlockHash;
worldStateRootHash = originalRootHash;
}
}
if (blockHeader != null) {
@ -301,6 +280,46 @@ public class BonsaiPersistedWorldState implements MutableWorldState, BonsaiWorld
}
}
protected TrieLogLayer prepareTrieLog(
final BlockHeader blockHeader,
final BonsaiWorldStateUpdater localUpdater,
final Hash currentWorldStateRootHash) {
if (!currentWorldStateRootHash.equals(blockHeader.getStateRoot())) {
throw new RuntimeException(
"World State Root does not match expected value, header "
+ blockHeader.getStateRoot().toHexString()
+ " calculated "
+ currentWorldStateRootHash.toHexString());
}
debugLambda(LOG, "Adding layered world state for {}", blockHeader::toLogString);
final TrieLogLayer trieLog = localUpdater.generateTrieLog(blockHeader.getBlockHash());
trieLog.freeze();
archive.addLayeredWorldState(this, blockHeader, currentWorldStateRootHash, trieLog);
return trieLog;
}
private void persistTrieLog(
final BlockHeader blockHeader,
final Hash worldStateRootHash,
final TrieLogLayer trieLog,
final BonsaiWorldStateKeyValueStorage.Updater stateUpdater) {
debugLambda(
LOG,
"Persisting trie log for block hash {} and world state root {}",
blockHeader::toLogString,
worldStateRootHash::toHexString);
stateUpdater
.getTrieBranchStorageTransaction()
.put(WORLD_BLOCK_HASH_KEY, blockHeader.getHash().toArrayUnsafe());
final BytesValueRLPOutput rlpLog = new BytesValueRLPOutput();
trieLog.writeTo(rlpLog);
stateUpdater
.getTrieLogStorageTransaction()
.put(blockHeader.getHash().toArrayUnsafe(), rlpLog.encoded().toArrayUnsafe());
}
@Override
public WorldUpdater updater() {
return updater;

@ -17,9 +17,11 @@
package org.hyperledger.besu.ethereum.bonsai;
import static org.hyperledger.besu.datatypes.Hash.fromPlugin;
import static org.hyperledger.besu.util.Slf4jLambdaHelper.debugLambda;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.chain.BlockAddedEvent;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.MutableWorldState;
@ -73,24 +75,23 @@ public class BonsaiWorldStateArchive implements WorldStateArchive {
this.persistedState = new BonsaiPersistedWorldState(this, worldStateStorage);
this.layeredWorldStatesByHash = layeredWorldStatesByHash;
this.maxLayersToLoad = maxLayersToLoad;
blockchain.observeBlockAdded(
event -> {
if (event.isNewCanonicalHead()) {
final BlockHeader eventBlockHeader = event.getBlock().getHeader();
layeredWorldStatesByHash.computeIfPresent(
eventBlockHeader.getParentHash(),
(hash, bonsaiLayeredWorldState) -> {
if (layeredWorldStatesByHash.containsKey(
fromPlugin(eventBlockHeader.getBlockHash()))) {
bonsaiLayeredWorldState.setNextWorldView(
Optional.of(
layeredWorldStatesByHash.get(
fromPlugin(eventBlockHeader.getBlockHash()))));
}
return bonsaiLayeredWorldState;
});
}
});
blockchain.observeBlockAdded(this::blockAddedHandler);
}
private void blockAddedHandler(final BlockAddedEvent event) {
LOG.debug("New block add event {}", event);
if (event.isNewCanonicalHead()) {
final BlockHeader eventBlockHeader = event.getBlock().getHeader();
layeredWorldStatesByHash.computeIfPresent(
eventBlockHeader.getParentHash(),
(parentHash, bonsaiLayeredWorldState) -> {
if (layeredWorldStatesByHash.containsKey(eventBlockHeader.getHash())) {
bonsaiLayeredWorldState.setNextWorldView(
Optional.of(layeredWorldStatesByHash.get(eventBlockHeader.getHash())));
}
return bonsaiLayeredWorldState;
});
}
}
@Override
@ -117,7 +118,12 @@ public class BonsaiWorldStateArchive implements WorldStateArchive {
blockHeader.getNumber(),
worldStateRootHash,
trieLog);
layeredWorldStatesByHash.put(bonsaiLayeredWorldState.blockHash(), bonsaiLayeredWorldState);
debugLambda(
LOG,
"adding layered world state for block {}, state root hash {}",
blockHeader::toLogString,
worldStateRootHash::toHexString);
layeredWorldStatesByHash.put(blockHeader.getHash(), bonsaiLayeredWorldState);
}
public Optional<TrieLogLayer> getTrieLogLayer(final Hash blockHash) {
@ -226,8 +232,7 @@ public class BonsaiWorldStateArchive implements WorldStateArchive {
}
// attempt the state rolling
final BonsaiWorldStateUpdater bonsaiUpdater =
(BonsaiWorldStateUpdater) persistedState.updater();
final BonsaiWorldStateUpdater bonsaiUpdater = getUpdater();
try {
for (final TrieLogLayer rollBack : rollBacks) {
LOG.debug("Attempting Rollback of {}", rollBack.getBlockHash());
@ -255,6 +260,10 @@ public class BonsaiWorldStateArchive implements WorldStateArchive {
}
}
BonsaiWorldStateUpdater getUpdater() {
return (BonsaiWorldStateUpdater) persistedState.updater();
}
@Override
public MutableWorldState getMutable() {
return persistedState;

@ -35,7 +35,6 @@ import org.apache.tuweni.bytes.Bytes32;
import org.apache.tuweni.rlp.RLP;
public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage {
public static final byte[] WORLD_ROOT_HASH_KEY = "worldRoot".getBytes(StandardCharsets.UTF_8);
public static final byte[] WORLD_BLOCK_HASH_KEY =

@ -36,7 +36,8 @@ public class BlockAddedEvent {
public enum EventType {
HEAD_ADVANCED,
FORK,
CHAIN_REORG
CHAIN_REORG,
STORED_ONLY
}
private BlockAddedEvent(
@ -98,12 +99,23 @@ public class BlockAddedEvent {
block.getHeader().getParentHash());
}
public static BlockAddedEvent createForStoredOnly(final Block block) {
return new BlockAddedEvent(
EventType.STORED_ONLY,
block,
Collections.emptyList(),
Collections.emptyList(),
Collections.emptyList(),
Collections.emptyList(),
block.getHeader().getParentHash());
}
public Block getBlock() {
return block;
}
public boolean isNewCanonicalHead() {
return eventType != EventType.FORK;
return eventType == EventType.HEAD_ADVANCED || eventType == EventType.CHAIN_REORG;
}
public EventType getEventType() {
@ -129,4 +141,24 @@ public class BlockAddedEvent {
public Hash getCommonAncestorHash() {
return commonAncestorHash;
}
@Override
public String toString() {
return "BlockAddedEvent{"
+ "eventType="
+ eventType
+ ", block="
+ block.toLogString()
+ ", commonAncestorHash="
+ commonAncestorHash
+ ", addedTransactions count="
+ addedTransactions.size()
+ ", removedTransactions count="
+ removedTransactions.size()
+ ", transactionReceipts count ="
+ transactionReceipts.size()
+ ", logsWithMetadata count="
+ logsWithMetadata.size()
+ '}';
}
}
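
A hedged sketch of how a block-added observer can react to the new event type; the lambda body is illustrative only, and `blockchain` stands for any `Blockchain` instance:

```java
// STORED_ONLY is emitted for blocks persisted via storeBlock (the newPayload path);
// only HEAD_ADVANCED and CHAIN_REORG now count as a new canonical head.
blockchain.observeBlockAdded(
    event -> {
      if (event.getEventType() == BlockAddedEvent.EventType.STORED_ONLY) {
        // block persisted, chain head and world state untouched
      } else if (event.isNewCanonicalHead()) {
        // head advanced or reorged: head-dependent caches may be refreshed
      }
    });
```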

@ -21,6 +21,7 @@ import static java.util.stream.Collectors.joining;
import static java.util.stream.Collectors.toList;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.chain.BlockchainStorage.Updater;
import org.hyperledger.besu.ethereum.core.Block;
import org.hyperledger.besu.ethereum.core.BlockBody;
import org.hyperledger.besu.ethereum.core.BlockHeader;
@ -282,20 +283,33 @@ public class DefaultBlockchain implements MutableBlockchain {
@Override
public synchronized void appendBlock(final Block block, final List<TransactionReceipt> receipts) {
appendBlockHelper(new BlockWithReceipts(block, receipts), false);
}
@Override
public synchronized void storeBlock(final Block block, final List<TransactionReceipt> receipts) {
appendBlockHelper(new BlockWithReceipts(block, receipts), true);
}
private boolean blockShouldBeProcessed(
final Block block, final List<TransactionReceipt> receipts) {
checkArgument(
block.getBody().getTransactions().size() == receipts.size(),
"Supplied receipts do not match block transactions.");
if (blockIsAlreadyTracked(block)) {
return;
return false;
}
checkArgument(blockIsConnected(block), "Attempt to append non-connected block.");
final BlockAddedEvent blockAddedEvent =
appendBlockHelper(new BlockWithReceipts(block, receipts));
blockAddedObservers.forEach(observer -> observer.onBlockAdded(blockAddedEvent));
return true;
}
private BlockAddedEvent appendBlockHelper(final BlockWithReceipts blockWithReceipts) {
private void appendBlockHelper(
final BlockWithReceipts blockWithReceipts, final boolean storeOnly) {
if (!blockShouldBeProcessed(blockWithReceipts.getBlock(), blockWithReceipts.getReceipts())) {
return;
}
final Block block = blockWithReceipts.getBlock();
final List<TransactionReceipt> receipts = blockWithReceipts.getReceipts();
final Hash hash = block.getHash();
@ -308,15 +322,18 @@ public class DefaultBlockchain implements MutableBlockchain {
updater.putTransactionReceipts(hash, receipts);
updater.putTotalDifficulty(hash, td);
// Update canonical chain data
final BlockAddedEvent blockAddedEvent = updateCanonicalChainData(updater, blockWithReceipts);
updater.commit();
if (blockAddedEvent.isNewCanonicalHead()) {
updateCacheForNewCanonicalHead(block, td);
final BlockAddedEvent blockAddedEvent;
if (storeOnly) {
blockAddedEvent = handleStoreOnly(blockWithReceipts);
} else {
blockAddedEvent = updateCanonicalChainData(updater, blockWithReceipts);
if (blockAddedEvent.isNewCanonicalHead()) {
updateCacheForNewCanonicalHead(block, td);
}
}
return blockAddedEvent;
updater.commit();
blockAddedObservers.forEach(observer -> observer.onBlockAdded(blockAddedEvent));
}
@Override
@ -365,24 +382,17 @@ public class DefaultBlockchain implements MutableBlockchain {
private BlockAddedEvent updateCanonicalChainData(
final BlockchainStorage.Updater updater, final BlockWithReceipts blockWithReceipts) {
final Block newBlock = blockWithReceipts.getBlock();
final Hash chainHead = blockchainStorage.getChainHead().orElse(null);
if (newBlock.getHeader().getNumber() != BlockHeader.GENESIS_BLOCK_NUMBER && chainHead == null) {
throw new IllegalStateException("Blockchain is missing chain head.");
}
final Hash newBlockHash = newBlock.getHash();
try {
if (chainHead == null || newBlock.getHeader().getParentHash().equals(chainHead)) {
// This block advances the chain, update the chain head
updater.putBlockHash(newBlock.getHeader().getNumber(), newBlockHash);
updater.setChainHead(newBlockHash);
indexTransactionForBlock(updater, newBlockHash, newBlock.getBody().getTransactions());
return BlockAddedEvent.createForHeadAdvancement(
newBlock,
LogWithMetadata.generate(
blockWithReceipts.getBlock(), blockWithReceipts.getReceipts(), false),
blockWithReceipts.getReceipts());
if (newBlock.getHeader().getParentHash().equals(chainHead) || chainHead == null) {
return handleNewHead(updater, blockWithReceipts);
} else if (blockChoiceRule.compare(newBlock.getHeader(), chainHeader) > 0) {
// New block represents a chain reorganization
return handleChainReorg(updater, blockWithReceipts);
@ -398,6 +408,26 @@ public class DefaultBlockchain implements MutableBlockchain {
}
}
private BlockAddedEvent handleStoreOnly(final BlockWithReceipts blockWithReceipts) {
return BlockAddedEvent.createForStoredOnly(blockWithReceipts.getBlock());
}
private BlockAddedEvent handleNewHead(
final Updater updater, final BlockWithReceipts blockWithReceipts) {
// This block advances the chain, update the chain head
final Hash newBlockHash = blockWithReceipts.getHash();
updater.putBlockHash(blockWithReceipts.getNumber(), newBlockHash);
updater.setChainHead(newBlockHash);
indexTransactionForBlock(
updater, newBlockHash, blockWithReceipts.getBlock().getBody().getTransactions());
return BlockAddedEvent.createForHeadAdvancement(
blockWithReceipts.getBlock(),
LogWithMetadata.generate(
blockWithReceipts.getBlock(), blockWithReceipts.getReceipts(), false),
blockWithReceipts.getReceipts());
}
private BlockAddedEvent handleFork(final BlockchainStorage.Updater updater, final Block fork) {
final Collection<Hash> forkHeads = blockchainStorage.getForkHeads();
@ -559,6 +589,31 @@ public class DefaultBlockchain implements MutableBlockchain {
}
}
@Override
public boolean forwardToBlock(final BlockHeader blockHeader) {
checkArgument(
chainHeader.getHash().equals(blockHeader.getParentHash()),
"Supplied block header is not a child of the current chain head.");
final BlockchainStorage.Updater updater = blockchainStorage.updater();
try {
final BlockWithReceipts blockWithReceipts = getBlockWithReceipts(blockHeader).get();
BlockAddedEvent newHeadEvent = handleNewHead(updater, blockWithReceipts);
updateCacheForNewCanonicalHead(
blockWithReceipts.getBlock(), calculateTotalDifficulty(blockHeader));
updater.commit();
blockAddedObservers.forEach(observer -> observer.onBlockAdded(newHeadEvent));
return true;
} catch (final NoSuchElementException e) {
// Any Optional.get() calls in this block should succeed; missing data means data
// corruption or a bug.
updater.rollback();
throw new IllegalStateException("Blockchain is missing data that should be present.", e);
}
}
@Override
public void setFinalized(final Hash blockHash) {
final var updater = blockchainStorage.updater();

@ -37,10 +37,22 @@ public interface MutableBlockchain extends Blockchain {
*/
void appendBlock(Block block, List<TransactionReceipt> receipts);
/**
* Adds a block to the blockchain, without updating the chain state.
*
* <p>Block must be connected to the existing blockchain (its parent must already be stored),
* otherwise an {@link IllegalArgumentException} is thrown. Blocks representing forks are allowed
* as long as they are connected.
*
* @param block The block to store.
* @param receipts The list of receipts associated with this block's transactions.
*/
void storeBlock(Block block, List<TransactionReceipt> receipts);
void unsafeImportBlock(
final Block block,
final List<TransactionReceipt> receipts,
final Optional<Difficulty> maybeTtalDifficulty);
final Optional<Difficulty> maybeTotalDifficulty);
void unsafeSetChainHead(final BlockHeader blockHeader, final Difficulty totalDifficulty);
@ -62,6 +74,16 @@ public interface MutableBlockchain extends Blockchain {
*/
boolean rewindToBlock(final Hash blockHash);
/**
* Forward the canonical chain head to the specified block. The block must be a child of the
* current chain head and must already be stored.
*
* @param blockHeader The block header to forward to.
* @return {@code true} on success, {@code false} if the block is not a child of the current
*     chain head
*/
boolean forwardToBlock(final BlockHeader blockHeader);
/**
* Set the hash of the last finalized block.
*

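A hedged sketch of the two new `MutableBlockchain` methods used together; the `blockchain`, `block` and `receipts` variables are assumptions:

```java
// Step 1 (newPayload): persist the validated block without moving the chain head.
blockchain.storeBlock(block, receipts);

// Step 2 (forkchoiceUpdated): advance the head to the stored block, which must be
// a child of the current chain head and already stored.
final boolean advanced = blockchain.forwardToBlock(block.getHeader());
```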
@ -180,11 +180,9 @@ public abstract class AbstractBlockProcessor implements BlockProcessor {
}
return AbstractBlockProcessor.Result.failed();
}
worldStateUpdater.commit();
currentGasUsed += transaction.getGasLimit() - result.getGasRemaining();
final TransactionReceipt transactionReceipt =
transactionReceiptFactory.create(
transaction.getType(), result, worldState, currentGasUsed);
@ -205,6 +203,7 @@ public abstract class AbstractBlockProcessor implements BlockProcessor {
LOG.error("failed persisting block", e);
return AbstractBlockProcessor.Result.failed();
}
return AbstractBlockProcessor.Result.successful(receipts);
}

@ -35,6 +35,7 @@ public interface WorldStateArchive {
boolean isWorldStateAvailable(Hash rootHash, Hash blockHash);
@Deprecated
Optional<MutableWorldState> getMutable(long blockNumber, boolean isPersistingState);
Optional<MutableWorldState> getMutable(Hash rootHash, Hash blockHash, boolean isPersistingState);

@ -21,8 +21,9 @@ import static org.hyperledger.besu.ethereum.bonsai.BonsaiWorldStateKeyValueStora
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;
import org.hyperledger.besu.datatypes.Hash;
@ -123,10 +124,6 @@ public class BonsaiWorldStateArchiveTest {
bonsaiWorldStateArchive =
new BonsaiWorldStateArchive(storageProvider, blockchain, 12, layeredWorldStatesByHash);
final BlockHeader blockHeader = blockBuilder.number(0).buildHeader();
final BytesValueRLPOutput rlpLog = new BytesValueRLPOutput();
final TrieLogLayer trieLogLayer = new TrieLogLayer();
trieLogLayer.setBlockHash(blockHeader.getHash());
trieLogLayer.writeTo(rlpLog);
when(blockchain.getBlockHeader(eq(blockHeader.getHash()))).thenReturn(Optional.of(blockHeader));
@ -145,12 +142,11 @@ public class BonsaiWorldStateArchiveTest {
final Map layeredWorldStatesByHash = mock(HashMap.class);
bonsaiWorldStateArchive =
new BonsaiWorldStateArchive(storageProvider, blockchain, 12, layeredWorldStatesByHash);
spy(new BonsaiWorldStateArchive(storageProvider, blockchain, 12, layeredWorldStatesByHash));
var updater = spy(bonsaiWorldStateArchive.getUpdater());
when(bonsaiWorldStateArchive.getUpdater()).thenReturn(updater);
final BlockHeader blockHeader = blockBuilder.number(0).buildHeader();
final BytesValueRLPOutput rlpLog = new BytesValueRLPOutput();
final TrieLogLayer trieLogLayer = new TrieLogLayer();
trieLogLayer.setBlockHash(blockHeader.getHash());
trieLogLayer.writeTo(rlpLog);
when(blockchain.getBlockHeader(eq(blockHeader.getHash()))).thenReturn(Optional.of(blockHeader));
when(blockchain.getBlockHeader(eq(Hash.ZERO))).thenReturn(Optional.of(blockHeader));
@ -160,7 +156,8 @@ public class BonsaiWorldStateArchiveTest {
// verify is not trying to get the trie log layer to rollback when block is present
verify(layeredWorldStatesByHash).entrySet();
verifyNoMoreInteractions(layeredWorldStatesByHash);
verify(updater, times(0)).rollBack(any());
verify(updater, times(0)).rollForward(any());
}
@SuppressWarnings({"unchecked"})
@ -181,7 +178,9 @@ public class BonsaiWorldStateArchiveTest {
.thenReturn(mock(BonsaiLayeredWorldState.class, Answers.RETURNS_MOCKS));
bonsaiWorldStateArchive =
new BonsaiWorldStateArchive(storageProvider, blockchain, 12, layeredWorldStatesByHash);
spy(new BonsaiWorldStateArchive(storageProvider, blockchain, 12, layeredWorldStatesByHash));
var updater = spy(bonsaiWorldStateArchive.getUpdater());
when(bonsaiWorldStateArchive.getUpdater()).thenReturn(updater);
// initial persisted state hash key
when(blockchain.getBlockHeader(eq(Hash.ZERO))).thenReturn(Optional.of(blockHeaderChainA));
@ -198,6 +197,7 @@ public class BonsaiWorldStateArchiveTest {
verify(layeredWorldStatesByHash).containsKey(eq(blockHeaderChainB.getHash()));
verify(layeredWorldStatesByHash).get(eq(blockHeaderChainB.getHash()));
verify(layeredWorldStatesByHash).entrySet();
verifyNoMoreInteractions(layeredWorldStatesByHash);
verify(updater, times(1)).rollBack(any());
verify(updater, times(1)).rollForward(any());
}
}
