Experimental Backup and Restore (#1235)

Experimental backup and restore via the `operator x-backup-state` and `operator x-restore-state` CLI commands. Besu must not be running while a backup or restore is performed. Restore has only been tested into an empty database, not on top of an existing one.

This feature is not advised for production use at the moment.
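For illustration only, a backup followed by a restore into a fresh data directory might be invoked roughly as follows. The paths and block number are placeholders, and the `--data-path` option is assumed to point at the node's data directory; the subcommand flags (`--backup-path`, `--block`, `--compression-enabled`) are the ones introduced in this change, and `--help` is available on both subcommands.

```bash
# Stop the node first: backup and restore need exclusive access to the database.

# Back up chain data and world state as of block 100
besu --data-path=/opt/besu/data operator x-backup-state \
    --backup-path=/opt/besu/backup --block=100

# Restore into a fresh, empty data directory
besu --data-path=/opt/besu/restored operator x-restore-state \
    --backup-path=/opt/besu/backup
```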

Signed-off-by: Danno Ferrin <danno.ferrin@gmail.com>
  1. CHANGELOG.md (4 lines changed)
  2. acceptance-tests/tests/build.gradle (14 lines changed)
  3. acceptance-tests/tests/src/test/java/org/hyperledger/besu/tests/acceptance/backup/BackupRoundTripAcceptanceTest.java (193 lines changed)
  4. besu/build.gradle (3 lines changed)
  5. besu/src/main/java/org/hyperledger/besu/cli/subcommands/operator/BackupState.java (124 lines changed)
  6. besu/src/main/java/org/hyperledger/besu/cli/subcommands/operator/OperatorSubCommand.java (7 lines changed)
  7. besu/src/main/java/org/hyperledger/besu/cli/subcommands/operator/RestoreState.java (348 lines changed)
  8. ethereum/api/build.gradle (2 lines changed)
  9. ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/query/StateBackupService.java (467 lines changed)
  10. ethereum/api/src/test/java/org/hyperledger/besu/ethereum/api/query/StateBackupServiceTest.java (68 lines changed)
  11. ethereum/trie/src/main/java/org/hyperledger/besu/ethereum/trie/CompactEncoding.java (2 lines changed)
  12. ethereum/trie/src/main/java/org/hyperledger/besu/ethereum/trie/MerklePatriciaTrie.java (2 lines changed)
  13. ethereum/trie/src/main/java/org/hyperledger/besu/ethereum/trie/PersistVisitor.java (86 lines changed)
  14. ethereum/trie/src/main/java/org/hyperledger/besu/ethereum/trie/RestoreVisitor.java (227 lines changed)
  15. ethereum/trie/src/main/java/org/hyperledger/besu/ethereum/trie/SimpleMerklePatriciaTrie.java (6 lines changed)
  16. ethereum/trie/src/main/java/org/hyperledger/besu/ethereum/trie/StorageEntriesCollector.java (2 lines changed)
  17. ethereum/trie/src/main/java/org/hyperledger/besu/ethereum/trie/StoredMerklePatriciaTrie.java (6 lines changed)
  18. ethereum/trie/src/main/java/org/hyperledger/besu/ethereum/trie/TrieIterator.java (13 lines changed)
  19. ethereum/trie/src/test/java/org/hyperledger/besu/ethereum/trie/TrieIteratorTest.java (2 lines changed)

@@ -4,6 +4,10 @@
### Additions and Improvements
* Experimental offline backup and restore has been added via the `operator x-backup-state` and
`operator x-restore-state` CLI commands. Data formats will be fluid for as long as the `x-`
prefix is present in the CLI, so it is advised not to rely on these backups for disaster recovery.
### Bug Fixes
#### Previously identified known issues

@@ -12,24 +12,20 @@
*/
dependencies {
implementation project(':crypto')
testImplementation project(':acceptance-tests:dsl')
testImplementation project(':enclave')
testImplementation 'org.awaitility:awaitility'
testImplementation project(':consensus:clique')
testImplementation project(':ethereum:permissioning')
testImplementation project(':util')
testImplementation project(':plugin-api')
testImplementation project(':besu')
testImplementation project(':config')
testImplementation project(':consensus:clique')
implementation project(':crypto')
testImplementation project(':enclave')
testImplementation project(':ethereum:api')
testImplementation project(':ethereum:core')
testImplementation project(':ethereum:rlp')
testImplementation project(':privacy-contracts')
testImplementation project(path: ':ethereum:core', configuration: 'testSupportArtifacts')
testImplementation project(':ethereum:permissioning')
testImplementation project(':ethereum:rlp')
testImplementation project(':plugin-api')
testImplementation project(':privacy-contracts')
testImplementation project(':testutil')
testImplementation project(':util')

@@ -0,0 +1,193 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*
*/
package org.hyperledger.besu.tests.acceptance.backup;
import static java.util.Collections.singletonList;
import static org.assertj.core.api.Assertions.assertThat;
import org.hyperledger.besu.config.JsonUtil;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.Wei;
import org.hyperledger.besu.tests.acceptance.AbstractPreexistingNodeTest;
import org.hyperledger.besu.tests.acceptance.database.DatabaseMigrationAcceptanceTest;
import org.hyperledger.besu.tests.acceptance.dsl.WaitUtils;
import org.hyperledger.besu.tests.acceptance.dsl.blockchain.Amount;
import org.hyperledger.besu.tests.acceptance.dsl.node.BesuNode;
import org.hyperledger.besu.tests.acceptance.dsl.node.configuration.BesuNodeConfigurationBuilder;
import java.io.IOException;
import java.math.BigInteger;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.function.Function;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.jetbrains.annotations.NotNull;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
@RunWith(Parameterized.class)
public class BackupRoundTripAcceptanceTest extends AbstractPreexistingNodeTest {
private final Path backupPath;
private final Path restorePath;
private final Path rebackupPath;
@SuppressWarnings({"unused", "FieldCanBeLocal"})
private final List<AccountData> testAccounts;
@SuppressWarnings({"unused", "FieldCanBeLocal"})
private final long expectedChainHeight;
public BackupRoundTripAcceptanceTest(
final String testName,
final String dataPath,
final long expectedChainHeight,
final List<AccountData> testAccounts)
throws IOException {
super(testName, dataPath);
this.expectedChainHeight = expectedChainHeight;
this.testAccounts = testAccounts;
backupPath = Files.createTempDirectory("backup");
backupPath.toFile().deleteOnExit();
restorePath = Files.createTempDirectory("restore");
restorePath.toFile().deleteOnExit();
rebackupPath = Files.createTempDirectory("rebackup");
rebackupPath.toFile().deleteOnExit();
}
@Parameters(name = "{0}")
public static Object[][] getParameters() {
return new Object[][] {
// First 10 blocks of ropsten
new Object[] {
"Before versioning was enabled",
"version0",
0xA,
singletonList(
new AccountData(
"0xd1aeb42885a43b72b518182ef893125814811048",
BigInteger.valueOf(0xA),
Wei.fromHexString("0x2B5E3AF16B1880000"))),
},
new Object[] {
"After versioning was enabled and using multiple RocksDB columns",
"version1",
0xA,
singletonList(
new AccountData(
"0xd1aeb42885a43b72b518182ef893125814811048",
BigInteger.valueOf(0xA),
Wei.fromHexString("0x2B5E3AF16B1880000")))
}
};
}
@Before
public void setUp() throws Exception {
final URL rootURL = DatabaseMigrationAcceptanceTest.class.getResource(dataPath);
hostDataPath = copyDataDir(rootURL);
final Path databaseArchive =
Paths.get(
DatabaseMigrationAcceptanceTest.class
.getResource(String.format("%s/besu-db-archive.tar.gz", dataPath))
.toURI());
extract(databaseArchive, hostDataPath.toAbsolutePath().toString());
}
@Test
public void backupRoundtripAndBack() throws IOException {
// backup from existing files
final BesuNode backupNode =
besu.createNode(
"backup " + testName,
configureNodeCommands(
hostDataPath,
"operator",
"x-backup-state",
"--backup-path=" + backupPath.toString(),
"--block=100"));
cluster.startNode(backupNode);
WaitUtils.waitFor(60, () -> backupNode.verify(exitedSuccessfully));
final ObjectNode backupManifest =
JsonUtil.objectNodeFromString(
Files.readString(backupPath.resolve("besu-backup-manifest.json")));
// restore to a new directory
final BesuNode restoreNode =
besu.createNode(
"restore " + testName,
configureNodeCommands(
restorePath,
"operator",
"x-restore-state",
"--backup-path=" + backupPath.toString()));
cluster.startNode(restoreNode);
WaitUtils.waitFor(60, () -> restoreNode.verify(exitedSuccessfully));
// start up the backed-up version and assert some details
final BesuNode runningNode = besu.createNode(testName, this::configureNode);
cluster.start(runningNode);
// height matches
blockchain.currentHeight(expectedChainHeight).verify(runningNode);
// accounts have value
testAccounts.forEach(
accountData ->
accounts
.createAccount(Address.fromHexString(accountData.getAccountAddress()))
.balanceAtBlockEquals(
Amount.wei(accountData.getExpectedBalance().toBigInteger()),
accountData.getBlock())
.verify(runningNode));
runningNode.stop();
// backup from the restore
final BesuNode rebackupBesuNode =
besu.createNode(
"rebackup " + testName,
configureNodeCommands(
restorePath,
"operator",
"x-backup-state",
"--backup-path=" + rebackupPath.toString(),
"--block=100"));
cluster.startNode(rebackupBesuNode);
WaitUtils.waitFor(60, () -> rebackupBesuNode.verify(exitedSuccessfully));
final ObjectNode rebackupManifest =
JsonUtil.objectNodeFromString(
Files.readString(rebackupPath.resolve("besu-backup-manifest.json")));
// expect that the backup and rebackup manifests match
assertThat(rebackupManifest).isEqualTo(backupManifest);
}
@NotNull
private Function<BesuNodeConfigurationBuilder, BesuNodeConfigurationBuilder>
configureNodeCommands(final Path dataPath, final String... commands) {
return nodeBuilder -> super.configureNode(nodeBuilder).dataPath(dataPath).run(commands);
}
}

@@ -43,6 +43,7 @@ dependencies {
implementation project(':ethereum:permissioning')
implementation project(':ethereum:retesteth')
implementation project(':ethereum:rlp')
implementation project(':ethereum:trie')
implementation project(':ethereum:stratum')
implementation project(':ethereum:ethstats')
implementation project(':metrics:core')
@@ -64,6 +65,8 @@ dependencies {
implementation 'org.apache.tuweni:tuweni-config'
implementation 'org.apache.tuweni:tuweni-units'
implementation 'org.springframework.security:spring-security-crypto'
implementation 'org.xerial.snappy:snappy-java'
runtimeOnly 'org.apache.logging.log4j:log4j-slf4j-impl'
runtimeOnly 'com.splunk.logging:splunk-library-javalogging'

@@ -0,0 +1,124 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*
*/
package org.hyperledger.besu.cli.subcommands.operator;
import static com.google.common.base.Preconditions.checkArgument;
import static org.hyperledger.besu.cli.DefaultCommandValues.MANDATORY_LONG_FORMAT_HELP;
import org.hyperledger.besu.BesuInfo;
import org.hyperledger.besu.controller.BesuController;
import org.hyperledger.besu.ethereum.api.query.StateBackupService;
import org.hyperledger.besu.ethereum.api.query.StateBackupService.BackupStatus;
import org.hyperledger.besu.ethereum.chain.MutableBlockchain;
import org.hyperledger.besu.ethereum.eth.manager.EthScheduler;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import java.io.File;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.LockSupport;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;
import picocli.CommandLine.ParentCommand;
@Command(
name = "x-backup-state",
description = "Backups up the state and accounts of a specified block.",
mixinStandardHelpOptions = true)
public class BackupState implements Runnable {
@Option(
names = "--block",
paramLabel = MANDATORY_LONG_FORMAT_HELP,
description = "The block to perform the backup at (default: calculated chain head)",
arity = "1..1")
private final Long block = Long.MAX_VALUE;
@Option(
names = "--backup-path",
required = true,
paramLabel = MANDATORY_LONG_FORMAT_HELP,
description = "The path to store the backup files.",
arity = "1..1")
private final File backupDir = null;
@Option(
names = {"--compression-enabled"},
description = "Enable data compression",
arity = "1")
private final Boolean compress = true;
@ParentCommand private OperatorSubCommand parentCommand;
@Override
public void run() {
checkArgument(
parentCommand.parentCommand.dataDir().toFile().exists(),
"DataDir (the blockchain being backed up) does not exist.");
checkArgument(
backupDir.exists() || backupDir.mkdirs(),
"Backup directory does not exist and cannot be created.");
final BesuController besuController = createBesuController();
final MutableBlockchain blockchain = besuController.getProtocolContext().getBlockchain();
final WorldStateStorage worldStateStorage =
besuController.getProtocolContext().getWorldStateArchive().getWorldStateStorage();
final EthScheduler scheduler = new EthScheduler(1, 1, 1, 1, new NoOpMetricsSystem());
try {
final long targetBlock = Math.min(blockchain.getChainHeadBlockNumber(), this.block);
final StateBackupService backup =
new StateBackupService(
BesuInfo.version(), blockchain, backupDir.toPath(), scheduler, worldStateStorage);
final BackupStatus status = backup.requestBackup(targetBlock, compress, Optional.empty());
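// 2^256 / 100: a reference value for expressing progress through the 256-bit account key space as a percentage.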
final double refValue = Math.pow(2, 256) / 100.0d;
while (status.isBackingUp()) {
if (status.getTargetBlockNum() != status.getStoredBlockNum()) {
System.out.printf(
"Chain Progress - %,d of %,d (%5.2f%%)%n",
status.getStoredBlockNum(),
status.getTargetBlockNum(),
status.getStoredBlockNum() * 100.0d / status.getTargetBlockNum());
} else {
System.out.printf(
"State Progress - %6.3f%% / %,d Accounts / %,d Storage Nodes%n",
status.getCurrentAccountBytes().toUnsignedBigInteger().doubleValue() / refValue,
status.getAccountCount(),
status.getStorageCount());
}
LockSupport.parkNanos(TimeUnit.SECONDS.toNanos(10));
}
System.out.printf(
"Backup complete%n Accounts: %,d%n Code Size: %,d%nState Entries: %,d%n",
status.getAccountCount(), status.getCodeSize(), status.getStorageCount());
} finally {
scheduler.stop();
try {
scheduler.awaitStop();
} catch (final InterruptedException e) {
// ignore
}
}
}
private BesuController createBesuController() {
return parentCommand.parentCommand.buildController();
}
}

@@ -30,7 +30,12 @@ import picocli.CommandLine.Spec;
name = COMMAND_NAME,
description = "Operator related actions such as generating configuration and caches.",
mixinStandardHelpOptions = true,
subcommands = {GenerateBlockchainConfig.class, GenerateLogBloomCache.class})
subcommands = {
GenerateBlockchainConfig.class,
GenerateLogBloomCache.class,
BackupState.class,
RestoreState.class
})
public class OperatorSubCommand implements Runnable {
public static final String COMMAND_NAME = "operator";

@@ -0,0 +1,348 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*
*/
package org.hyperledger.besu.cli.subcommands.operator;
import static org.hyperledger.besu.cli.DefaultCommandValues.MANDATORY_LONG_FORMAT_HELP;
import static org.hyperledger.besu.ethereum.trie.CompactEncoding.bytesToPath;
import org.hyperledger.besu.config.JsonUtil;
import org.hyperledger.besu.controller.BesuController;
import org.hyperledger.besu.ethereum.api.query.StateBackupService;
import org.hyperledger.besu.ethereum.chain.MutableBlockchain;
import org.hyperledger.besu.ethereum.core.Block;
import org.hyperledger.besu.ethereum.core.BlockBody;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.BlockHeaderFunctions;
import org.hyperledger.besu.ethereum.core.Hash;
import org.hyperledger.besu.ethereum.core.TransactionReceipt;
import org.hyperledger.besu.ethereum.mainnet.MainnetBlockHeaderFunctions;
import org.hyperledger.besu.ethereum.rlp.BytesValueRLPInput;
import org.hyperledger.besu.ethereum.rlp.RLPInput;
import org.hyperledger.besu.ethereum.trie.Node;
import org.hyperledger.besu.ethereum.trie.PersistVisitor;
import org.hyperledger.besu.ethereum.trie.RestoreVisitor;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.function.BiFunction;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.xerial.snappy.Snappy;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;
import picocli.CommandLine.ParentCommand;
@Command(
name = "x-restore-state",
description = "Restores the chain from a previously generated backup-state.",
mixinStandardHelpOptions = true)
public class RestoreState implements Runnable {
private static final Logger LOG = LogManager.getLogger();
@Option(
names = "--backup-path",
required = true,
paramLabel = MANDATORY_LONG_FORMAT_HELP,
description = "The path to store the backup files.",
arity = "1..1")
private final Path backupDir = null;
@ParentCommand private OperatorSubCommand parentCommand;
private static final int TRIE_NODE_COMMIT_BATCH_SIZE = 100;
private long targetBlock;
private long accountCount;
private long trieNodeCount;
private boolean compressed;
private BesuController besuController;
private WorldStateStorage.Updater updater;
private Path accountFileName(final int fileNumber, final boolean compressed) {
return StateBackupService.accountFileName(backupDir, targetBlock, fileNumber, compressed);
}
private Path headerFileName(final int fileNumber, final boolean compressed) {
return StateBackupService.headerFileName(backupDir, fileNumber, compressed);
}
private Path bodyFileName(final int fileNumber, final boolean compressed) {
return StateBackupService.bodyFileName(backupDir, fileNumber, compressed);
}
private Path receiptFileName(final int fileNumber, final boolean compressed) {
return StateBackupService.receiptFileName(backupDir, fileNumber, compressed);
}
@Override
public void run() {
try {
final ObjectNode manifest =
JsonUtil.objectNodeFromString(
Files.readString(backupDir.resolve("besu-backup-manifest.json")));
compressed = manifest.get("compressed").asBoolean(false);
targetBlock = manifest.get("targetBlock").asLong();
accountCount = manifest.get("accountCount").asLong();
besuController = createBesuController();
restoreBlocks();
restoreAccounts();
LOG.info("Restore complete");
} catch (final IOException e) {
LOG.error("Error restoring state", e);
}
}
private void restoreBlocks() throws IOException {
try (final RollingFileReader headerReader =
new RollingFileReader(this::headerFileName, compressed);
final RollingFileReader bodyReader = new RollingFileReader(this::bodyFileName, compressed);
final RollingFileReader receiptReader =
new RollingFileReader(this::receiptFileName, compressed)) {
final MutableBlockchain blockchain = besuController.getProtocolContext().getBlockchain();
// the restored range includes the target block itself, so the loop condition is <= rather than <.
for (int i = 0; i <= targetBlock; i++) {
if (i % 100000 == 0) {
LOG.info("Loading chain data {} / {}", i, targetBlock);
}
final byte[] headerEntry = headerReader.readBytes();
final byte[] bodyEntry = bodyReader.readBytes();
final byte[] receiptEntry = receiptReader.readBytes();
final BlockHeaderFunctions functions = new MainnetBlockHeaderFunctions();
final BlockHeader header =
BlockHeader.readFrom(
new BytesValueRLPInput(Bytes.wrap(headerEntry), false, true), functions);
final BlockBody body =
BlockBody.readFrom(
new BytesValueRLPInput(Bytes.wrap(bodyEntry), false, true), functions);
final RLPInput receiptsRlp = new BytesValueRLPInput(Bytes.wrap(receiptEntry), false, true);
final int receiptsCount = receiptsRlp.enterList();
final List<TransactionReceipt> receipts = new ArrayList<>(receiptsCount);
for (int j = 0; j < receiptsCount; j++) {
receipts.add(TransactionReceipt.readFrom(receiptsRlp, true));
}
receiptsRlp.leaveList();
blockchain.appendBlock(new Block(header, body), receipts);
}
}
LOG.info("Chain data loaded");
}
@SuppressWarnings("UnusedVariable")
private void restoreAccounts() throws IOException {
newWorldStateUpdater();
int storageBranchCount = 0;
int storageExtensionCount = 0;
int storageLeafCount = 0;
final PersistVisitor<Bytes> accountPersistVisitor =
new PersistVisitor<>(this::updateAccountState);
Node<Bytes> root = accountPersistVisitor.initialRoot();
try (final RollingFileReader reader =
new RollingFileReader(this::accountFileName, compressed)) {
for (int i = 0; i < accountCount; i++) {
if (i % 100000 == 0) {
LOG.info("Loading account data {} / {}", i, accountCount);
}
final byte[] accountEntry = reader.readBytes();
final BytesValueRLPInput accountInput =
new BytesValueRLPInput(Bytes.of(accountEntry), false, true);
final int length = accountInput.enterList();
if (length != 3) {
throw new RuntimeException("Unexpected account length " + length);
}
final Bytes32 trieKey = accountInput.readBytes32();
final Bytes accountRlp = accountInput.readBytes();
final Bytes code = accountInput.readBytes();
final StateTrieAccountValue trieAccount =
StateTrieAccountValue.readFrom(new BytesValueRLPInput(accountRlp, false, true));
if (!trieAccount.getCodeHash().equals(Hash.hash(code))) {
throw new RuntimeException("Code hash doesn't match");
}
if (code.size() > 0) {
updateCode(code);
}
final RestoreVisitor<Bytes> accountTrieWriteVisitor =
new RestoreVisitor<>(t -> t, accountRlp, accountPersistVisitor);
root = root.accept(accountTrieWriteVisitor, bytesToPath(trieKey));
final PersistVisitor<Bytes> storagePersistVisitor =
new PersistVisitor<>(this::updateAccountStorage);
Node<Bytes> storageRoot = storagePersistVisitor.initialRoot();
while (true) {
final byte[] trieEntry = reader.readBytes();
final BytesValueRLPInput trieInput =
new BytesValueRLPInput(Bytes.of(trieEntry), false, true);
final int len = trieInput.enterList();
if (len == 0) {
break;
}
if (len != 2) {
throw new RuntimeException("Unexpected storage trie entry length " + len);
}
final Bytes32 storageTrieKey = Bytes32.wrap(trieInput.readBytes());
final Bytes storageTrieValue = Bytes.wrap(trieInput.readBytes());
final RestoreVisitor<Bytes> storageTrieWriteVisitor =
new RestoreVisitor<>(t -> t, storageTrieValue, storagePersistVisitor);
storageRoot = storageRoot.accept(storageTrieWriteVisitor, bytesToPath(storageTrieKey));
trieInput.leaveList();
}
storagePersistVisitor.persist(storageRoot);
storageBranchCount += storagePersistVisitor.getBranchNodeCount();
storageExtensionCount += storagePersistVisitor.getExtensionNodeCount();
storageLeafCount += storagePersistVisitor.getLeafNodeCount();
accountInput.leaveList();
}
}
accountPersistVisitor.persist(root);
updater.commit();
LOG.info("Account BranchNodes: {} ", accountPersistVisitor.getBranchNodeCount());
LOG.info("Account ExtensionNodes: {} ", accountPersistVisitor.getExtensionNodeCount());
LOG.info("Account LeafNodes: {} ", accountPersistVisitor.getLeafNodeCount());
LOG.info("Storage BranchNodes: {} ", storageBranchCount);
LOG.info("Storage LeafNodes: {} ", storageExtensionCount);
LOG.info("Storage ExtensionNodes: {} ", storageLeafCount);
LOG.info("Account data loaded");
}
private void newWorldStateUpdater() {
if (updater != null) {
updater.commit();
}
final WorldStateStorage worldStateStorage =
besuController.getProtocolContext().getWorldStateArchive().getWorldStateStorage();
updater = worldStateStorage.updater();
}
private void maybeCommitUpdater() {
if (trieNodeCount % TRIE_NODE_COMMIT_BATCH_SIZE == 0) {
newWorldStateUpdater();
}
}
private void updateCode(final Bytes code) {
maybeCommitUpdater();
updater.putCode(code);
}
private void updateAccountState(final Bytes32 key, final Bytes value) {
maybeCommitUpdater();
updater.putAccountStateTrieNode(key, value);
trieNodeCount++;
}
private void updateAccountStorage(final Bytes32 key, final Bytes value) {
maybeCommitUpdater();
updater.putAccountStorageTrieNode(key, value);
trieNodeCount++;
}
static class RollingFileReader implements Closeable {
final BiFunction<Integer, Boolean, Path> filenameGenerator;
final boolean compressed;
int currentPosition;
int fileNumber;
FileInputStream in;
final DataInputStream index;
boolean done = false;
RollingFileReader(
final BiFunction<Integer, Boolean, Path> filenameGenerator, final boolean compressed)
throws IOException {
this.filenameGenerator = filenameGenerator;
this.compressed = compressed;
final Path firstInputFile = filenameGenerator.apply(fileNumber, compressed);
in = new FileInputStream(firstInputFile.toFile());
index =
new DataInputStream(
new FileInputStream(StateBackupService.dataFileToIndex(firstInputFile).toFile()));
fileNumber = index.readInt();
currentPosition = index.readUnsignedShort();
}
byte[] readBytes() throws IOException {
byte[] raw;
try {
final int start = currentPosition;
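// The next index entry holds the file number and start offset of the following record; the offset difference gives this record's length.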
final int nextFile = index.readUnsignedShort();
currentPosition = index.readInt();
if (nextFile == fileNumber) {
final int len = currentPosition - start;
raw = new byte[len];
//noinspection ResultOfMethodCallIgnored
in.read(raw);
} else {
raw = in.readAllBytes();
in.close();
fileNumber = nextFile;
in = new FileInputStream(filenameGenerator.apply(fileNumber, compressed).toFile());
if (currentPosition != 0) {
//noinspection ResultOfMethodCallIgnored
in.skip(currentPosition);
}
}
} catch (final EOFException eofe) {
// this happens when we read the last value, where there is no next index.
raw = in.readAllBytes();
done = true;
}
return compressed ? Snappy.uncompress(raw) : raw;
}
@Override
public void close() throws IOException {
in.close();
index.close();
}
public boolean isDone() {
return done;
}
}
@SuppressWarnings("unused")
BesuController createBesuController() {
return parentCommand.parentCommand.buildController();
}
}

@@ -41,6 +41,7 @@ dependencies {
implementation project(':ethereum:p2p')
implementation project(':ethereum:permissioning')
implementation project(':ethereum:rlp')
implementation project(':ethereum:trie')
implementation project(':metrics:core')
implementation project(':nat')
implementation project(':plugin-api')
@@ -59,6 +60,7 @@ dependencies {
implementation 'org.apache.tuweni:tuweni-units'
implementation 'org.bouncycastle:bcprov-jdk15on'
implementation 'org.springframework.security:spring-security-crypto'
implementation 'org.xerial.snappy:snappy-java'
runtimeOnly 'org.bouncycastle:bcpkix-jdk15on'

@@ -0,0 +1,467 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*
*/
package org.hyperledger.besu.ethereum.api.query;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import org.hyperledger.besu.config.JsonUtil;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.Block;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.TransactionReceipt;
import org.hyperledger.besu.ethereum.eth.manager.EthScheduler;
import org.hyperledger.besu.ethereum.rlp.BytesValueRLPInput;
import org.hyperledger.besu.ethereum.rlp.BytesValueRLPOutput;
import org.hyperledger.besu.ethereum.trie.Node;
import org.hyperledger.besu.ethereum.trie.StoredMerklePatriciaTrie;
import org.hyperledger.besu.ethereum.trie.TrieIterator;
import org.hyperledger.besu.ethereum.trie.TrieIterator.State;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.plugin.data.Hash;
import java.io.Closeable;
import java.io.DataOutputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.BiFunction;
import java.util.function.Function;
import com.fasterxml.jackson.annotation.JsonGetter;
import com.fasterxml.jackson.annotation.JsonIgnore;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.xerial.snappy.Snappy;
public class StateBackupService {
private static final Logger LOG = LogManager.getLogger();
private static final long MAX_FILE_SIZE = 1 << 28; // 256 MiB max file size
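// Written after each account's storage entries: an empty RLP list that marks the end of that account's data.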
private static final Bytes ACCOUNT_END_MARKER;
static {
final BytesValueRLPOutput endMarker = new BytesValueRLPOutput();
endMarker.startList();
endMarker.endList();
ACCOUNT_END_MARKER = endMarker.encoded();
}
private final String besuVesion;
private final Lock submissionLock = new ReentrantLock();
private final EthScheduler scheduler;
private final Blockchain blockchain;
private final WorldStateStorage worldStateStorage;
private final BackupStatus backupStatus = new BackupStatus();
private Path backupDir;
private RollingFileWriter accountFileWriter;
public StateBackupService(
final String besuVesion,
final Blockchain blockchain,
final Path backupDir,
final EthScheduler scheduler,
final WorldStateStorage worldStateStorage) {
this.besuVesion = besuVesion;
this.blockchain = blockchain;
this.backupDir = backupDir;
this.scheduler = scheduler;
this.worldStateStorage = worldStateStorage;
}
public Path getBackupDir() {
return backupDir;
}
public BackupStatus requestBackup(
final long block, final boolean compress, final Optional<Path> backupDir) {
boolean requestAccepted = false;
try {
if (submissionLock.tryLock(100, TimeUnit.MILLISECONDS)) {
try {
if (!backupStatus.isBackingUp()) {
requestAccepted = true;
this.backupDir = backupDir.orElse(this.backupDir);
backupStatus.targetBlock = block;
backupStatus.compressed = compress;
backupStatus.currentAccount = Bytes32.ZERO;
scheduler.scheduleComputationTask(
() -> {
try {
return backup(block, compress);
} catch (final IOException ioe) {
LOG.error("Error writing backups", ioe);
return backupStatus;
}
});
}
} finally {
submissionLock.unlock();
}
}
} catch (final InterruptedException e) {
// ignore
}
backupStatus.requestAccepted = requestAccepted;
return backupStatus;
}
public static Path dataFileToIndex(final Path dataName) {
return Path.of(dataName.toString().replaceAll("(.*)[-.]\\d\\d\\d\\d\\.(.)dat", "$1.$2idx"));
}
public static Path accountFileName(
final Path backupDir,
final long targetBlock,
final int fileNumber,
final boolean compressed) {
return backupDir.resolve(
String.format(
"besu-account-backup-%08d-%04d.%sdat",
targetBlock, fileNumber, compressed ? "c" : "r"));
}
public static Path headerFileName(
final Path backupDir, final int fileNumber, final boolean compressed) {
return backupDir.resolve(
String.format("besu-header-backup-%04d.%sdat", fileNumber, compressed ? "c" : "r"));
}
public static Path bodyFileName(
final Path backupDir, final int fileNumber, final boolean compressed) {
return backupDir.resolve(
String.format("besu-body-backup-%04d.%sdat", fileNumber, compressed ? "c" : "r"));
}
public static Path receiptFileName(
final Path backupDir, final int fileNumber, final boolean compressed) {
return backupDir.resolve(
String.format("besu-receipt-backup-%04d.%sdat", fileNumber, compressed ? "c" : "r"));
}
private Path accountFileName(final int fileNumber, final boolean compressed) {
return accountFileName(backupDir, backupStatus.targetBlock, fileNumber, compressed);
}
private Path headerFileName(final int fileNumber, final boolean compressed) {
return headerFileName(backupDir, fileNumber, compressed);
}
private Path bodyFileName(final int fileNumber, final boolean compressed) {
return bodyFileName(backupDir, fileNumber, compressed);
}
private Path receiptFileName(final int fileNumber, final boolean compressed) {
return receiptFileName(backupDir, fileNumber, compressed);
}
private BackupStatus backup(final long block, final boolean compress) throws IOException {
try {
checkArgument(
block >= 0 && block <= blockchain.getChainHeadBlockNumber(),
"Backup Block must be within blockchain");
backupStatus.targetBlock = block;
backupStatus.compressed = compress;
backupStatus.currentAccount = Bytes32.ZERO;
backupChaindata();
backupLeaves();
writeManifest();
return backupStatus;
} catch (final Throwable t) {
LOG.error("Unexpected error", t);
throw t;
}
}
private void writeManifest() throws IOException {
final Map<String, Object> manifest = new HashMap<>();
manifest.put("clientVersion", besuVesion);
manifest.put("compressed", backupStatus.compressed);
manifest.put("targetBlock", backupStatus.targetBlock);
manifest.put("accountCount", backupStatus.accountCount);
Files.write(
backupDir.resolve("besu-backup-manifest.json"),
JsonUtil.getJson(manifest).getBytes(StandardCharsets.UTF_8));
}
private void backupLeaves() throws IOException {
final Optional<BlockHeader> header = blockchain.getBlockHeader(backupStatus.targetBlock);
if (header.isEmpty()) {
backupStatus.currentAccount = null;
return;
}
final Optional<Bytes> worldStateRoot =
worldStateStorage.getAccountStateTrieNode(header.get().getStateRoot());
if (worldStateRoot.isEmpty()) {
backupStatus.currentAccount = null;
return;
}
try (final RollingFileWriter accountFileWriter =
new RollingFileWriter(this::accountFileName, backupStatus.compressed)) {
this.accountFileWriter = accountFileWriter;
final StoredMerklePatriciaTrie<Bytes32, Bytes> accountTrie =
new StoredMerklePatriciaTrie<>(
worldStateStorage::getAccountStateTrieNode,
header.get().getStateRoot(),
Function.identity(),
Function.identity());
accountTrie.visitLeafs(this::visitAccount);
backupStatus.currentAccount = null;
}
}
private TrieIterator.State visitAccount(final Bytes32 nodeKey, final Node<Bytes> node) {
if (node.getValue().isEmpty()) {
return State.CONTINUE;
}
backupStatus.currentAccount = nodeKey;
final Bytes nodeValue = node.getValue().orElse(Hash.EMPTY);
final StateTrieAccountValue account =
StateTrieAccountValue.readFrom(new BytesValueRLPInput(nodeValue, false));
final Bytes code = worldStateStorage.getCode(account.getCodeHash()).orElse(Bytes.EMPTY);
backupStatus.codeSize.addAndGet(code.size());
final BytesValueRLPOutput accountOutput = new BytesValueRLPOutput();
accountOutput.startList();
accountOutput.writeBytes(nodeKey); // trie hash
accountOutput.writeBytes(nodeValue); // account rlp
accountOutput.writeBytes(code); // code
accountOutput.endList();
try {
accountFileWriter.writeBytes(accountOutput.encoded().toArrayUnsafe());
} catch (final IOException ioe) {
LOG.error("Failure writing backup", ioe);
return State.STOP;
}
// storage is written for each leaf, otherwise the whole trie would have to fit in memory
final StoredMerklePatriciaTrie<Bytes32, Bytes> storageTrie =
new StoredMerklePatriciaTrie<>(
worldStateStorage::getAccountStateTrieNode,
account.getStorageRoot(),
Function.identity(),
Function.identity());
storageTrie.visitLeafs(
(storageKey, storageValue) ->
visitAccountStorage(storageKey, storageValue, accountFileWriter));
try {
accountFileWriter.writeBytes(ACCOUNT_END_MARKER.toArrayUnsafe());
} catch (final IOException ioe) {
LOG.error("Failure writing backup", ioe);
return State.STOP;
}
backupStatus.accountCount.incrementAndGet();
return State.CONTINUE;
}
private void backupChaindata() throws IOException {
try (final RollingFileWriter headerWriter =
new RollingFileWriter(this::headerFileName, backupStatus.compressed);
final RollingFileWriter bodyWriter =
new RollingFileWriter(this::bodyFileName, backupStatus.compressed);
final RollingFileWriter receiptsWriter =
new RollingFileWriter(this::receiptFileName, backupStatus.compressed)) {
for (int blockNumber = 0; blockNumber <= backupStatus.targetBlock; blockNumber++) {
final Optional<Block> block = blockchain.getBlockByNumber(blockNumber);
checkState(
block.isPresent(), "Block data for %s was not found in the archive", blockNumber);
final Optional<List<TransactionReceipt>> receipts =
blockchain.getTxReceipts(block.get().getHash());
checkState(
receipts.isPresent(), "Receipts for %s were not found in the archive", blockNumber);
final BytesValueRLPOutput headerOutput = new BytesValueRLPOutput();
block.get().getHeader().writeTo(headerOutput);
headerWriter.writeBytes(headerOutput.encoded().toArrayUnsafe());
final BytesValueRLPOutput bodyOutput = new BytesValueRLPOutput();
block.get().getBody().writeTo(bodyOutput);
bodyWriter.writeBytes(bodyOutput.encoded().toArrayUnsafe());
final BytesValueRLPOutput receiptsOutput = new BytesValueRLPOutput();
receiptsOutput.writeList(receipts.get(), TransactionReceipt::writeToWithRevertReason);
receiptsWriter.writeBytes(receiptsOutput.encoded().toArrayUnsafe());
backupStatus.storedBlock = blockNumber;
}
}
}
private TrieIterator.State visitAccountStorage(
final Bytes32 nodeKey, final Node<Bytes> node, final RollingFileWriter accountFileWriter) {
backupStatus.currentStorage = nodeKey;
final BytesValueRLPOutput output = new BytesValueRLPOutput();
output.startList();
output.writeBytes(nodeKey);
output.writeBytes(node.getValue().orElse(Bytes.EMPTY));
output.endList();
try {
accountFileWriter.writeBytes(output.encoded().toArrayUnsafe());
} catch (final IOException ioe) {
LOG.error("Failure writing backup", ioe);
return State.STOP;
}
backupStatus.storageCount.incrementAndGet();
return State.CONTINUE;
}
static class RollingFileWriter implements Closeable {
final BiFunction<Integer, Boolean, Path> filenameGenerator;
final boolean compressed;
int currentSize;
int fileNumber;
FileOutputStream out;
final DataOutputStream index;
RollingFileWriter(
final BiFunction<Integer, Boolean, Path> filenameGenerator, final boolean compressed)
throws FileNotFoundException {
this.filenameGenerator = filenameGenerator;
this.compressed = compressed;
currentSize = 0;
fileNumber = 0;
final Path firstOutputFile = filenameGenerator.apply(fileNumber, compressed);
out = new FileOutputStream(firstOutputFile.toFile());
index = new DataOutputStream(new FileOutputStream(dataFileToIndex(firstOutputFile).toFile()));
}
void writeBytes(final byte[] bytes) throws IOException {
final byte[] finalBytes;
if (compressed) {
finalBytes = Snappy.compress(bytes);
} else {
finalBytes = bytes;
}
int pos = currentSize;
currentSize += finalBytes.length;
if (currentSize > MAX_FILE_SIZE) {
out.close();
out = new FileOutputStream(filenameGenerator.apply(++fileNumber, compressed).toFile());
currentSize = finalBytes.length;
pos = 0;
}
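// Each index entry records the file number (short) followed by the record's start offset within that file (int).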
index.writeShort(fileNumber);
index.writeInt(pos);
out.write(finalBytes);
}
@Override
public void close() throws IOException {
out.close();
index.close();
}
}
public static final class BackupStatus {
long targetBlock;
long storedBlock;
boolean compressed;
Bytes32 currentAccount;
Bytes32 currentStorage;
AtomicLong accountCount = new AtomicLong(0);
AtomicLong codeSize = new AtomicLong(0);
AtomicLong storageCount = new AtomicLong(0);
boolean requestAccepted;
@JsonGetter
public String getTargetBlock() {
return "0x" + Long.toHexString(targetBlock);
}
@JsonGetter
public String getStoredBlock() {
return "0x" + Long.toHexString(storedBlock);
}
@JsonGetter
public String getCurrentAccount() {
return currentAccount.toHexString();
}
@JsonGetter
public String getCurrentStorage() {
return currentStorage.toHexString();
}
@JsonGetter
public boolean isBackingUp() {
return currentAccount != null;
}
@JsonIgnore
public long getAccountCount() {
return accountCount.get();
}
@JsonIgnore
public long getCodeSize() {
return codeSize.get();
}
@JsonIgnore
public long getStorageCount() {
return storageCount.get();
}
@JsonIgnore
public Bytes getCurrentAccountBytes() {
return currentAccount;
}
@JsonIgnore
public long getStoredBlockNum() {
return storedBlock;
}
@JsonIgnore
public long getTargetBlockNum() {
return targetBlock;
}
}
}

@@ -0,0 +1,68 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*
*/
package org.hyperledger.besu.ethereum.api.query;
import static org.assertj.core.api.Assertions.assertThat;
import java.nio.file.Path;
import org.junit.Test;
public class StateBackupServiceTest {
private final Path backupDir = Path.of("/tmp/backup");
@Test
public void fileIndexRenames() {
assertThat(StateBackupService.dataFileToIndex(Path.of("/tmp/besu-blocks-0000.cdat")).toString())
.isEqualTo("/tmp/besu-blocks.cidx");
assertThat(StateBackupService.dataFileToIndex(Path.of("/tmp/besu.blocks.0000.rdat")).toString())
.isEqualTo("/tmp/besu.blocks.ridx");
}
@Test
public void leafFileName() {
assertThat(StateBackupService.accountFileName(backupDir, 4_000_000, 42, false).toString())
.isEqualTo("/tmp/backup/besu-account-backup-04000000-0042.rdat");
assertThat(StateBackupService.accountFileName(backupDir, 6_000_000, 46, true).toString())
.isEqualTo("/tmp/backup/besu-account-backup-06000000-0046.cdat");
}
@Test
public void headerFileName() {
assertThat(StateBackupService.headerFileName(backupDir, 42, false).toString())
.isEqualTo("/tmp/backup/besu-header-backup-0042.rdat");
assertThat(StateBackupService.headerFileName(backupDir, 46, true).toString())
.isEqualTo("/tmp/backup/besu-header-backup-0046.cdat");
}
@Test
public void bodyFileName() {
assertThat(StateBackupService.bodyFileName(backupDir, 42, false).toString())
.isEqualTo("/tmp/backup/besu-body-backup-0042.rdat");
assertThat(StateBackupService.bodyFileName(backupDir, 46, true).toString())
.isEqualTo("/tmp/backup/besu-body-backup-0046.cdat");
}
@Test
public void receiptFileName() {
assertThat(StateBackupService.receiptFileName(backupDir, 42, false).toString())
.isEqualTo("/tmp/backup/besu-receipt-backup-0042.rdat");
assertThat(StateBackupService.receiptFileName(backupDir, 46, true).toString())
.isEqualTo("/tmp/backup/besu-receipt-backup-0046.cdat");
}
}

@@ -19,7 +19,7 @@ import static com.google.common.base.Preconditions.checkArgument;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.MutableBytes;
abstract class CompactEncoding {
public abstract class CompactEncoding {
private CompactEncoding() {}
static final byte LEAF_TERMINATOR = 0x10;

@@ -88,4 +88,6 @@ public interface MerklePatriciaTrie<K, V> {
Map<Bytes32, V> entriesFrom(Bytes32 startKeyHash, int limit);
void visitAll(Consumer<Node<V>> visitor);
void visitLeafs(final TrieIterator.LeafHandler<V> handler);
}

@@ -0,0 +1,86 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*
*/
package org.hyperledger.besu.ethereum.trie;
import java.util.function.BiConsumer;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
public class PersistVisitor<V> implements NodeVisitor<V> {
private int branchNodeCount = 0;
private int extensionNodeCount = 0;
private int leafNodeCount = 0;
private final BiConsumer<Bytes32, Bytes> writer;
public PersistVisitor(final BiConsumer<Bytes32, Bytes> writer) {
this.writer = writer;
}
public Node<V> initialRoot() {
return NullNode.instance();
}
public void persist(final Node<V> root) {
if (root instanceof BranchNode) {
visit((BranchNode<V>) root);
} else if (root instanceof ExtensionNode) {
visit((ExtensionNode<V>) root);
} else if (root instanceof LeafNode) {
visit((LeafNode<V>) root);
} else if (root instanceof NullNode) {
visit((NullNode<V>) root);
}
}
@Override
public void visit(final BranchNode<V> branchNode) {
writer.accept(branchNode.getHash(), branchNode.getRlp());
branchNodeCount++;
branchNode.getChildren().forEach(node -> node.accept(this));
}
@Override
public void visit(final ExtensionNode<V> extensionNode) {
writer.accept(extensionNode.getHash(), extensionNode.getRlp());
extensionNodeCount++;
extensionNode.getChild().accept(this);
}
@Override
public void visit(final LeafNode<V> leafNode) {
writer.accept(leafNode.getHash(), leafNode.getRlp());
leafNodeCount++;
}
@Override
public void visit(final NullNode<V> nullNode) {}
public int getBranchNodeCount() {
return branchNodeCount;
}
public int getExtensionNodeCount() {
return extensionNodeCount;
}
public int getLeafNodeCount() {
return leafNodeCount;
}
}

@@ -0,0 +1,227 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.trie;
import java.util.List;
import java.util.Optional;
import java.util.function.Function;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
public class RestoreVisitor<V> implements PathNodeVisitor<V> {
private final NodeFactory<V> nodeFactory;
private final V value;
private final NodeVisitor<V> persistVisitor;
public RestoreVisitor(
final Function<V, Bytes> valueSerializer,
final V value,
final NodeVisitor<V> persistVisitor) {
this.nodeFactory = new DefaultNodeFactory<>(valueSerializer);
this.value = value;
this.persistVisitor = persistVisitor;
}
@Override
public Node<V> visit(final ExtensionNode<V> extensionNode, final Bytes path) {
final Bytes extensionPath = extensionNode.getPath();
final int commonPathLength = extensionPath.commonPrefixLength(path);
assert commonPathLength < path.size()
: "Visiting path doesn't end with a non-matching terminator";
if (commonPathLength == extensionPath.size()) {
final Node<V> newChild = extensionNode.getChild().accept(this, path.slice(commonPathLength));
return extensionNode.replaceChild(newChild);
}
// path diverges before the end of the extension - create a new branch
final byte leafIndex = path.get(commonPathLength);
final Bytes leafPath = path.slice(commonPathLength + 1);
final byte extensionIndex = extensionPath.get(commonPathLength);
final Node<V> updatedExtension =
extensionNode.replacePath(extensionPath.slice(commonPathLength + 1));
final Node<V> leaf = nodeFactory.createLeaf(leafPath, value);
final Node<V> branch =
nodeFactory.createBranch(leafIndex, leaf, extensionIndex, updatedExtension);
if (commonPathLength > 0) {
return nodeFactory.createExtension(extensionPath.slice(0, commonPathLength), branch);
} else {
return branch;
}
}
@Override
public Node<V> visit(final BranchNode<V> branchNode, final Bytes path) {
assert path.size() > 0 : "Visiting path doesn't end with a non-matching terminator";
BranchNode<V> workingNode = branchNode;
final byte childIndex = path.get(0);
if (childIndex == CompactEncoding.LEAF_TERMINATOR) {
return workingNode.replaceValue(value);
}
for (byte i = 0; i < childIndex; i++) {
workingNode = persistNode(workingNode, i);
}
final Node<V> updatedChild = workingNode.child(childIndex).accept(this, path.slice(1));
return workingNode.replaceChild(childIndex, updatedChild);
}
private BranchNode<V> persistNode(final BranchNode<V> parent, final byte index) {
final Node<V> child = parent.getChildren().get(index);
if (!(child instanceof StoredNode)) {
child.accept(persistVisitor);
final PersistedNode<V> persistedNode =
new PersistedNode<>(null, child.getHash(), child.getRlpRef());
return (BranchNode<V>) parent.replaceChild(index, persistedNode);
} else {
return parent;
}
}
@Override
public Node<V> visit(final LeafNode<V> leafNode, final Bytes path) {
final Bytes leafPath = leafNode.getPath();
final int commonPathLength = leafPath.commonPrefixLength(path);
// Check if the current leaf node should be replaced
if (commonPathLength == leafPath.size() && commonPathLength == path.size()) {
return nodeFactory.createLeaf(leafPath, value);
}
assert commonPathLength < leafPath.size() && commonPathLength < path.size()
: "Should not have consumed non-matching terminator";
// The current leaf path must be split to accommodate the new value.
final byte newLeafIndex = path.get(commonPathLength);
final Bytes newLeafPath = path.slice(commonPathLength + 1);
final byte updatedLeafIndex = leafPath.get(commonPathLength);
final Node<V> updatedLeaf = leafNode.replacePath(leafPath.slice(commonPathLength + 1));
final Node<V> leaf = nodeFactory.createLeaf(newLeafPath, value);
final Node<V> branch =
nodeFactory.createBranch(updatedLeafIndex, updatedLeaf, newLeafIndex, leaf);
if (commonPathLength > 0) {
return nodeFactory.createExtension(leafPath.slice(0, commonPathLength), branch);
} else {
return branch;
}
}
@Override
public Node<V> visit(final NullNode<V> nullNode, final Bytes path) {
return nodeFactory.createLeaf(path, value);
}
static class PersistedNode<V> implements Node<V> {
private final Bytes path;
private final Bytes32 hash;
private final Bytes refRlp;
PersistedNode(final Bytes path, final Bytes32 hash, final Bytes refRlp) {
this.path = path;
this.hash = hash;
this.refRlp = refRlp;
}
/** @return True if the node needs to be persisted. */
@Override
public boolean isDirty() {
return false;
}
/** Marks the node as being modified (needs to be persisted); */
@Override
public void markDirty() {
throw new UnsupportedOperationException(
"A persisted node cannot ever be dirty since it's loaded from storage");
}
@Override
public Node<V> accept(final PathNodeVisitor<V> visitor, final Bytes path) {
// do nothing
return this;
}
@Override
public void accept(final NodeVisitor<V> visitor) {
// do nothing
}
@Override
public Bytes getPath() {
return path;
}
@Override
public Optional<V> getValue() {
throw new UnsupportedOperationException(
"A persisted node cannot have a value, as it's already been restored.");
}
@Override
public List<Node<V>> getChildren() {
return List.of();
}
@Override
public Bytes getRlp() {
throw new UnsupportedOperationException(
"A persisted node cannot have rlp, as it's already been restored.");
}
@Override
public Bytes getRlpRef() {
return refRlp;
}
@Override
public boolean isReferencedByHash() {
// Persisted nodes represent only nodes that are referenced by hash
return true;
}
@Override
public Bytes32 getHash() {
return hash;
}
@Override
public Node<V> replacePath(final Bytes path) {
throw new UnsupportedOperationException(
"A persisted node cannot be replaced, as it's already been restored.");
}
@Override
public void unload() {
throw new UnsupportedOperationException(
"A persisted node cannot be unloaded, as it's already been restored.");
}
@Override
public String print() {
return "PersistedNode:" + "\n\tPath: " + getPath() + "\n\tHash: " + getHash();
}
}
}

@@ -102,4 +102,10 @@ public class SimpleMerklePatriciaTrie<K extends Bytes, V> implements MerklePatri
public void visitAll(final Consumer<Node<V>> visitor) {
root.accept(new AllNodesVisitor<>(visitor));
}
@Override
public void visitLeafs(final TrieIterator.LeafHandler<V> handler) {
final TrieIterator<V> visitor = new TrieIterator<>(handler, true);
root.accept(visitor, CompactEncoding.bytesToPath(Bytes32.ZERO));
}
}

@@ -34,7 +34,7 @@ public class StorageEntriesCollector<V> implements TrieIterator.LeafHandler<V> {
final Node<V> root, final Bytes32 startKeyHash, final int limit) {
final StorageEntriesCollector<V> entriesCollector =
new StorageEntriesCollector<>(startKeyHash, limit);
final TrieIterator<V> visitor = new TrieIterator<>(entriesCollector);
final TrieIterator<V> visitor = new TrieIterator<>(entriesCollector, false);
root.accept(visitor, CompactEncoding.bytesToPath(startKeyHash));
return entriesCollector.getValues();
}

@@ -129,6 +129,12 @@ public class StoredMerklePatriciaTrie<K extends Bytes, V> implements MerklePatri
root.accept(new AllNodesVisitor<>(visitor));
}
@Override
public void visitLeafs(final TrieIterator.LeafHandler<V> handler) {
final TrieIterator<V> visitor = new TrieIterator<>(handler, true);
root.accept(visitor, CompactEncoding.bytesToPath(Bytes32.ZERO));
}
@Override
public Bytes32 getRootHash() {
return root.getHash();

@@ -26,9 +26,11 @@ public class TrieIterator<V> implements PathNodeVisitor<V> {
private final Deque<Bytes> paths = new ArrayDeque<>();
private final LeafHandler<V> leafHandler;
private State state = State.SEARCHING;
private final boolean unload;
public TrieIterator(final LeafHandler<V> leafHandler) {
public TrieIterator(final LeafHandler<V> leafHandler, final boolean unload) {
this.leafHandler = leafHandler;
this.unload = unload;
}
@Override
@@ -42,6 +44,9 @@ public class TrieIterator<V> implements PathNodeVisitor<V> {
paths.push(node.getPath());
node.getChild().accept(this, remainingPath);
if (unload) {
node.getChild().unload();
}
paths.pop();
return node;
}
@@ -60,7 +65,11 @@ public class TrieIterator<V> implements PathNodeVisitor<V> {
paths.push(node.getPath());
for (byte i = iterateFrom; i < BranchNode.RADIX && state.continueIterating(); i++) {
paths.push(Bytes.of(i));
node.child(i).accept(this, remainingPath);
final Node<V> child = node.child(i);
child.accept(this, remainingPath);
if (unload) {
child.unload();
}
paths.pop();
}
paths.pop();

@@ -53,7 +53,7 @@ public class TrieIteratorTest {
private final DefaultNodeFactory<String> nodeFactory =
new DefaultNodeFactory<>(this::valueSerializer);
private final TrieIterator<String> iterator = new TrieIterator<>(leafHandler);
private final TrieIterator<String> iterator = new TrieIterator<>(leafHandler, false);
private Bytes valueSerializer(final String value) {
return Bytes.wrap(value.getBytes(StandardCharsets.UTF_8));
