Release Candidate 2023.2.0 ( dev -> main ) (#4399)
* check leader for N blocks * fix * fix * Cleanup and fix update pub keys. * Rotate leader. * fix fix fix fix fix * Cleaned. * Cache for `GetLeaderPubKeyFromCoinbase`, removed `NthNextHmyExt`. * activate epoch * comment activation * 295 epoch * Fix failed tests. * Fixed code review. * Fix review "--port flag". * Fix review comments. * Returned locks in rotateLeader. * Rebased onto dev. * Commented golangci. * staged stream sync v1.0 * fix protocol tests * fix spell * remove unused struct * fix rosetta test * add comments and refactor verify sig * add comments, remove extra function * add comment * refactor errors, rename metrics * refactor p2p host creation * fix initsync and host creation * fix short range hash chain * fix beacon node detection for p2p protocol * refactor stream peer cooldown and fix protocol beacon node field * refactor p2p host and routing * fix p2p discovery test issue * add MaxAdvertiseWaitTime to handle advertisements interval and address stream connection issue * terminal print the peer id and proto id * fix boot complete message when node is shut down * add new config option ( ForceReachabilityPublic ) to fix local-net consensus issue * fix self query issue * fix test NewDNSSyncingPeerProvider * [testnet] disable leader rotation * fix discovery issue for legacy sync * add watermark low/high options for p2p connection manager * add test for new conn manager flags * fix dedent * add comment to inform about p2p connection manager options * fix max height issue * add a separate log for get max height error * fix log * feat: triesInMemory flag * fix: panic if TriesInMemory is 1 to 2 * in progress. * consensus check is forked * fix * Cleanup and fix update pub keys. * fix fix fix fix fix * activate epoch * EpochTBD for leader rotation epoch. * 295 epoch * Decider no longer requires public keys as a dependency. (#4289) * Consensus doesn't require anymore `Node` as a circular dependency. * Proper blockchain initialization. * Rwlock consensus. 
* Removed channels. * Removed view change locks. * Removed timers locks. * Removed fbft locks. * Removed multiSigMutex locks. * Removed leader locks. * Removed additional locks and isViewChange. * Added locks detected by race. * Added locks detected by race. * Locks for start. * Removed additional logs. * Removed additional locks. * Removed additional locks. * Make func private. * Make VerifyBlock private. * Make IsLeader private. * Make ParseFBFTMessage private. * Fix remove locks. * Added additional locks. * Added additional locks. * Added readSignatureBitmapPayload locks. * Added HandleMessageUpdate locks. * Added LastMile locks. * Locks for IsValidatorInCommittee. * Fixed locks. * Fixed tests. * Fixed tests. * Fixed lock. * Rebased over leader rotation. * Fix formatting. * Rebased onto dev. * in progress. * consensus check is forked * update master * fix leader * check leader for N blocks * fix * fix * Cleanup and fix update pub keys. * Rotate leader. * fix fix fix fix fix * Cleaned. * Cache for `GetLeaderPubKeyFromCoinbase`, removed `NthNextHmyExt`. * comment activation * 295 epoch * Fix failed tests. * Fixed code review. * Fix review comments. * Merged leader rotation. * Rebased on dev. * Rebased on dev. * Fix usage of private methods. * Fix usage of private methods. * Fix usage of private methods. * Removed deadcode, LockedFBFTPhase. * Fix review comment. * Fix review comment. * Go mod tidy. * Set to EpochTBD. * Fix tests. * [core] fix state handling of self destruct If a contract self destructs to self and then receives funds within the same transaction, it is possible for its stale state to be saved. This change removes that possibility by checking for deleted state objects before returning them. * Fixed race error. * rpc: add configurable http and `eth_call` timeout * remove default timeouts * store the evm call timeout in rosetta object * [cmd] actually apply ToRPCServerConfig * Removed unused method. * Rotate external leaders on non-beacon chains. 
* Fix nil panic. * in progress. * in progress. * in progress. * consensus check is forked * update master * fix leader * check leader for N blocks * fix * fix * Cleanup and fix update pub keys. * Rotate leader. * fix fix fix fix fix * Cleaned. * Cache for `GetLeaderPubKeyFromCoinbase`, removed `NthNextHmyExt`. * Fixed code review. * Fix review comments. * Returned locks in rotateLeader. * Rebased onto dev. * staged stream sync v1.0 * refactor errors, rename metrics * fix p2p discovery test issue * add watermark low/high options for p2p connection manager * fix dedent * in progress. * consensus check is forked * fix * Cleanup and fix update pub keys. * fix fix fix fix fix * activate epoch * EpochTBD for leader rotation epoch. * 295 epoch * Decider no longer requires public keys as a dependency. (#4289) * Consensus doesn't require anymore `Node` as a circular dependency. * Proper blockchain initialization. * Rwlock consensus. * Removed channels. * Removed view change locks. * Removed multiSigMutex locks. * Removed leader locks. * Removed additional locks and isViewChange. * Added locks detected by race. * Added locks detected by race. * Locks for start. * Removed additional locks. * Removed additional locks. * Make func private. * Make VerifyBlock private. * Make IsLeader private. * Make ParseFBFTMessage private. * Fix remove locks. * Added additional locks. * Added additional locks. * Added readSignatureBitmapPayload locks. * Added HandleMessageUpdate locks. * Added LastMile locks. * Locks for IsValidatorInCommittee. * Fixed locks. * Fixed tests. * Fixed lock. * Rebased over leader rotation. * in progress. * consensus check is forked * update master * fix leader * check leader for N blocks * fix * fix * Cleanup and fix update pub keys. * Rotate leader. * fix fix fix fix fix * Cleaned. * Cache for `GetLeaderPubKeyFromCoinbase`, removed `NthNextHmyExt`. * Fix failed tests. * Fixed code review. * Fix review comments. * Merged leader rotation. * Rebased on dev. 
* Rebased on dev. * Fix usage of private methods. * Fix usage of private methods. * Fix usage of private methods. * Removed deadcode, LockedFBFTPhase. * Fix review comment. * Go mod tidy. * remove default timeouts * Rotate external leaders on non-beacon chains. * Fix nil panic. * Fixes. * Update singleton.go * evm: don't return extcode for validators Due to technical debt, validator information is stored in the code field of the address. The code field can be accessed in Solidity for an arbitrary address using `extcodesize`, `extcodehash`, and `extcodecopy` or helper commands (such as `address.code.Length`). The presence of this field is used by contract developers to (erroneously) deny smart contract access to other smart contracts (and therefore, validators). This PR fixes that oversight by returning the same values as other EOAs for known validator addresses. Obviously, it needs a hard fork that will be scheduled separately. * Fix context passing. * Clean up code. * Removed engine dependency. * Fix possible panic. * Clean up code. * Network type. * Fix tests. * Revert "Removed engine dependency." (#4392) * Revert "Fix tests." This reverts commitpull/4420/head v2023.2.0597ba2d6f1
. * Revert "Network type." This reverts commit 5e1878aedc
. * Revert "Clean up code." This reverts commit 15885f4c9b
. * Revert "Fix possible panic." This reverts commit 1a70d5eb66
. * Revert "Removed engine dependency." This reverts commit 8c2ff803f7
. * gitignore the cache folder (#4389) * stable localnet with external validator (#4388) * stable localnet with external validator * ignore deploy config file comments * reduce node launched in localnet * update makefile * localnet configuration - add more fn * fix validator information command typo * Configurable tx pool. (#4240) * AccountQueue & GlobalQueue. * Lifetime duration. * [pool] make flags configurable * [pool] use 4096 as default `GlobalSlots` * [rosetta] update default values of tx pool * [test] update value to default * PriceLimit and PriceBump. * Fix tests. * Fix price limit & bump. * Updated, fixed migrate version and tests. * Rebased. * Fix go toml version. --------- Co-authored-by: Konstantin <k.potapov@softpro.com> Co-authored-by: MaxMustermann2 <82761650+MaxMustermann2@users.noreply.github.com> * Upgrade rawdb and statedb codes to add the latest functionalities of ethdb (#4374) * added bloom filter * upgrade rawdb and statedb * change var name and remove extra comments * return back fake storage in case if we need it for test later * add the previous change back * remove some extra entries from go.mod * fix WritePreimages to use batch * mark unused functions which are ported over from eth --------- Co-authored-by: Casey Gardiner <117784577+ONECasey@users.noreply.github.com> * update all ethereum rawdb pointers to use harmony rawdb (#4395) * Fix reduce node dependencies. (#4379) * Fix. * Fix. * Fix pool init. * Clean up. * add prefix for contract code (#4397) * Rotate external validators for non-beacon shards. (#4373) * Rotate only non beacon shards. * Rotate all shards, but only hmy validators for beacon. * Fix type. * Revert "Fix type." This reverts commit 0a8b506c763d9f8609abff7395ba32b18e43b149. * Revert "Rotate all shards, but only hmy validators for beacon." This reverts commit 70b09e2de81aa2cbffae3ccdfd4e334e7d938759. * Fixed failed test. 
* Revert "Revert "Rotate all shards, but only hmy validators for beacon."" This reverts commit 66cfaa9817488be60ed5b5cfee1fe833ede237c8. * Frequency by slots count. * Fix config. * First validator produce rest blocks. * Updated. * Add lock. * Add prefix for validator wrapper (#4402) * add separate prefix for validator wrapper * update comments * make read/write backward compatible * add validator codes to stats * goimports * goimports accessor_state * add snapshot feature to state db (#4406) * Typed cache & Node cleanup. (#4409) * Channels usage through methods. * Fix retry count. Removed proposedBlock. * keysToAddrs rewritten to lrucache. * core, internal/configs: HIP28-v2 fee collection (#4410) * core, internal/configs: HIP28-v2 fee collection Based on the Snapshot vote that has passed, collect 50% of the fee to a community maintained account and the remainder to an account used to pay for infrastructure costs. Note that these accounts need to be decided and set in the code at this moment, and the feature needs to be activated by setting the `FeeCollectEpoch` of the `ChainConfig` object. The implementation for devnet is a bit different than compared to others because the feature was activated on devnet with 100% collection to an account. I have handled this case separately in `devnet.go`. * test: add test for StateTransition.ApplyMessage The objective of this unit test is to check that the fees of a transaction are appropriately credited to the fee collectors that are set. This means, for a transaction of 21,000 gas limit and 100 gwei gas price, two equal fee collectors get 10,500 * 100 gwei each. In addition, to be clear that the refund mechanism (in case a transaction with extra gas comes in) works, the tested transaction has a 50,000 gas limit of which only 21,000 gas limit is actually consumed. 
* sharding/config: clarify local fee collector pk * sharding/config: set testnet fee collector same as devnet * test: add test for truncated fee distribution * sharding/config: set fee collector addresses * test: hardcode the expected fee collected * goimports * params/config: set testnet fee collect epoch Schedule testnet hard fork epoch to be 1296, which begins around the time 2023-04-28 07:14:20+00:00 * params/config: schedule devnee fee collection Signed-off-by: MaxMustermann2 <82761650+MaxMustermann2@users.noreply.github.com> * Minor: removed time.Sleep from tests. (#4412) * Provide current time as argument. * Fix test. * Fix naming. * Mainnet Release Candidate 2023.1.2 (#4376) (#4414) * remove default timeouts * store the evm call timeout in rosetta object * [cmd] actually apply ToRPCServerConfig * Removed unused method. * Rotate external leaders on non-beacon chains. * Fix nil panic. * Bump github.com/aws/aws-sdk-go from 1.33.0 to 1.34.0 Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.33.0 to 1.34.0. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/v1.34.0/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.33.0...v1.34.0) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production ... * Bump github.com/ipld/go-ipld-prime from 0.9.0 to 0.19.0 Bumps [github.com/ipld/go-ipld-prime](https://github.com/ipld/go-ipld-prime) from 0.9.0 to 0.19.0. - [Release notes](https://github.com/ipld/go-ipld-prime/releases) - [Changelog](https://github.com/ipld/go-ipld-prime/blob/master/CHANGELOG.md) - [Commits](https://github.com/ipld/go-ipld-prime/compare/v0.9.0...v0.19.0) --- updated-dependencies: - dependency-name: github.com/ipld/go-ipld-prime dependency-type: indirect ... * Bump golang.org/x/net from 0.3.0 to 0.7.0 Bumps [golang.org/x/net](https://github.com/golang/net) from 0.3.0 to 0.7.0. 
- [Release notes](https://github.com/golang/net/releases) - [Commits](https://github.com/golang/net/compare/v0.3.0...v0.7.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: indirect ... * Small fixes. * in progress. * in progress. * in progress. * consensus check is forked * update master * fix leader * check leader for N blocks * fix * fix * Cleanup and fix update pub keys. * Rotate leader. * fix fix fix fix fix * Cleaned. * Cache for `GetLeaderPubKeyFromCoinbase`, removed `NthNextHmyExt`. * activate epoch * comment activation * 295 epoch * Fix failed tests. * Fixed code review. * Fix review "--port flag". * Fix review comments. * Returned locks in rotateLeader. * Rebased onto dev. * Commented golangci. * staged stream sync v1.0 * fix protocol tests * fix spell * remove unused struct * fix rosetta test * add comments and refactor verify sig * add comments, remove extra function * add comment * refactor errors, rename metrics * refactor p2p host creation * fix initsync and host creation * fix short range hash chain * fix beacon node detection for p2p protocol * refactor stream peer cooldown and fix protocol beacon node field * refactor p2p host and routing * fix p2p discovery test issue * add MaxAdvertiseWaitTime to handle advertisements interval and address stream connection issue * terminal print the peer id and proto id * fix boot complete message when node is shut down * add new config option ( ForceReachabilityPublic ) to fix local-net consensus issue * fix self query issue * fix test NewDNSSyncingPeerProvider * [testnet] disable leader rotation * fix discovery issue for legacy sync * add watermark low/high options for p2p connection manager * add test for new conn manager flags * fix dedent * add comment to inform about p2p connection manager options * fix max height issue * add a separate log for get max height error * fix log * feat: triesInMemory flag * fix: panic if TriesInMemory is 1 to 2 * in progress. 
* consensus check is forked * fix * Cleanup and fix update pub keys. * fix fix fix fix fix * activate epoch * EpochTBD for leader rotation epoch. * 295 epoch * Decider no longer requires public keys as a dependency. (#4289) * Consensus doesn't require anymore `Node` as a circular dependency. * Proper blockchain initialization. * Rwlock consensus. * Removed channels. * Removed view change locks. * Removed timers locks. * Removed fbft locks. * Removed multiSigMutex locks. * Removed leader locks. * Removed additional locks and isViewChange. * Added locks detected by race. * Added locks detected by race. * Locks for start. * Removed additional logs. * Removed additional locks. * Removed additional locks. * Make func private. * Make VerifyBlock private. * Make IsLeader private. * Make ParseFBFTMessage private. * Fix remove locks. * Added additional locks. * Added additional locks. * Added readSignatureBitmapPayload locks. * Added HandleMessageUpdate locks. * Added LastMile locks. * Locks for IsValidatorInCommittee. * Fixed locks. * Fixed tests. * Fixed tests. * Fixed lock. * Rebased over leader rotation. * Fix formatting. * Rebased onto dev. * in progress. * consensus check is forked * update master * fix leader * check leader for N blocks * fix * fix * Cleanup and fix update pub keys. * Rotate leader. * fix fix fix fix fix * Cleaned. * Cache for `GetLeaderPubKeyFromCoinbase`, removed `NthNextHmyExt`. * comment activation * 295 epoch * Fix failed tests. * Fixed code review. * Fix review comments. * Merged leader rotation. * Rebased on dev. * Rebased on dev. * Fix usage of private methods. * Fix usage of private methods. * Fix usage of private methods. * Removed deadcode, LockedFBFTPhase. * Fix review comment. * Fix review comment. * Go mod tidy. * Set to EpochTBD. * Fix tests. * [core] fix state handling of self destruct If a contract self destructs to self and then receives funds within the same transaction, it is possible for its stale state to be saved. 
This change removes that possibility by checking for deleted state objects before returning them. * Fixed race error. * rpc: add configurable http and `eth_call` timeout * remove default timeouts * store the evm call timeout in rosetta object * [cmd] actually apply ToRPCServerConfig * Removed unused method. * Rotate external leaders on non-beacon chains. * Fix nil panic. * in progress. * in progress. * in progress. * consensus check is forked * update master * fix leader * check leader for N blocks * fix * fix * Cleanup and fix update pub keys. * Rotate leader. * fix fix fix fix fix * Cleaned. * Cache for `GetLeaderPubKeyFromCoinbase`, removed `NthNextHmyExt`. * Fixed code review. * Fix review comments. * Returned locks in rotateLeader. * Rebased onto dev. * staged stream sync v1.0 * refactor errors, rename metrics * fix p2p discovery test issue * add watermark low/high options for p2p connection manager * fix dedent * in progress. * consensus check is forked * fix * Cleanup and fix update pub keys. * fix fix fix fix fix * activate epoch * EpochTBD for leader rotation epoch. * 295 epoch * Decider no longer requires public keys as a dependency. (#4289) * Consensus doesn't require anymore `Node` as a circular dependency. * Proper blockchain initialization. * Rwlock consensus. * Removed channels. * Removed view change locks. * Removed multiSigMutex locks. * Removed leader locks. * Removed additional locks and isViewChange. * Added locks detected by race. * Added locks detected by race. * Locks for start. * Removed additional locks. * Removed additional locks. * Make func private. * Make VerifyBlock private. * Make IsLeader private. * Make ParseFBFTMessage private. * Fix remove locks. * Added additional locks. * Added additional locks. * Added readSignatureBitmapPayload locks. * Added HandleMessageUpdate locks. * Added LastMile locks. * Locks for IsValidatorInCommittee. * Fixed locks. * Fixed tests. * Fixed lock. * Rebased over leader rotation. * in progress. 
* consensus check is forked * update master * fix leader * check leader for N blocks * fix * fix * Cleanup and fix update pub keys. * Rotate leader. * fix fix fix fix fix * Cleaned. * Cache for `GetLeaderPubKeyFromCoinbase`, removed `NthNextHmyExt`. * Fix failed tests. * Fixed code review. * Fix review comments. * Merged leader rotation. * Rebased on dev. * Rebased on dev. * Fix usage of private methods. * Fix usage of private methods. * Fix usage of private methods. * Removed deadcode, LockedFBFTPhase. * Fix review comment. * Go mod tidy. * remove default timeouts * Rotate external leaders on non-beacon chains. * Fix nil panic. * Fixes. * Update singleton.go * evm: don't return extcode for validators Due to technical debt, validator information is stored in the code field of the address. The code field can be accessed in Solidity for an arbitrary address using `extcodesize`, `extcodehash`, and `extcodecopy` or helper commands (such as `address.code.Length`). The presence of this field is used by contract developers to (erroneously) deny smart contract access to other smart contracts (and therefore, validators). This PR fixes that oversight by returning the same values as other EOAs for known validator addresses. Obviously, it needs a hard fork that will be scheduled separately. * Fix context passing. * Clean up code. * Removed engine dependency. * Fix possible panic. * Clean up code. * Network type. * Fix tests. * Revert "Removed engine dependency." (#4392) * Revert "Fix tests." This reverts commit597ba2d6f1
. * Revert "Network type." This reverts commit 5e1878aedc
. * Revert "Clean up code." This reverts commit 15885f4c9b
. * Revert "Fix possible panic." This reverts commit 1a70d5eb66
. * Revert "Removed engine dependency." This reverts commit 8c2ff803f7
. * gitignore the cache folder (#4389) * stable localnet with external validator (#4388) * stable localnet with external validator * ignore deploy config file comments * reduce node launched in localnet * update makefile * localnet configuration - add more fn * fix validator information command typo --------- Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: Casey Gardiner <117784577+ONECasey@users.noreply.github.com> Co-authored-by: frozen <355847+Frozen@users.noreply.github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: “GheisMohammadi” <“Gheis.Mohammadi@gmail.com”> Co-authored-by: “GheisMohammadi” <36589218+GheisMohammadi@users.noreply.github.com> Co-authored-by: Sun Hyuk Ahn <sunhyukahn@Suns-MacBook-Pro.local> Co-authored-by: Soph <35721420+sophoah@users.noreply.github.com> * chore: merge `main` into `dev` (#4415) * Mainnet Release Candidate 2023.1.2 (#4376) * remove default timeouts * store the evm call timeout in rosetta object * [cmd] actually apply ToRPCServerConfig * Removed unused method. * Rotate external leaders on non-beacon chains. * Fix nil panic. * Bump github.com/aws/aws-sdk-go from 1.33.0 to 1.34.0 Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.33.0 to 1.34.0. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/v1.34.0/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.33.0...v1.34.0) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production ... Signed-off-by: dependabot[bot] <support@github.com> * Bump github.com/ipld/go-ipld-prime from 0.9.0 to 0.19.0 Bumps [github.com/ipld/go-ipld-prime](https://github.com/ipld/go-ipld-prime) from 0.9.0 to 0.19.0. 
- [Release notes](https://github.com/ipld/go-ipld-prime/releases) - [Changelog](https://github.com/ipld/go-ipld-prime/blob/master/CHANGELOG.md) - [Commits](https://github.com/ipld/go-ipld-prime/compare/v0.9.0...v0.19.0) --- updated-dependencies: - dependency-name: github.com/ipld/go-ipld-prime dependency-type: indirect ... Signed-off-by: dependabot[bot] <support@github.com> * Bump golang.org/x/net from 0.3.0 to 0.7.0 Bumps [golang.org/x/net](https://github.com/golang/net) from 0.3.0 to 0.7.0. - [Release notes](https://github.com/golang/net/releases) - [Commits](https://github.com/golang/net/compare/v0.3.0...v0.7.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: indirect ... Signed-off-by: dependabot[bot] <support@github.com> * Small fixes. * in progress. * in progress. * in progress. * consensus check is forked * update master * fix leader * check leader for N blocks * fix * fix * Cleanup and fix update pub keys. * Rotate leader. * fix fix fix fix fix * Cleaned. * Cache for `GetLeaderPubKeyFromCoinbase`, removed `NthNextHmyExt`. * activate epoch * comment activation * 295 epoch * Fix failed tests. * Fixed code review. * Fix review "--port flag". * Fix review comments. * Returned locks in rotateLeader. * Rebased onto dev. * Commented golangci. 
* staged stream sync v1.0 * fix protocol tests * fix spell * remove unused struct * fix rosetta test * add comments and refactor verify sig * add comments, remove extra function * add comment * refactor errors, rename metrics * refactor p2p host creation * fix initsync and host creation * fix short range hash chain * fix beacon node detection for p2p protocol * refactor stream peer cooldown and fix protocol beacon node field * refactor p2p host and routing * fix p2p discovery test issue * add MaxAdvertiseWaitTime to handle advertisements interval and address stream connection issue * terminal print the peer id and proto id * fix boot complete message when node is shut down * add new config option ( ForceReachabilityPublic ) to fix local-net consensus issue * fix self query issue * fix test NewDNSSyncingPeerProvider * [testnet] disable leader rotation * fix discovery issue for legacy sync * add watermark low/high options for p2p connection manager * add test for new conn manager flags * fix dedent * add comment to inform about p2p connection manager options * fix max height issue * add a separate log for get max height error * fix log * feat: triesInMemory flag * fix: panic if TriesInMemory is 1 to 2 * in progress. * consensus check is forked * fix * Cleanup and fix update pub keys. * fix fix fix fix fix * activate epoch * EpochTBD for leader rotation epoch. * 295 epoch * Decider no longer requires public keys as a dependency. (#4289) * Consensus doesn't require anymore `Node` as a circular dependency. * Proper blockchain initialization. * Rwlock consensus. * Removed channels. * Removed view change locks. * Removed timers locks. * Removed fbft locks. * Removed multiSigMutex locks. * Removed leader locks. * Removed additional locks and isViewChange. * Added locks detected by race. * Added locks detected by race. * Locks for start. * Removed additional logs. * Removed additional locks. * Removed additional locks. * Make func private. * Make VerifyBlock private. 
* Make IsLeader private. * Make ParseFBFTMessage private. * Fix remove locks. * Added additional locks. * Added additional locks. * Added readSignatureBitmapPayload locks. * Added HandleMessageUpdate locks. * Added LastMile locks. * Locks for IsValidatorInCommittee. * Fixed locks. * Fixed tests. * Fixed tests. * Fixed lock. * Rebased over leader rotation. * Fix formatting. * Rebased onto dev. * in progress. * consensus check is forked * update master * fix leader * check leader for N blocks * fix * fix * Cleanup and fix update pub keys. * Rotate leader. * fix fix fix fix fix * Cleaned. * Cache for `GetLeaderPubKeyFromCoinbase`, removed `NthNextHmyExt`. * comment activation * 295 epoch * Fix failed tests. * Fixed code review. * Fix review comments. * Merged leader rotation. * Rebased on dev. * Rebased on dev. * Fix usage of private methods. * Fix usage of private methods. * Fix usage of private methods. * Removed deadcode, LockedFBFTPhase. * Fix review comment. * Fix review comment. * Go mod tidy. * Set to EpochTBD. * Fix tests. * [core] fix state handling of self destruct If a contract self destructs to self and then receives funds within the same transaction, it is possible for its stale state to be saved. This change removes that possibility by checking for deleted state objects before returning them. * Fixed race error. * rpc: add configurable http and `eth_call` timeout * remove default timeouts * store the evm call timeout in rosetta object * [cmd] actually apply ToRPCServerConfig * Removed unused method. * Rotate external leaders on non-beacon chains. * Fix nil panic. * in progress. * in progress. * in progress. * consensus check is forked * update master * fix leader * check leader for N blocks * fix * fix * Cleanup and fix update pub keys. * Rotate leader. * fix fix fix fix fix * Cleaned. * Cache for `GetLeaderPubKeyFromCoinbase`, removed `NthNextHmyExt`. * Fixed code review. * Fix review comments. * Returned locks in rotateLeader. * Rebased onto dev. 
* staged stream sync v1.0 * refactor errors, rename metrics * fix p2p discovery test issue * add watermark low/high options for p2p connection manager * fix dedent * in progress. * consensus check is forked * fix * Cleanup and fix update pub keys. * fix fix fix fix fix * activate epoch * EpochTBD for leader rotation epoch. * 295 epoch * Decider no longer requires public keys as a dependency. (#4289) * Consensus doesn't require anymore `Node` as a circular dependency. * Proper blockchain initialization. * Rwlock consensus. * Removed channels. * Removed view change locks. * Removed multiSigMutex locks. * Removed leader locks. * Removed additional locks and isViewChange. * Added locks detected by race. * Added locks detected by race. * Locks for start. * Removed additional locks. * Removed additional locks. * Make func private. * Make VerifyBlock private. * Make IsLeader private. * Make ParseFBFTMessage private. * Fix remove locks. * Added additional locks. * Added additional locks. * Added readSignatureBitmapPayload locks. * Added HandleMessageUpdate locks. * Added LastMile locks. * Locks for IsValidatorInCommittee. * Fixed locks. * Fixed tests. * Fixed lock. * Rebased over leader rotation. * in progress. * consensus check is forked * update master * fix leader * check leader for N blocks * fix * fix * Cleanup and fix update pub keys. * Rotate leader. * fix fix fix fix fix * Cleaned. * Cache for `GetLeaderPubKeyFromCoinbase`, removed `NthNextHmyExt`. * Fix failed tests. * Fixed code review. * Fix review comments. * Merged leader rotation. * Rebased on dev. * Rebased on dev. * Fix usage of private methods. * Fix usage of private methods. * Fix usage of private methods. * Removed deadcode, LockedFBFTPhase. * Fix review comment. * Go mod tidy. * remove default timeouts * Rotate external leaders on non-beacon chains. * Fix nil panic. * Fixes. 
* Update singleton.go * evm: don't return extcode for validators Due to technical debt, validator information is stored in the code field of the address. The code field can be accessed in Solidity for an arbitrary address using `extcodesize`, `extcodehash`, and `extcodecopy` or helper commands (such as `address.code.Length`). The presence of this field is used by contract developers to (erroneously) deny smart contract access to other smart contracts (and therefore, validators). This PR fixes that oversight by returning the same values as other EOAs for known validator addresses. Obviously, it needs a hard fork that will be scheduled separately. * Fix context passing. * Clean up code. * Removed engine dependency. * Fix possible panic. * Clean up code. * Network type. * Fix tests. * Revert "Removed engine dependency." (#4392) * Revert "Fix tests." This reverts commit597ba2d6f1
. * Revert "Network type." This reverts commit 5e1878aedc
. * Revert "Clean up code." This reverts commit 15885f4c9b
. * Revert "Fix possible panic." This reverts commit 1a70d5eb66
. * Revert "Removed engine dependency." This reverts commit 8c2ff803f7
. * gitignore the cache folder (#4389) * stable localnet with external validator (#4388) * stable localnet with external validator * ignore deploy config file comments * reduce node launched in localnet * update makefile * localnet configuration - add more fn * fix validator information command typo --------- Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: MaxMustermann2 <82761650+MaxMustermann2@users.noreply.github.com> Co-authored-by: frozen <355847+Frozen@users.noreply.github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: “GheisMohammadi” <“Gheis.Mohammadi@gmail.com”> Co-authored-by: “GheisMohammadi” <36589218+GheisMohammadi@users.noreply.github.com> Co-authored-by: Sun Hyuk Ahn <sunhyukahn@Suns-MacBook-Pro.local> Co-authored-by: Soph <35721420+sophoah@users.noreply.github.com> * build: update pinned curl version (#4394) Per the Alpine Linux package repositories, the version for cURL included with v3.16 has changed to revision 6 * consensus: replace type assert with test (#4398) If `consensus.finalityCounter` does not have anything stored (for example in Syncing mode), the `Load()` returns an interface that cannot be automatically asserted to an `int64`. This results in the node crashing. This commit fixes that. * Turn pprof default on with local saved files (#3894) * Turn pprof default on with local saved files * [pprof] change interval from 600s to 3600s * Revert "Turn pprof default on with local saved files (#3894)" (#4400) This reverts commit78d26d7910
. --------- Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: Casey Gardiner <117784577+ONECasey@users.noreply.github.com> Co-authored-by: frozen <355847+Frozen@users.noreply.github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: “GheisMohammadi” <“Gheis.Mohammadi@gmail.com”> Co-authored-by: “GheisMohammadi” <36589218+GheisMohammadi@users.noreply.github.com> Co-authored-by: Sun Hyuk Ahn <sunhyukahn@Suns-MacBook-Pro.local> Co-authored-by: Soph <35721420+sophoah@users.noreply.github.com> Co-authored-by: Jacky Wang <jackyw.se@gmail.com> * internal/params: set validator code fix hard forks (#4413) * internal/params: schedule hard forks Signed-off-by: MaxMustermann2 <82761650+MaxMustermann2@users.noreply.github.com> * internal/params: set localnet fee collect epoch 2 Signed-off-by: MaxMustermann2 <82761650+MaxMustermann2@users.noreply.github.com> --------- Signed-off-by: MaxMustermann2 <82761650+MaxMustermann2@users.noreply.github.com> * internal/params: schedule HIP28v2 + val code fix (#4416) Signed-off-by: MaxMustermann2 <82761650+MaxMustermann2@users.noreply.github.com> * Dev fix conflicts. (#4417) * Mainnet Release Candidate 2023.1.2 (#4376) * remove default timeouts * store the evm call timeout in rosetta object * [cmd] actually apply ToRPCServerConfig * Removed unused method. * Rotate external leaders on non-beacon chains. * Fix nil panic. * Bump github.com/aws/aws-sdk-go from 1.33.0 to 1.34.0 Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.33.0 to 1.34.0. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/v1.34.0/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.33.0...v1.34.0) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] <support@github.com> * Bump github.com/ipld/go-ipld-prime from 0.9.0 to 0.19.0 Bumps [github.com/ipld/go-ipld-prime](https://github.com/ipld/go-ipld-prime) from 0.9.0 to 0.19.0. - [Release notes](https://github.com/ipld/go-ipld-prime/releases) - [Changelog](https://github.com/ipld/go-ipld-prime/blob/master/CHANGELOG.md) - [Commits](https://github.com/ipld/go-ipld-prime/compare/v0.9.0...v0.19.0) --- updated-dependencies: - dependency-name: github.com/ipld/go-ipld-prime dependency-type: indirect ... Signed-off-by: dependabot[bot] <support@github.com> * Bump golang.org/x/net from 0.3.0 to 0.7.0 Bumps [golang.org/x/net](https://github.com/golang/net) from 0.3.0 to 0.7.0. - [Release notes](https://github.com/golang/net/releases) - [Commits](https://github.com/golang/net/compare/v0.3.0...v0.7.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: indirect ... Signed-off-by: dependabot[bot] <support@github.com> * Small fixes. * in progress. * in progress. * in progress. * consensus check is forked * update master * fix leader * check leader for N blocks * fix * fix * Cleanup and fix update pub keys. * Rotate leader. * fix fix fix fix fix * Cleaned. * Cache for `GetLeaderPubKeyFromCoinbase`, removed `NthNextHmyExt`. * activate epoch * comment activation * 295 epoch * Fix failed tests. * Fixed code review. * Fix review "--port flag". * Fix review comments. * Returned locks in rotateLeader. * Rebased onto dev. * Commented golangci. 
* staged stream sync v1.0 * fix protocol tests * fix spell * remove unused struct * fix rosetta test * add comments and refactor verify sig * add comments, remove extra function * add comment * refactor errors, rename metrics * refactor p2p host creation * fix initsync and host creation * fix short range hash chain * fix beacon node detection for p2p protocol * refactor stream peer cooldown and fix protocol beacon node field * refactor p2p host and routing * fix p2p discovery test issue * add MaxAdvertiseWaitTime to handle advertisements interval and address stream connection issue * terminal print the peer id and proto id * fix boot complete message when node is shut down * add new config option ( ForceReachabilityPublic ) to fix local-net consensus issue * fix self query issue * fix test NewDNSSyncingPeerProvider * [testnet] disable leader rotation * fix discovery issue for legacy sync * add watermark low/high options for p2p connection manager * add test for new conn manager flags * fix dedent * add comment to inform about p2p connection manager options * fix max height issue * add a separate log for get max height error * fix log * feat: triesInMemory flag * fix: panic if TriesInMemory is 1 to 2 * in progress. * consensus check is forked * fix * Cleanup and fix update pub keys. * fix fix fix fix fix * activate epoch * EpochTBD for leader rotation epoch. * 295 epoch * Decider no longer requires public keys as a dependency. (#4289) * Consensus doesn't require anymore `Node` as a circular dependency. * Proper blockchain initialization. * Rwlock consensus. * Removed channels. * Removed view change locks. * Removed timers locks. * Removed fbft locks. * Removed multiSigMutex locks. * Removed leader locks. * Removed additional locks and isViewChange. * Added locks detected by race. * Added locks detected by race. * Locks for start. * Removed additional logs. * Removed additional locks. * Removed additional locks. * Make func private. * Make VerifyBlock private. 
* Make IsLeader private. * Make ParseFBFTMessage private. * Fix remove locks. * Added additional locks. * Added additional locks. * Added readSignatureBitmapPayload locks. * Added HandleMessageUpdate locks. * Added LastMile locks. * Locks for IsValidatorInCommittee. * Fixed locks. * Fixed tests. * Fixed tests. * Fixed lock. * Rebased over leader rotation. * Fix formatting. * Rebased onto dev. * in progress. * consensus check is forked * update master * fix leader * check leader for N blocks * fix * fix * Cleanup and fix update pub keys. * Rotate leader. * fix fix fix fix fix * Cleaned. * Cache for `GetLeaderPubKeyFromCoinbase`, removed `NthNextHmyExt`. * comment activation * 295 epoch * Fix failed tests. * Fixed code review. * Fix review comments. * Merged leader rotation. * Rebased on dev. * Rebased on dev. * Fix usage of private methods. * Fix usage of private methods. * Fix usage of private methods. * Removed deadcode, LockedFBFTPhase. * Fix review comment. * Fix review comment. * Go mod tidy. * Set to EpochTBD. * Fix tests. * [core] fix state handling of self destruct If a contract self destructs to self and then receives funds within the same transaction, it is possible for its stale state to be saved. This change removes that possibility by checking for deleted state objects before returning them. * Fixed race error. * rpc: add configurable http and `eth_call` timeout * remove default timeouts * store the evm call timeout in rosetta object * [cmd] actually apply ToRPCServerConfig * Removed unused method. * Rotate external leaders on non-beacon chains. * Fix nil panic. * in progress. * in progress. * in progress. * consensus check is forked * update master * fix leader * check leader for N blocks * fix * fix * Cleanup and fix update pub keys. * Rotate leader. * fix fix fix fix fix * Cleaned. * Cache for `GetLeaderPubKeyFromCoinbase`, removed `NthNextHmyExt`. * Fixed code review. * Fix review comments. * Returned locks in rotateLeader. * Rebased onto dev. 
* staged stream sync v1.0 * refactor errors, rename metrics * fix p2p discovery test issue * add watermark low/high options for p2p connection manager * fix dedent * in progress. * consensus check is forked * fix * Cleanup and fix update pub keys. * fix fix fix fix fix * activate epoch * EpochTBD for leader rotation epoch. * 295 epoch * Decider no longer requires public keys as a dependency. (#4289) * Consensus doesn't require anymore `Node` as a circular dependency. * Proper blockchain initialization. * Rwlock consensus. * Removed channels. * Removed view change locks. * Removed multiSigMutex locks. * Removed leader locks. * Removed additional locks and isViewChange. * Added locks detected by race. * Added locks detected by race. * Locks for start. * Removed additional locks. * Removed additional locks. * Make func private. * Make VerifyBlock private. * Make IsLeader private. * Make ParseFBFTMessage private. * Fix remove locks. * Added additional locks. * Added additional locks. * Added readSignatureBitmapPayload locks. * Added HandleMessageUpdate locks. * Added LastMile locks. * Locks for IsValidatorInCommittee. * Fixed locks. * Fixed tests. * Fixed lock. * Rebased over leader rotation. * in progress. * consensus check is forked * update master * fix leader * check leader for N blocks * fix * fix * Cleanup and fix update pub keys. * Rotate leader. * fix fix fix fix fix * Cleaned. * Cache for `GetLeaderPubKeyFromCoinbase`, removed `NthNextHmyExt`. * Fix failed tests. * Fixed code review. * Fix review comments. * Merged leader rotation. * Rebased on dev. * Rebased on dev. * Fix usage of private methods. * Fix usage of private methods. * Fix usage of private methods. * Removed deadcode, LockedFBFTPhase. * Fix review comment. * Go mod tidy. * remove default timeouts * Rotate external leaders on non-beacon chains. * Fix nil panic. * Fixes. 
* Update singleton.go * evm: don't return extcode for validators Due to technical debt, validator information is stored in the code field of the address. The code field can be accessed in Solidity for an arbitrary address using `extcodesize`, `extcodehash`, and `extcodecopy` or helper commands (such as `address.code.Length`). The presence of this field is used by contract developers to (erroneously) deny smart contract access to other smart contracts (and therefore, validators). This PR fixes that oversight by returning the same values as other EOAs for known validator addresses. Obviously, it needs a hard fork that will be scheduled separately. * Fix context passing. * Clean up code. * Removed engine dependency. * Fix possible panic. * Clean up code. * Network type. * Fix tests. * Revert "Removed engine dependency." (#4392) * Revert "Fix tests." This reverts commit597ba2d6f1
. * Revert "Network type." This reverts commit 5e1878aedc
. * Revert "Clean up code." This reverts commit 15885f4c9b
. * Revert "Fix possible panic." This reverts commit 1a70d5eb66
. * Revert "Removed engine dependency." This reverts commit 8c2ff803f7
. * gitignore the cache folder (#4389) * stable localnet with external validator (#4388) * stable localnet with external validator * ignore deploy config file comments * reduce node launched in localnet * update makefile * localnet configuration - add more fn * fix validator information command typo --------- Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: MaxMustermann2 <82761650+MaxMustermann2@users.noreply.github.com> Co-authored-by: frozen <355847+Frozen@users.noreply.github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: “GheisMohammadi” <“Gheis.Mohammadi@gmail.com”> Co-authored-by: “GheisMohammadi” <36589218+GheisMohammadi@users.noreply.github.com> Co-authored-by: Sun Hyuk Ahn <sunhyukahn@Suns-MacBook-Pro.local> Co-authored-by: Soph <35721420+sophoah@users.noreply.github.com> * build: update pinned curl version (#4394) Per the Alpine Linux package repositories, the version for cURL included with v3.16 has changed to revision 6 * consensus: replace type assert with test (#4398) If `consensus.finalityCounter` does not have anything stored (for example in Syncing mode), the `Load()` returns an interface that cannot be automatically asserted to an `int64`. This results in the node crashing. This commit fixes that. * Turn pprof default on with local saved files (#3894) * Turn pprof default on with local saved files * [pprof] change interval from 600s to 3600s * Revert "Turn pprof default on with local saved files (#3894)" (#4400) This reverts commit78d26d7910
. * go mod tidy. * Increased wait time. --------- Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: Casey Gardiner <117784577+ONECasey@users.noreply.github.com> Co-authored-by: MaxMustermann2 <82761650+MaxMustermann2@users.noreply.github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: “GheisMohammadi” <“Gheis.Mohammadi@gmail.com”> Co-authored-by: “GheisMohammadi” <36589218+GheisMohammadi@users.noreply.github.com> Co-authored-by: Sun Hyuk Ahn <sunhyukahn@Suns-MacBook-Pro.local> Co-authored-by: Soph <35721420+sophoah@users.noreply.github.com> Co-authored-by: Jacky Wang <jackyw.se@gmail.com> --------- Signed-off-by: MaxMustermann2 <82761650+MaxMustermann2@users.noreply.github.com> Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: frozen <355847+Frozen@users.noreply.github.com> Co-authored-by: “GheisMohammadi” <“Gheis.Mohammadi@gmail.com”> Co-authored-by: “GheisMohammadi” <36589218+GheisMohammadi@users.noreply.github.com> Co-authored-by: MaxMustermann2 <82761650+MaxMustermann2@users.noreply.github.com> Co-authored-by: Sun Hyuk Ahn <sunhyukahn@Suns-MacBook-Pro.local> Co-authored-by: Soph <35721420+sophoah@users.noreply.github.com> Co-authored-by: Konstantin <k.potapov@softpro.com> Co-authored-by: Gheis Mohammadi <Gheis.Mohammadi@gmail.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Jacky Wang <jackyw.se@gmail.com>
parent
cdb32b33c4
commit
01691fd3ea
@ -0,0 +1,32 @@ |
||||
package rawdb |
||||
|
||||
import ( |
||||
"testing" |
||||
|
||||
ethRawDB "github.com/ethereum/go-ethereum/core/rawdb" |
||||
"github.com/harmony-one/harmony/crypto/bls" |
||||
) |
||||
|
||||
func TestLeaderRotationMeta(t *testing.T) { |
||||
db := ethRawDB.NewMemoryDatabase() |
||||
err := WriteLeaderRotationMeta(db, make([]byte, bls.PublicKeySizeInBytes), 1, 2, 3) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
pub, epoch, count, shifts, err := ReadLeaderRotationMeta(db) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if len(pub) != bls.PublicKeySizeInBytes { |
||||
t.Fatal("invalid leader public key size") |
||||
} |
||||
if epoch != 1 { |
||||
t.Fatal("invalid epoch") |
||||
} |
||||
if count != 2 { |
||||
t.Fatal("invalid count") |
||||
} |
||||
if shifts != 3 { |
||||
t.Fatal("invalid shifts") |
||||
} |
||||
} |
@ -0,0 +1,210 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package rawdb |
||||
|
||||
import ( |
||||
"encoding/binary" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
) |
||||
|
||||
// ReadSnapshotDisabled retrieves if the snapshot maintenance is disabled.
|
||||
func ReadSnapshotDisabled(db ethdb.KeyValueReader) bool { |
||||
disabled, _ := db.Has(snapshotDisabledKey) |
||||
return disabled |
||||
} |
||||
|
||||
// WriteSnapshotDisabled stores the snapshot pause flag.
|
||||
func WriteSnapshotDisabled(db ethdb.KeyValueWriter) { |
||||
if err := db.Put(snapshotDisabledKey, []byte("42")); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to store snapshot disabled flag") |
||||
} |
||||
} |
||||
|
||||
// DeleteSnapshotDisabled deletes the flag keeping the snapshot maintenance disabled.
|
||||
func DeleteSnapshotDisabled(db ethdb.KeyValueWriter) { |
||||
if err := db.Delete(snapshotDisabledKey); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to remove snapshot disabled flag") |
||||
} |
||||
} |
||||
|
||||
// ReadSnapshotRoot retrieves the root of the block whose state is contained in
|
||||
// the persisted snapshot.
|
||||
func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash { |
||||
data, _ := db.Get(SnapshotRootKey) |
||||
if len(data) != common.HashLength { |
||||
return common.Hash{} |
||||
} |
||||
return common.BytesToHash(data) |
||||
} |
||||
|
||||
// WriteSnapshotRoot stores the root of the block whose state is contained in
|
||||
// the persisted snapshot.
|
||||
func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) { |
||||
if err := db.Put(SnapshotRootKey, root[:]); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to store snapshot root") |
||||
} |
||||
} |
||||
|
||||
// DeleteSnapshotRoot deletes the hash of the block whose state is contained in
|
||||
// the persisted snapshot. Since snapshots are not immutable, this method can
|
||||
// be used during updates, so a crash or failure will mark the entire snapshot
|
||||
// invalid.
|
||||
func DeleteSnapshotRoot(db ethdb.KeyValueWriter) { |
||||
if err := db.Delete(SnapshotRootKey); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to remove snapshot root") |
||||
} |
||||
} |
||||
|
||||
// ReadAccountSnapshot retrieves the snapshot entry of an account trie leaf.
|
||||
func ReadAccountSnapshot(db ethdb.KeyValueReader, hash common.Hash) []byte { |
||||
data, _ := db.Get(accountSnapshotKey(hash)) |
||||
return data |
||||
} |
||||
|
||||
// WriteAccountSnapshot stores the snapshot entry of an account trie leaf.
|
||||
func WriteAccountSnapshot(db ethdb.KeyValueWriter, hash common.Hash, entry []byte) { |
||||
if err := db.Put(accountSnapshotKey(hash), entry); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to store account snapshot") |
||||
} |
||||
} |
||||
|
||||
// DeleteAccountSnapshot removes the snapshot entry of an account trie leaf.
|
||||
func DeleteAccountSnapshot(db ethdb.KeyValueWriter, hash common.Hash) { |
||||
if err := db.Delete(accountSnapshotKey(hash)); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to delete account snapshot") |
||||
} |
||||
} |
||||
|
||||
// ReadStorageSnapshot retrieves the snapshot entry of an storage trie leaf.
|
||||
func ReadStorageSnapshot(db ethdb.KeyValueReader, accountHash, storageHash common.Hash) []byte { |
||||
data, _ := db.Get(storageSnapshotKey(accountHash, storageHash)) |
||||
return data |
||||
} |
||||
|
||||
// WriteStorageSnapshot stores the snapshot entry of an storage trie leaf.
|
||||
func WriteStorageSnapshot(db ethdb.KeyValueWriter, accountHash, storageHash common.Hash, entry []byte) { |
||||
if err := db.Put(storageSnapshotKey(accountHash, storageHash), entry); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to store storage snapshot") |
||||
} |
||||
} |
||||
|
||||
// DeleteStorageSnapshot removes the snapshot entry of an storage trie leaf.
|
||||
func DeleteStorageSnapshot(db ethdb.KeyValueWriter, accountHash, storageHash common.Hash) { |
||||
if err := db.Delete(storageSnapshotKey(accountHash, storageHash)); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to delete storage snapshot") |
||||
} |
||||
} |
||||
|
||||
// IterateStorageSnapshots returns an iterator for walking the entire storage
|
||||
// space of a specific account.
|
||||
func IterateStorageSnapshots(db ethdb.Iteratee, accountHash common.Hash) ethdb.Iterator { |
||||
return NewKeyLengthIterator(db.NewIterator(storageSnapshotsKey(accountHash), nil), len(SnapshotStoragePrefix)+2*common.HashLength) |
||||
} |
||||
|
||||
// ReadSnapshotJournal retrieves the serialized in-memory diff layers saved at
|
||||
// the last shutdown. The blob is expected to be max a few 10s of megabytes.
|
||||
func ReadSnapshotJournal(db ethdb.KeyValueReader) []byte { |
||||
data, _ := db.Get(snapshotJournalKey) |
||||
return data |
||||
} |
||||
|
||||
// WriteSnapshotJournal stores the serialized in-memory diff layers to save at
|
||||
// shutdown. The blob is expected to be max a few 10s of megabytes.
|
||||
func WriteSnapshotJournal(db ethdb.KeyValueWriter, journal []byte) { |
||||
if err := db.Put(snapshotJournalKey, journal); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to store snapshot journal") |
||||
} |
||||
} |
||||
|
||||
// DeleteSnapshotJournal deletes the serialized in-memory diff layers saved at
|
||||
// the last shutdown
|
||||
func DeleteSnapshotJournal(db ethdb.KeyValueWriter) { |
||||
if err := db.Delete(snapshotJournalKey); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to remove snapshot journal") |
||||
} |
||||
} |
||||
|
||||
// ReadSnapshotGenerator retrieves the serialized snapshot generator saved at
|
||||
// the last shutdown.
|
||||
func ReadSnapshotGenerator(db ethdb.KeyValueReader) []byte { |
||||
data, _ := db.Get(snapshotGeneratorKey) |
||||
return data |
||||
} |
||||
|
||||
// WriteSnapshotGenerator stores the serialized snapshot generator to save at
|
||||
// shutdown.
|
||||
func WriteSnapshotGenerator(db ethdb.KeyValueWriter, generator []byte) { |
||||
if err := db.Put(snapshotGeneratorKey, generator); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to store snapshot generator") |
||||
} |
||||
} |
||||
|
||||
// DeleteSnapshotGenerator deletes the serialized snapshot generator saved at
|
||||
// the last shutdown
|
||||
func DeleteSnapshotGenerator(db ethdb.KeyValueWriter) { |
||||
if err := db.Delete(snapshotGeneratorKey); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to remove snapshot generator") |
||||
} |
||||
} |
||||
|
||||
// ReadSnapshotRecoveryNumber retrieves the block number of the last persisted
|
||||
// snapshot layer.
|
||||
func ReadSnapshotRecoveryNumber(db ethdb.KeyValueReader) *uint64 { |
||||
data, _ := db.Get(snapshotRecoveryKey) |
||||
if len(data) == 0 { |
||||
return nil |
||||
} |
||||
if len(data) != 8 { |
||||
return nil |
||||
} |
||||
number := binary.BigEndian.Uint64(data) |
||||
return &number |
||||
} |
||||
|
||||
// WriteSnapshotRecoveryNumber stores the block number of the last persisted
|
||||
// snapshot layer.
|
||||
func WriteSnapshotRecoveryNumber(db ethdb.KeyValueWriter, number uint64) { |
||||
var buf [8]byte |
||||
binary.BigEndian.PutUint64(buf[:], number) |
||||
if err := db.Put(snapshotRecoveryKey, buf[:]); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to store snapshot recovery number") |
||||
} |
||||
} |
||||
|
||||
// DeleteSnapshotRecoveryNumber deletes the block number of the last persisted
|
||||
// snapshot layer.
|
||||
func DeleteSnapshotRecoveryNumber(db ethdb.KeyValueWriter) { |
||||
if err := db.Delete(snapshotRecoveryKey); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to remove snapshot recovery number") |
||||
} |
||||
} |
||||
|
||||
// ReadSnapshotSyncStatus retrieves the serialized sync status saved at shutdown.
|
||||
func ReadSnapshotSyncStatus(db ethdb.KeyValueReader) []byte { |
||||
data, _ := db.Get(snapshotSyncStatusKey) |
||||
return data |
||||
} |
||||
|
||||
// WriteSnapshotSyncStatus stores the serialized sync status to save at shutdown.
|
||||
func WriteSnapshotSyncStatus(db ethdb.KeyValueWriter, status []byte) { |
||||
if err := db.Put(snapshotSyncStatusKey, status); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to store snapshot sync status") |
||||
} |
||||
} |
@ -0,0 +1,149 @@ |
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package rawdb |
||||
|
||||
import ( |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
) |
||||
|
||||
// ReadPreimage retrieves a single preimage of the provided hash.
|
||||
func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte { |
||||
data, _ := db.Get(preimageKey(hash)) |
||||
return data |
||||
} |
||||
|
||||
// WritePreimages writes the provided set of preimages to the database.
|
||||
func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) error { |
||||
for hash, preimage := range preimages { |
||||
if err := db.Put(preimageKey(hash), preimage); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to store trie preimage") |
||||
} |
||||
} |
||||
preimageCounter.Inc(int64(len(preimages))) |
||||
preimageHitCounter.Inc(int64(len(preimages))) |
||||
return nil |
||||
} |
||||
|
||||
// ReadCode retrieves the contract code of the provided code hash.
|
||||
func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte { |
||||
// Try with the prefixed code scheme first, if not then try with legacy
|
||||
// scheme.
|
||||
data := ReadCodeWithPrefix(db, hash) |
||||
if len(data) != 0 { |
||||
return data |
||||
} |
||||
data, _ = db.Get(hash.Bytes()) |
||||
return data |
||||
} |
||||
|
||||
// ReadCodeWithPrefix retrieves the contract code of the provided code hash.
|
||||
// The main difference between this function and ReadCode is this function
|
||||
// will only check the existence with latest scheme(with prefix).
|
||||
func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte { |
||||
data, _ := db.Get(codeKey(hash)) |
||||
return data |
||||
} |
||||
|
||||
// HasCode checks if the contract code corresponding to the
|
||||
// provided code hash is present in the db.
|
||||
func HasCode(db ethdb.KeyValueReader, hash common.Hash) bool { |
||||
// Try with the prefixed code scheme first, if not then try with legacy
|
||||
// scheme.
|
||||
if ok := HasCodeWithPrefix(db, hash); ok { |
||||
return true |
||||
} |
||||
ok, _ := db.Has(hash.Bytes()) |
||||
return ok |
||||
} |
||||
|
||||
// HasCodeWithPrefix checks if the contract code corresponding to the
|
||||
// provided code hash is present in the db. This function will only check
|
||||
// presence using the prefix-scheme.
|
||||
func HasCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) bool { |
||||
ok, _ := db.Has(codeKey(hash)) |
||||
return ok |
||||
} |
||||
|
||||
// WriteCode writes the provided contract code database.
|
||||
func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) { |
||||
if err := db.Put(codeKey(hash), code); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to store contract code") |
||||
} |
||||
} |
||||
|
||||
// DeleteCode deletes the specified contract code from the database.
|
||||
func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) { |
||||
if err := db.Delete(codeKey(hash)); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to delete contract code") |
||||
} |
||||
} |
||||
|
||||
// ReadValidatorCode retrieves the validator code of the provided code hash.
|
||||
func ReadValidatorCode(db ethdb.KeyValueReader, hash common.Hash) []byte { |
||||
// Try with the prefixed code scheme first, if not then try with legacy
|
||||
// scheme.
|
||||
data := ReadValidatorCodeWithPrefix(db, hash) |
||||
if len(data) != 0 { |
||||
return data |
||||
} |
||||
data, _ = db.Get(hash.Bytes()) |
||||
return data |
||||
} |
||||
|
||||
// ReadValidatorCodeWithPrefix retrieves the validator code of the provided code hash.
|
||||
// The main difference between this function and ReadValidatorCode is this function
|
||||
// will only check the existence with latest scheme(with prefix).
|
||||
func ReadValidatorCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte { |
||||
data, _ := db.Get(validatorCodeKey(hash)) |
||||
return data |
||||
} |
||||
|
||||
// HasValidatorCode checks if the validator code corresponding to the
|
||||
// provided code hash is present in the db.
|
||||
func HasValidatorCode(db ethdb.KeyValueReader, hash common.Hash) bool { |
||||
// Try with the prefixed code scheme first, if not then try with legacy
|
||||
// scheme.
|
||||
if ok := HasValidatorCodeWithPrefix(db, hash); ok { |
||||
return true |
||||
} |
||||
ok, _ := db.Has(hash.Bytes()) |
||||
return ok |
||||
} |
||||
|
||||
// HasValidatorCodeWithPrefix checks if the validator code corresponding to the
|
||||
// provided code hash is present in the db. This function will only check
|
||||
// presence using the prefix-scheme.
|
||||
func HasValidatorCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) bool { |
||||
ok, _ := db.Has(validatorCodeKey(hash)) |
||||
return ok |
||||
} |
||||
|
||||
// WriteValidatorCode writes the provided validator code to database.
|
||||
func WriteValidatorCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) { |
||||
if err := db.Put(validatorCodeKey(hash), code); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to store validator code") |
||||
} |
||||
} |
||||
|
||||
// DeleteValidatorCode deletes the specified validator code from the database.
|
||||
func DeleteValidatorCode(db ethdb.KeyValueWriter, hash common.Hash) { |
||||
if err := db.Delete(validatorCodeKey(hash)); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to delete validator code") |
||||
} |
||||
} |
@ -0,0 +1,80 @@ |
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package rawdb |
||||
|
||||
import ( |
||||
"bytes" |
||||
|
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
) |
||||
|
||||
// ReadSkeletonSyncStatus retrieves the serialized sync status saved at shutdown.
|
||||
func ReadSkeletonSyncStatus(db ethdb.KeyValueReader) []byte { |
||||
data, _ := db.Get(skeletonSyncStatusKey) |
||||
return data |
||||
} |
||||
|
||||
// WriteSkeletonSyncStatus stores the serialized sync status to save at shutdown.
|
||||
func WriteSkeletonSyncStatus(db ethdb.KeyValueWriter, status []byte) { |
||||
if err := db.Put(skeletonSyncStatusKey, status); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to store skeleton sync status") |
||||
} |
||||
} |
||||
|
||||
// DeleteSkeletonSyncStatus deletes the serialized sync status saved at the last
|
||||
// shutdown
|
||||
func DeleteSkeletonSyncStatus(db ethdb.KeyValueWriter) { |
||||
if err := db.Delete(skeletonSyncStatusKey); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to remove skeleton sync status") |
||||
} |
||||
} |
||||
|
||||
// ReadSkeletonHeader retrieves a block header from the skeleton sync store,
|
||||
func ReadSkeletonHeader(db ethdb.KeyValueReader, number uint64) *types.Header { |
||||
data, _ := db.Get(skeletonHeaderKey(number)) |
||||
if len(data) == 0 { |
||||
return nil |
||||
} |
||||
header := new(types.Header) |
||||
if err := rlp.Decode(bytes.NewReader(data), header); err != nil { |
||||
utils.Logger().Error().Err(err).Uint64("number", number).Msg("Invalid skeleton header RLP") |
||||
return nil |
||||
} |
||||
return header |
||||
} |
||||
|
||||
// WriteSkeletonHeader stores a block header into the skeleton sync store.
|
||||
func WriteSkeletonHeader(db ethdb.KeyValueWriter, header *types.Header) { |
||||
data, err := rlp.EncodeToBytes(header) |
||||
if err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to RLP encode header") |
||||
} |
||||
key := skeletonHeaderKey(header.Number.Uint64()) |
||||
if err := db.Put(key, data); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to store skeleton header") |
||||
} |
||||
} |
||||
|
||||
// DeleteSkeletonHeader removes all block header data associated with a hash.
|
||||
func DeleteSkeletonHeader(db ethdb.KeyValueWriter, number uint64) { |
||||
if err := db.Delete(skeletonHeaderKey(number)); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to delete skeleton header") |
||||
} |
||||
} |
@ -0,0 +1,263 @@ |
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
|
||||
|
||||
package rawdb |
||||
|
||||
import ( |
||||
"fmt" |
||||
"sync" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
"golang.org/x/crypto/sha3" |
||||
) |
||||
|
||||
// Supported state scheme identifiers.
const (
	// HashScheme is the legacy hash-based state scheme: trie nodes are keyed
	// in the disk by their node hash. Its advantage is that multiple versions
	// of a trie node can coexist on disk, which suits archive nodes; the
	// drawback is that nodes on the same path scatter across the disk with no
	// data locality, which makes state pruning unfriendly.
	//
	// The scheme is kept for backward compatibility and is still used for
	// archive nodes and some other tries (e.g. light trie).
	HashScheme = "hashScheme"

	// PathScheme is the new path-based state scheme: trie nodes are keyed in
	// the disk by their node path, so only one version of the state is kept,
	// making pruning native. Adjacent trie nodes land in the same disk area
	// with good locality, but deep reorgs need extra state diffs to survive.
	PathScheme = "pathScheme"
)
||||
|
||||
// nodeHasher used to derive the hash of trie node.
|
||||
type nodeHasher struct{ sha crypto.KeccakState } |
||||
|
||||
var hasherPool = sync.Pool{ |
||||
New: func() interface{} { return &nodeHasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, |
||||
} |
||||
|
||||
func newNodeHasher() *nodeHasher { return hasherPool.Get().(*nodeHasher) } |
||||
func returnHasherToPool(h *nodeHasher) { hasherPool.Put(h) } |
||||
|
||||
func (h *nodeHasher) hashData(data []byte) (n common.Hash) { |
||||
h.sha.Reset() |
||||
h.sha.Write(data) |
||||
h.sha.Read(n[:]) |
||||
return n |
||||
} |
||||
|
||||
// ReadAccountTrieNode retrieves the account trie node and the associated node
|
||||
// hash with the specified node path.
|
||||
func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.Hash) { |
||||
data, err := db.Get(accountTrieNodeKey(path)) |
||||
if err != nil { |
||||
return nil, common.Hash{} |
||||
} |
||||
hasher := newNodeHasher() |
||||
defer returnHasherToPool(hasher) |
||||
return data, hasher.hashData(data) |
||||
} |
||||
|
||||
// HasAccountTrieNode checks the account trie node presence with the specified
|
||||
// node path and the associated node hash.
|
||||
func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash) bool { |
||||
data, err := db.Get(accountTrieNodeKey(path)) |
||||
if err != nil { |
||||
return false |
||||
} |
||||
hasher := newNodeHasher() |
||||
defer returnHasherToPool(hasher) |
||||
return hasher.hashData(data) == hash |
||||
} |
||||
|
||||
// WriteAccountTrieNode writes the provided account trie node into database.
|
||||
func WriteAccountTrieNode(db ethdb.KeyValueWriter, path []byte, node []byte) { |
||||
if err := db.Put(accountTrieNodeKey(path), node); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to store account trie node") |
||||
} |
||||
} |
||||
|
||||
// DeleteAccountTrieNode deletes the specified account trie node from the database.
|
||||
func DeleteAccountTrieNode(db ethdb.KeyValueWriter, path []byte) { |
||||
if err := db.Delete(accountTrieNodeKey(path)); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to delete account trie node") |
||||
} |
||||
} |
||||
|
||||
// ReadStorageTrieNode retrieves the storage trie node and the associated node
|
||||
// hash with the specified node path.
|
||||
func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) ([]byte, common.Hash) { |
||||
data, err := db.Get(storageTrieNodeKey(accountHash, path)) |
||||
if err != nil { |
||||
return nil, common.Hash{} |
||||
} |
||||
hasher := newNodeHasher() |
||||
defer returnHasherToPool(hasher) |
||||
return data, hasher.hashData(data) |
||||
} |
||||
|
||||
// HasStorageTrieNode checks the storage trie node presence with the provided
|
||||
// node path and the associated node hash.
|
||||
func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte, hash common.Hash) bool { |
||||
data, err := db.Get(storageTrieNodeKey(accountHash, path)) |
||||
if err != nil { |
||||
return false |
||||
} |
||||
hasher := newNodeHasher() |
||||
defer returnHasherToPool(hasher) |
||||
return hasher.hashData(data) == hash |
||||
} |
||||
|
||||
// WriteStorageTrieNode writes the provided storage trie node into database.
|
||||
func WriteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte, node []byte) { |
||||
if err := db.Put(storageTrieNodeKey(accountHash, path), node); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to store storage trie node") |
||||
} |
||||
} |
||||
|
||||
// DeleteStorageTrieNode deletes the specified storage trie node from the database.
|
||||
func DeleteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte) { |
||||
if err := db.Delete(storageTrieNodeKey(accountHash, path)); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to delete storage trie node") |
||||
} |
||||
} |
||||
|
||||
// ReadLegacyTrieNode retrieves the legacy trie node with the given
|
||||
// associated node hash.
|
||||
func ReadLegacyTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte { |
||||
data, err := db.Get(hash.Bytes()) |
||||
if err != nil { |
||||
return nil |
||||
} |
||||
return data |
||||
} |
||||
|
||||
// HasLegacyTrieNode checks if the trie node with the provided hash is present in db.
|
||||
func HasLegacyTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool { |
||||
ok, _ := db.Has(hash.Bytes()) |
||||
return ok |
||||
} |
||||
|
||||
// WriteLegacyTrieNode writes the provided legacy trie node to database.
|
||||
func WriteLegacyTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) { |
||||
if err := db.Put(hash.Bytes(), node); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to store legacy trie node") |
||||
} |
||||
} |
||||
|
||||
// DeleteLegacyTrieNode deletes the specified legacy trie node from database.
|
||||
func DeleteLegacyTrieNode(db ethdb.KeyValueWriter, hash common.Hash) { |
||||
if err := db.Delete(hash.Bytes()); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to delete legacy trie node") |
||||
} |
||||
} |
||||
|
||||
// HasTrieNode checks the trie node presence with the provided node info and
|
||||
// the associated node hash.
|
||||
func HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) bool { |
||||
switch scheme { |
||||
case HashScheme: |
||||
return HasLegacyTrieNode(db, hash) |
||||
case PathScheme: |
||||
if owner == (common.Hash{}) { |
||||
return HasAccountTrieNode(db, path, hash) |
||||
} |
||||
return HasStorageTrieNode(db, owner, path, hash) |
||||
default: |
||||
panic(fmt.Sprintf("Unknown scheme %v", scheme)) |
||||
} |
||||
} |
||||
|
||||
// ReadTrieNode retrieves the trie node from database with the provided node info
|
||||
// and associated node hash.
|
||||
// hashScheme-based lookup requires the following:
|
||||
// - hash
|
||||
//
|
||||
// pathScheme-based lookup requires the following:
|
||||
// - owner
|
||||
// - path
|
||||
func ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) []byte { |
||||
switch scheme { |
||||
case HashScheme: |
||||
return ReadLegacyTrieNode(db, hash) |
||||
case PathScheme: |
||||
var ( |
||||
blob []byte |
||||
nHash common.Hash |
||||
) |
||||
if owner == (common.Hash{}) { |
||||
blob, nHash = ReadAccountTrieNode(db, path) |
||||
} else { |
||||
blob, nHash = ReadStorageTrieNode(db, owner, path) |
||||
} |
||||
if nHash != hash { |
||||
return nil |
||||
} |
||||
return blob |
||||
default: |
||||
panic(fmt.Sprintf("Unknown scheme %v", scheme)) |
||||
} |
||||
} |
||||
|
||||
// WriteTrieNode writes the trie node into database with the provided node info
|
||||
// and associated node hash.
|
||||
// hashScheme-based lookup requires the following:
|
||||
// - hash
|
||||
//
|
||||
// pathScheme-based lookup requires the following:
|
||||
// - owner
|
||||
// - path
|
||||
func WriteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, node []byte, scheme string) { |
||||
switch scheme { |
||||
case HashScheme: |
||||
WriteLegacyTrieNode(db, hash, node) |
||||
case PathScheme: |
||||
if owner == (common.Hash{}) { |
||||
WriteAccountTrieNode(db, path, node) |
||||
} else { |
||||
WriteStorageTrieNode(db, owner, path, node) |
||||
} |
||||
default: |
||||
panic(fmt.Sprintf("Unknown scheme %v", scheme)) |
||||
} |
||||
} |
||||
|
||||
// DeleteTrieNode deletes the trie node from database with the provided node info
|
||||
// and associated node hash.
|
||||
// hashScheme-based lookup requires the following:
|
||||
// - hash
|
||||
//
|
||||
// pathScheme-based lookup requires the following:
|
||||
// - owner
|
||||
// - path
|
||||
func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, scheme string) { |
||||
switch scheme { |
||||
case HashScheme: |
||||
DeleteLegacyTrieNode(db, hash) |
||||
case PathScheme: |
||||
if owner == (common.Hash{}) { |
||||
DeleteAccountTrieNode(db, path) |
||||
} else { |
||||
DeleteStorageTrieNode(db, owner, path) |
||||
} |
||||
default: |
||||
panic(fmt.Sprintf("Unknown scheme %v", scheme)) |
||||
} |
||||
} |
@ -0,0 +1,55 @@ |
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package rawdb |
||||
|
||||
// The table names of the chain freezer.
// These identifiers are NOT used, just ported over from the Ethereum.
const (
	// ChainFreezerHeaderTable indicates the name of the freezer header table.
	ChainFreezerHeaderTable = "headers"

	// ChainFreezerHashTable indicates the name of the freezer canonical hash table.
	ChainFreezerHashTable = "hashes"

	// ChainFreezerBodiesTable indicates the name of the freezer block body table.
	ChainFreezerBodiesTable = "bodies"

	// ChainFreezerReceiptTable indicates the name of the freezer receipts table.
	ChainFreezerReceiptTable = "receipts"

	// ChainFreezerDifficultyTable indicates the name of the freezer total difficulty table.
	ChainFreezerDifficultyTable = "diffs"
)

// chainFreezerNoSnappy configures whether compression is disabled for the
// ancient-tables: hashes and difficulties don't compress well, so snappy is
// turned off (true) for those two.
// This mapping is NOT used, just ported over from the Ethereum.
var chainFreezerNoSnappy = map[string]bool{
	ChainFreezerHeaderTable:     false,
	ChainFreezerHashTable:       true,
	ChainFreezerBodiesTable:     false,
	ChainFreezerReceiptTable:    false,
	ChainFreezerDifficultyTable: true,
}

// The list of identifiers of ancient stores.
var (
	chainFreezerName = "chain" // the folder name of chain segment ancient store.
)

// freezers the collections of all builtin freezers.
var freezers = []string{chainFreezerName}
@ -0,0 +1,91 @@ |
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package rawdb |
||||
|
||||
import ( |
||||
"fmt" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
) |
||||
|
||||
type tableSize struct { |
||||
name string |
||||
size common.StorageSize |
||||
} |
||||
|
||||
// freezerInfo contains the basic information of the freezer.
|
||||
type freezerInfo struct { |
||||
name string // The identifier of freezer
|
||||
head uint64 // The number of last stored item in the freezer
|
||||
tail uint64 // The number of first stored item in the freezer
|
||||
sizes []tableSize // The storage size per table
|
||||
} |
||||
|
||||
// count returns the number of stored items in the freezer.
|
||||
func (info *freezerInfo) count() uint64 { |
||||
return info.head - info.tail + 1 |
||||
} |
||||
|
||||
// size returns the storage size of the entire freezer.
|
||||
func (info *freezerInfo) size() common.StorageSize { |
||||
var total common.StorageSize |
||||
for _, table := range info.sizes { |
||||
total += table.size |
||||
} |
||||
return total |
||||
} |
||||
|
||||
// inspectFreezers inspects all freezers registered in the system.
|
||||
// This function is NOT used, just ported over from the Ethereum
|
||||
func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) { |
||||
var infos []freezerInfo |
||||
for _, freezer := range freezers { |
||||
switch freezer { |
||||
case chainFreezerName: |
||||
// Chain ancient store is a bit special. It's always opened along
|
||||
// with the key-value store, inspect the chain store directly.
|
||||
info := freezerInfo{name: freezer} |
||||
// Retrieve storage size of every contained table.
|
||||
for table := range chainFreezerNoSnappy { |
||||
size, err := db.AncientSize(table) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
info.sizes = append(info.sizes, tableSize{name: table, size: common.StorageSize(size)}) |
||||
} |
||||
// Retrieve the number of last stored item
|
||||
ancients, err := db.Ancients() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
info.head = ancients - 1 |
||||
|
||||
// Retrieve the number of first stored item
|
||||
tail, err := db.Tail() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
info.tail = tail |
||||
infos = append(infos, info) |
||||
|
||||
default: |
||||
return nil, fmt.Errorf("unknown freezer, supported ones: %v", freezers) |
||||
} |
||||
} |
||||
return infos, nil |
||||
} |
@ -0,0 +1,358 @@ |
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package rawdb |
||||
|
||||
import ( |
||||
"runtime" |
||||
"sync/atomic" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/common/prque" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
) |
||||
|
||||
// InitDatabaseFromFreezer reinitializes an empty database from a previous batch
|
||||
// of frozen ancient blocks. The method iterates over all the frozen blocks and
|
||||
// injects into the database the block hash->number mappings.
|
||||
// This function is NOT used, just ported over from the Ethereum
|
||||
func InitDatabaseFromFreezer(db ethdb.Database) { |
||||
// If we can't access the freezer or it's empty, abort
|
||||
frozen, err := db.Ancients() |
||||
if err != nil || frozen == 0 { |
||||
return |
||||
} |
||||
var ( |
||||
batch = db.NewBatch() |
||||
start = time.Now() |
||||
logged = start.Add(-7 * time.Second) // Unindex during import is fast, don't double log
|
||||
hash common.Hash |
||||
) |
||||
for i := uint64(0); i < frozen; { |
||||
// We read 100K hashes at a time, for a total of 3.2M
|
||||
count := uint64(100_000) |
||||
if i+count > frozen { |
||||
count = frozen - i |
||||
} |
||||
data, err := db.AncientRange(ChainFreezerHashTable, i, count, 32*count) |
||||
if err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to init database from freezer") |
||||
} |
||||
for j, h := range data { |
||||
number := i + uint64(j) |
||||
hash = common.BytesToHash(h) |
||||
WriteHeaderNumber(batch, hash, number) |
||||
// If enough data was accumulated in memory or we're at the last block, dump to disk
|
||||
if batch.ValueSize() > ethdb.IdealBatchSize { |
||||
if err := batch.Write(); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to write data to db") |
||||
} |
||||
batch.Reset() |
||||
} |
||||
} |
||||
i += uint64(len(data)) |
||||
// If we've spent too much time already, notify the user of what we're doing
|
||||
if time.Since(logged) > 8*time.Second { |
||||
log.Info("Initializing database from freezer", "total", frozen, "number", i, "hash", hash, "elapsed", common.PrettyDuration(time.Since(start))) |
||||
logged = time.Now() |
||||
} |
||||
} |
||||
if err := batch.Write(); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Failed to write data to db") |
||||
} |
||||
batch.Reset() |
||||
|
||||
WriteHeadHeaderHash(db, hash) |
||||
WriteHeadFastBlockHash(db, hash) |
||||
log.Info("Initialized database from freezer", "blocks", frozen, "elapsed", common.PrettyDuration(time.Since(start))) |
||||
} |
||||
|
||||
type blockTxHashes struct { |
||||
number uint64 |
||||
hashes []common.Hash |
||||
} |
||||
|
||||
// iterateTransactions iterates over all transactions in the (canon) block
// number(s) given, and yields the hashes on a channel. If there is a signal
// received from interrupt channel, the iteration will be aborted and result
// channel will be closed.
//
// One goroutine reads raw block bodies from the database sequentially, and up
// to NumCPU worker goroutines decode them in parallel; deliveries on the
// returned channel are therefore NOT guaranteed to arrive in block order.
func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool, interrupt chan struct{}) chan *blockTxHashes {
	// One thread sequentially reads data from db
	type numberRlp struct {
		number uint64
		rlp    rlp.RawValue
	}
	// Empty range: nothing to iterate, signal with a nil channel.
	if to == from {
		return nil
	}
	// Cap the decoder count by both the range size and the CPU count.
	threads := to - from
	if cpus := runtime.NumCPU(); threads > uint64(cpus) {
		threads = uint64(cpus)
	}
	var (
		rlpCh    = make(chan *numberRlp, threads*2)     // we send raw rlp over this channel
		hashesCh = make(chan *blockTxHashes, threads*2) // send hashes over hashesCh
	)
	// lookup runs in one instance
	lookup := func() {
		n, end := from, to
		if reverse {
			// Walk [to-1, from] downwards; `end` is one past the last value.
			n, end = to-1, from-1
		}
		defer close(rlpCh)
		for n != end {
			data := ReadCanonicalBodyRLP(db, n)
			// Feed the block to the aggregator, or abort on interrupt
			select {
			case rlpCh <- &numberRlp{n, data}:
			case <-interrupt:
				return
			}
			if reverse {
				n--
			} else {
				n++
			}
		}
	}
	// process runs in parallel
	nThreadsAlive := int32(threads)
	process := func() {
		defer func() {
			// Last processor closes the result channel
			if atomic.AddInt32(&nThreadsAlive, -1) == 0 {
				close(hashesCh)
			}
		}()
		for data := range rlpCh {
			var body types.Body
			if err := rlp.DecodeBytes(data.rlp, &body); err != nil {
				utils.Logger().Warn().Err(err).Uint64("block", data.number).Msg("Failed to decode block body")
				return
			}
			var hashes []common.Hash
			for _, tx := range body.Transactions {
				hashes = append(hashes, tx.Hash())
			}
			result := &blockTxHashes{
				hashes: hashes,
				number: data.number,
			}
			// Feed the block to the aggregator, or abort on interrupt
			select {
			case hashesCh <- result:
			case <-interrupt:
				return
			}
		}
	}
	go lookup() // start the sequential db accessor
	for i := 0; i < int(threads); i++ {
		go process()
	}
	return hashesCh
}
||||
|
||||
// indexTransactions creates txlookup indices of the specified block range.
//
// This function iterates canonical chain in reverse order, it has one main advantage:
// We can write tx index tail flag periodically even without the whole indexing
// procedure is finished. So that we can resume indexing procedure next time quickly.
//
// There is a passed channel, the whole procedure will be interrupted if any
// signal received.
// This function is NOT used, just ported over from the Ethereum
func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
	// short circuit for invalid range
	if from >= to {
		return
	}
	var (
		hashesCh = iterateTransactions(db, from, to, true, interrupt)
		batch    = db.NewBatch()
		start    = time.Now()
		logged   = start.Add(-7 * time.Second)
		// Since we iterate in reverse, we expect the first number to come
		// in to be [to-1]. Therefore, setting lastNum to `to` means that the
		// prqueue gap-evaluation will work correctly
		lastNum = to
		queue   = prque.New[int64, *blockTxHashes](nil)
		// for stats reporting
		blocks, txs = 0, 0
	)
	for chanDelivery := range hashesCh {
		// Push the delivery into the queue and process contiguous ranges.
		// Since we iterate in reverse, so lower numbers have lower prio, and
		// we can use the number directly as prio marker
		queue.Push(chanDelivery, int64(chanDelivery.number))
		for !queue.Empty() {
			// If the next available item is gapped, return
			if _, priority := queue.Peek(); priority != int64(lastNum-1) {
				break
			}
			// For testing
			if hook != nil && !hook(lastNum-1) {
				break
			}
			// Next block available, pop it off and index it
			delivery := queue.PopItem()
			lastNum = delivery.number
			WriteTxLookupEntries(batch, delivery.number, delivery.hashes)
			blocks++
			txs += len(delivery.hashes)
			// If enough data was accumulated in memory or we're at the last block, dump to disk
			if batch.ValueSize() > ethdb.IdealBatchSize {
				WriteTxIndexTail(batch, lastNum) // Also write the tail here
				if err := batch.Write(); err != nil {
					utils.Logger().Error().Err(err).Msg("Failed writing batch to db")
					return
				}
				batch.Reset()
			}
			// If we've spent too much time already, notify the user of what we're doing
			if time.Since(logged) > 8*time.Second {
				log.Info("Indexing transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
				logged = time.Now()
			}
		}
	}
	// Flush the new indexing tail and the last committed data. It can also happen
	// that the last batch is empty because nothing to index, but the tail has to
	// be flushed anyway.
	WriteTxIndexTail(batch, lastNum)
	if err := batch.Write(); err != nil {
		utils.Logger().Error().Err(err).Msg("Failed writing batch to db")
		return
	}
	select {
	case <-interrupt:
		log.Debug("Transaction indexing interrupted", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
	default:
		log.Debug("Indexed transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
	}
}
||||
|
||||
// IndexTransactions creates txlookup indices of the specified block range. The from
|
||||
// is included while to is excluded.
|
||||
//
|
||||
// This function iterates canonical chain in reverse order, it has one main advantage:
|
||||
// We can write tx index tail flag periodically even without the whole indexing
|
||||
// procedure is finished. So that we can resume indexing procedure next time quickly.
|
||||
//
|
||||
// There is a passed channel, the whole procedure will be interrupted if any
|
||||
// signal received.
|
||||
func IndexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}) { |
||||
indexTransactions(db, from, to, interrupt, nil) |
||||
} |
||||
|
||||
// indexTransactionsForTesting is the internal debug version with an additional hook.
|
||||
func indexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) { |
||||
indexTransactions(db, from, to, interrupt, hook) |
||||
} |
||||
|
||||
// unindexTransactions removes txlookup indices of the specified block range.
//
// There is a passed channel, the whole procedure will be interrupted if any
// signal received.
// This function is NOT used, just ported over from the Ethereum
func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
	// short circuit for invalid range
	if from >= to {
		return
	}
	var (
		hashesCh = iterateTransactions(db, from, to, false, interrupt)
		batch    = db.NewBatch()
		start    = time.Now()
		logged   = start.Add(-7 * time.Second)
		// we expect the first number to come in to be [from]. Therefore, setting
		// nextNum to from means that the prqueue gap-evaluation will work correctly
		nextNum = from
		queue   = prque.New[int64, *blockTxHashes](nil)
		// for stats reporting
		blocks, txs = 0, 0
	)
	// Otherwise spin up the concurrent iterator and unindexer
	for delivery := range hashesCh {
		// Push the delivery into the queue and process contiguous ranges.
		// Iteration is forward here, so the number is negated to make lower
		// numbers pop first from the priority queue.
		queue.Push(delivery, -int64(delivery.number))
		for !queue.Empty() {
			// If the next available item is gapped, return
			if _, priority := queue.Peek(); -priority != int64(nextNum) {
				break
			}
			// For testing
			if hook != nil && !hook(nextNum) {
				break
			}
			delivery := queue.PopItem()
			nextNum = delivery.number + 1
			DeleteTxLookupEntries(batch, delivery.hashes)
			txs += len(delivery.hashes)
			blocks++

			// If enough data was accumulated in memory or we're at the last block, dump to disk
			// A batch counts the size of deletion as '1', so we need to flush more
			// often than that.
			if blocks%1000 == 0 {
				WriteTxIndexTail(batch, nextNum)
				if err := batch.Write(); err != nil {
					utils.Logger().Error().Err(err).Msg("Failed writing batch to db")
					return
				}
				batch.Reset()
			}
			// If we've spent too much time already, notify the user of what we're doing
			if time.Since(logged) > 8*time.Second {
				log.Info("Unindexing transactions", "blocks", blocks, "txs", txs, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
				logged = time.Now()
			}
		}
	}
	// Flush the new indexing tail and the last committed data. It can also happen
	// that the last batch is empty because nothing to unindex, but the tail has to
	// be flushed anyway.
	WriteTxIndexTail(batch, nextNum)
	if err := batch.Write(); err != nil {
		utils.Logger().Error().Err(err).Msg("Failed writing batch to db")
		return
	}
	select {
	case <-interrupt:
		log.Debug("Transaction unindexing interrupted", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
	default:
		log.Debug("Unindexed transactions", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
	}
}
||||
|
||||
// UnindexTransactions removes txlookup indices of the specified block range.
// The from is included while to is excluded.
//
// There is a passed channel, the whole procedure will be interrupted if any
// signal received.
func UnindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}) {
	// Public entry point: no per-block hook installed.
	unindexTransactions(db, from, to, interrupt, nil)
}
||||
|
||||
// unindexTransactionsForTesting is the internal debug version with an additional hook.
// The hook is invoked before each block is unindexed; returning false stops processing
// of the current contiguous range.
func unindexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
	unindexTransactions(db, from, to, interrupt, hook)
}
@ -0,0 +1,468 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package rawdb |
||||
|
||||
import (
	"bytes"
	"errors"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/leveldb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/harmony-one/harmony/internal/utils"
	"github.com/olekukonko/tablewriter"
)
||||
|
||||
// errNotSupported is returned by ancient-store methods on databases that have
// no freezer backend.
var errNotSupported = errors.New("not supported")
||||
|
||||
// convertLegacyFn takes a raw freezer entry in an older format and
// returns it in the new format.
type convertLegacyFn = func([]byte) ([]byte, error)
||||
|
||||
// freezerdb is a database wrapper that enabled freezer data retrievals.
// It composes a fast key-value store with a slow append-only ancient store.
type freezerdb struct {
	ancientRoot string // filesystem path of the root ancient directory
	ethdb.KeyValueStore
	ethdb.AncientStore
}
||||
|
||||
// AncientDatadir returns the path of root ancient directory.
func (frdb *freezerdb) AncientDatadir() (string, error) {
	return frdb.ancientRoot, nil
}
||||
|
||||
// Close implements io.Closer, closing both the fast key-value store as well as
|
||||
// the slow ancient tables.
|
||||
func (frdb *freezerdb) Close() error { |
||||
var errs []error |
||||
if err := frdb.AncientStore.Close(); err != nil { |
||||
errs = append(errs, err) |
||||
} |
||||
if err := frdb.KeyValueStore.Close(); err != nil { |
||||
errs = append(errs, err) |
||||
} |
||||
if len(errs) != 0 { |
||||
return fmt.Errorf("%v", errs) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// nofreezedb is a database wrapper that disables freezer data retrievals.
// Every ancient-store method returns errNotSupported (except ReadAncients, see below).
type nofreezedb struct {
	ethdb.KeyValueStore
}
||||
|
||||
// HasAncient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) HasAncient(kind string, number uint64) (bool, error) {
	return false, errNotSupported
}
||||
|
||||
// Ancient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
	return nil, errNotSupported
}
||||
|
||||
// AncientRange returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
	return nil, errNotSupported
}
||||
|
||||
// Ancients returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancients() (uint64, error) {
	return 0, errNotSupported
}
||||
|
||||
// Tail returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Tail() (uint64, error) {
	return 0, errNotSupported
}
||||
|
||||
// AncientSize returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
	return 0, errNotSupported
}
||||
|
||||
// ModifyAncients is not supported.
func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) {
	return 0, errNotSupported
}
||||
|
||||
// TruncateHead returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateHead(items uint64) error {
	return errNotSupported
}
||||
|
||||
// TruncateTail returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateTail(items uint64) error {
	return errNotSupported
}
||||
|
||||
// Sync returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Sync() error {
	return errNotSupported
}
||||
|
||||
// ReadAncients runs fn against this database as the ancient reader.
func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
	// Unlike other ancient-related methods, this method does not return
	// errNotSupported when invoked.
	// The reason for this is that the caller might want to do several things:
	// 1. Check if something is in freezer,
	// 2. If not, check leveldb.
	//
	// This will work, since the ancient-checks inside 'fn' will return errors,
	// and the leveldb work will continue.
	//
	// If we instead were to return errNotSupported here, then the caller would
	// have to explicitly check for that, having an extra clause to do the
	// non-ancient operations.
	return fn(db)
}
||||
|
||||
// MigrateTable processes the entries in a given table in sequence
// converting them to a new format if they're of an old format.
// Not supported without a freezer backend.
func (db *nofreezedb) MigrateTable(kind string, convert convertLegacyFn) error {
	return errNotSupported
}
||||
|
||||
// AncientDatadir returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientDatadir() (string, error) {
	return "", errNotSupported
}
||||
|
||||
// NewDatabase creates a high level database on top of a given key-value data
// store without a freezer moving immutable chain segments into cold storage.
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
	return &nofreezedb{KeyValueStore: db}
}
||||
|
||||
// resolveChainFreezerDir is a helper function which resolves the absolute path
|
||||
// of chain freezer by considering backward compatibility.
|
||||
// This function is NOT used, just ported over from the Ethereum
|
||||
func resolveChainFreezerDir(ancient string) string { |
||||
// Check if the chain freezer is already present in the specified
|
||||
// sub folder, if not then two possibilities:
|
||||
// - chain freezer is not initialized
|
||||
// - chain freezer exists in legacy location (root ancient folder)
|
||||
freezer := path.Join(ancient, chainFreezerName) |
||||
if !common.FileExist(freezer) { |
||||
if !common.FileExist(ancient) { |
||||
// The entire ancient store is not initialized, still use the sub
|
||||
// folder for initialization.
|
||||
} else { |
||||
// Ancient root is already initialized, then we hold the assumption
|
||||
// that chain freezer is also initialized and located in root folder.
|
||||
// In this case fallback to legacy location.
|
||||
freezer = ancient |
||||
log.Info("Found legacy ancient chain path", "location", ancient) |
||||
} |
||||
} |
||||
return freezer |
||||
} |
||||
|
||||
// NewMemoryDatabase creates an ephemeral in-memory key-value database without a
// freezer moving immutable chain segments into cold storage.
func NewMemoryDatabase() ethdb.Database {
	return NewDatabase(memorydb.New())
}
||||
|
||||
// NewMemoryDatabaseWithCap creates an ephemeral in-memory key-value database
// with an initial starting capacity, but without a freezer moving immutable
// chain segments into cold storage.
func NewMemoryDatabaseWithCap(size int) ethdb.Database {
	return NewDatabase(memorydb.NewWithCap(size))
}
||||
|
||||
// NewLevelDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
	db, err := leveldb.New(file, cache, handles, namespace, readonly)
	if err != nil {
		return nil, err
	}
	log.Info("Using LevelDB as the backing database")
	return NewDatabase(db), nil
}
||||
|
||||
// Known on-disk database engine identifiers, as reported by hasPreexistingDb
// and accepted in OpenOptions.Type.
const (
	dbPebble  = "pebble"
	dbLeveldb = "leveldb"
)
||||
|
||||
// hasPreexistingDb checks the given data directory whether a database is already
|
||||
// instantiated at that location, and if so, returns the type of database (or the
|
||||
// empty string).
|
||||
func hasPreexistingDb(path string) string { |
||||
if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil { |
||||
return "" // No pre-existing db
|
||||
} |
||||
if matches, err := filepath.Glob(filepath.Join(path, "OPTIONS*")); len(matches) > 0 || err != nil { |
||||
if err != nil { |
||||
panic(err) // only possible if the pattern is malformed
|
||||
} |
||||
return dbPebble |
||||
} |
||||
return dbLeveldb |
||||
} |
||||
|
||||
// OpenOptions contains the options to apply when opening a database.
// OBS: If AncientsDirectory is empty, it indicates that no freezer is to be used.
type OpenOptions struct {
	Type              string // "leveldb" | "pebble"
	Directory         string // the datadir
	AncientsDirectory string // the ancients-dir
	Namespace         string // the namespace for database relevant metrics
	Cache             int    // the capacity(in megabytes) of the data caching
	Handles           int    // number of files to be open simultaneously
	ReadOnly          bool   // open the backing store in read-only mode when true
}
||||
|
||||
// openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble.
|
||||
//
|
||||
// type == null type != null
|
||||
// +----------------------------------------
|
||||
// db is non-existent | leveldb default | specified type
|
||||
// db is existent | from db | specified type (if compatible)
|
||||
func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) { |
||||
existingDb := hasPreexistingDb(o.Directory) |
||||
if len(existingDb) != 0 && len(o.Type) != 0 && o.Type != existingDb { |
||||
return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb) |
||||
} |
||||
if o.Type == dbPebble || existingDb == dbPebble { |
||||
if PebbleEnabled { |
||||
log.Info("Using pebble as the backing database") |
||||
return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) |
||||
} else { |
||||
return nil, errors.New("db.engine 'pebble' not supported on this platform") |
||||
} |
||||
} |
||||
if len(o.Type) != 0 && o.Type != dbLeveldb { |
||||
return nil, fmt.Errorf("unknown db.engine %v", o.Type) |
||||
} |
||||
log.Info("Using leveldb as the backing database") |
||||
// Use leveldb, either as default (no explicit choice), or pre-existing, or chosen explicitly
|
||||
return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) |
||||
} |
||||
|
||||
// counter is a uint64 tally that renders itself for the stats tables produced
// by InspectDatabase.
type counter uint64

// String returns the decimal representation of the counter.
// strconv is used instead of fmt.Sprintf: same output, no interface boxing
// or reflection on what can be a frequently-formatted value.
func (c counter) String() string {
	return strconv.FormatUint(uint64(c), 10)
}

// Percentage returns current as an integer percentage of the counter total.
// NOTE(review): a zero counter would cause a division-by-zero panic here, as
// in the original; callers must only invoke this with a non-zero total.
func (c counter) Percentage(current uint64) string {
	return strconv.FormatUint(current*100/uint64(c), 10)
}
||||
|
||||
// stat stores sizes and count for a parameter
type stat struct {
	size  common.StorageSize // accumulated byte size of all entries seen
	count counter            // number of entries accumulated
}

// Add size to the stat and increase the counter by 1
func (s *stat) Add(size common.StorageSize) {
	s.size += size
	s.count++
}

// Size returns the accumulated size as a human-readable string.
func (s *stat) Size() string {
	return s.size.String()
}

// Count returns the accumulated entry count as a decimal string.
func (s *stat) Count() string {
	return s.count.String()
}
||||
|
||||
// InspectDatabase traverses the entire database and checks the size
// of all different categories of data.
// This function is NOT used, just ported over from the Ethereum
func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
	it := db.NewIterator(keyPrefix, keyStart)
	defer it.Release()

	var (
		count  int64
		start  = time.Now()
		logged = time.Now()

		// Key-value store statistics
		headers         stat
		bodies          stat
		receipts        stat
		tds             stat
		numHashPairings stat
		hashNumPairings stat
		tries           stat
		codes           stat
		validatorCodes  stat
		txLookups       stat
		accountSnaps    stat
		storageSnaps    stat
		preimages       stat
		bloomBits       stat
		beaconHeaders   stat
		cliqueSnaps     stat

		// Les statistic
		chtTrieNodes   stat
		bloomTrieNodes stat

		// Meta- and unaccounted data
		metadata    stat
		unaccounted stat

		// Totals
		total common.StorageSize
	)
	// Inspect key-value database first. Each key is classified into exactly one
	// category by its prefix/suffix/length signature; order of cases matters
	// (e.g. the bare HashLength case for trie nodes must come after the
	// prefixed header/body/receipt cases).
	for it.Next() {
		var (
			key  = it.Key()
			size = common.StorageSize(len(key) + len(it.Value()))
		)
		total += size
		switch {
		case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength):
			headers.Add(size)
		case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength):
			bodies.Add(size)
		case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
			receipts.Add(size)
		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
			tds.Add(size)
		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
			numHashPairings.Add(size)
		case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
			hashNumPairings.Add(size)
		case len(key) == common.HashLength:
			tries.Add(size)
		case bytes.HasPrefix(key, CodePrefix) && len(key) == len(CodePrefix)+common.HashLength:
			codes.Add(size)
		case bytes.HasPrefix(key, ValidatorCodePrefix) && len(key) == len(ValidatorCodePrefix)+common.HashLength:
			validatorCodes.Add(size)
		case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
			txLookups.Add(size)
		case bytes.HasPrefix(key, SnapshotAccountPrefix) && len(key) == (len(SnapshotAccountPrefix)+common.HashLength):
			accountSnaps.Add(size)
		case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
			storageSnaps.Add(size)
		case bytes.HasPrefix(key, PreimagePrefix) && len(key) == (len(PreimagePrefix)+common.HashLength):
			preimages.Add(size)
		case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
			metadata.Add(size)
		case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
			metadata.Add(size)
		case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
			bloomBits.Add(size)
		case bytes.HasPrefix(key, BloomBitsIndexPrefix):
			bloomBits.Add(size)
		case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
			beaconHeaders.Add(size)
		case bytes.HasPrefix(key, CliqueSnapshotPrefix) && len(key) == 7+common.HashLength:
			cliqueSnaps.Add(size)
		case bytes.HasPrefix(key, ChtTablePrefix) ||
			bytes.HasPrefix(key, ChtIndexTablePrefix) ||
			bytes.HasPrefix(key, ChtPrefix): // Canonical hash trie
			chtTrieNodes.Add(size)
		case bytes.HasPrefix(key, BloomTrieTablePrefix) ||
			bytes.HasPrefix(key, BloomTrieIndexPrefix) ||
			bytes.HasPrefix(key, BloomTriePrefix): // Bloomtrie sub
			bloomTrieNodes.Add(size)
		default:
			// Check against the known singleton metadata keys; anything that
			// still doesn't match is counted as unaccounted data.
			var accounted bool
			for _, meta := range [][]byte{
				databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, headFinalizedBlockKey,
				lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
				snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
				uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
			} {
				if bytes.Equal(key, meta) {
					metadata.Add(size)
					accounted = true
					break
				}
			}
			if !accounted {
				unaccounted.Add(size)
			}
		}
		count++
		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
			log.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			logged = time.Now()
		}
	}
	// Display the database statistic of key-value store.
	stats := [][]string{
		{"Key-Value store", "Headers", headers.Size(), headers.Count()},
		{"Key-Value store", "Bodies", bodies.Size(), bodies.Count()},
		{"Key-Value store", "Receipt lists", receipts.Size(), receipts.Count()},
		{"Key-Value store", "Difficulties", tds.Size(), tds.Count()},
		{"Key-Value store", "Block number->hash", numHashPairings.Size(), numHashPairings.Count()},
		{"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()},
		{"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},
		{"Key-Value store", "Bloombit index", bloomBits.Size(), bloomBits.Count()},
		{"Key-Value store", "Contract codes", codes.Size(), codes.Count()},
		{"Key-Value store", "Validator codes", validatorCodes.Size(), validatorCodes.Count()},
		{"Key-Value store", "Trie nodes", tries.Size(), tries.Count()},
		{"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
		{"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
		{"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
		{"Key-Value store", "Beacon sync headers", beaconHeaders.Size(), beaconHeaders.Count()},
		{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
		{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
		{"Light client", "CHT trie nodes", chtTrieNodes.Size(), chtTrieNodes.Count()},
		{"Light client", "Bloom trie nodes", bloomTrieNodes.Size(), bloomTrieNodes.Count()},
	}
	// Inspect all registered append-only file store then.
	ancients, err := inspectFreezers(db)
	if err != nil {
		return err
	}
	for _, ancient := range ancients {
		for _, table := range ancient.sizes {
			stats = append(stats, []string{
				fmt.Sprintf("Ancient store (%s)", strings.Title(ancient.name)),
				strings.Title(table.name),
				table.size.String(),
				fmt.Sprintf("%d", ancient.count()),
			})
		}
		total += ancient.size()
	}
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Database", "Category", "Size", "Items"})
	table.SetFooter([]string{"", "Total", total.String(), " "})
	table.AppendBulk(stats)
	table.Render()

	if unaccounted.size > 0 {
		utils.Logger().Error().
			Interface("size", unaccounted.size).
			Interface("count", unaccounted.count).
			Msg("Database contains unaccounted data")
	}
	return nil
}
@ -0,0 +1,17 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package rawdb |
@ -0,0 +1,37 @@ |
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
|
||||
|
||||
//go:build arm64 || amd64
|
||||
|
||||
package rawdb |
||||
|
||||
import ( |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/ethdb/pebble" |
||||
) |
||||
|
||||
// PebbleEnabled reports that pebble is supported on this platform
// (64-bit architectures only, see the build constraint above).
const PebbleEnabled = true
||||
|
||||
// NewPebbleDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
	db, err := pebble.New(file, cache, handles, namespace, readonly)
	if err != nil {
		return nil, err
	}
	return NewDatabase(db), nil
}
@ -0,0 +1,34 @@ |
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//go:build !(arm64 || amd64)
|
||||
|
||||
package rawdb |
||||
|
||||
import ( |
||||
"errors" |
||||
|
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
) |
||||
|
||||
// PebbleEnabled reports that pebble is unsupported on this platform
// (32-bit architectures, see the build constraint above).
const PebbleEnabled = false
||||
|
||||
// NewPebbleDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
// On this platform pebble is compiled out, so it always fails.
func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
	return nil, errors.New("pebble is not supported on this platform")
}
@ -0,0 +1,47 @@ |
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package rawdb |
||||
|
||||
import "github.com/ethereum/go-ethereum/ethdb" |
||||
|
||||
// KeyLengthIterator is a wrapper for a database iterator that ensures only key-value pairs
|
||||
// with a specific key length will be returned.
|
||||
type KeyLengthIterator struct { |
||||
requiredKeyLength int |
||||
ethdb.Iterator |
||||
} |
||||
|
||||
// NewKeyLengthIterator returns a wrapped version of the iterator that will only return key-value
|
||||
// pairs where keys with a specific key length will be returned.
|
||||
func NewKeyLengthIterator(it ethdb.Iterator, keyLen int) ethdb.Iterator { |
||||
return &KeyLengthIterator{ |
||||
Iterator: it, |
||||
requiredKeyLength: keyLen, |
||||
} |
||||
} |
||||
|
||||
func (it *KeyLengthIterator) Next() bool { |
||||
// Return true as soon as a key with the required key length is discovered
|
||||
for it.Iterator.Next() { |
||||
if len(it.Iterator.Key()) == it.requiredKeyLength { |
||||
return true |
||||
} |
||||
} |
||||
|
||||
// Return false when we exhaust the keys in the underlying iterator.
|
||||
return false |
||||
} |
@ -0,0 +1,60 @@ |
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package rawdb |
||||
|
||||
import ( |
||||
"encoding/binary" |
||||
"testing" |
||||
) |
||||
|
||||
// TestKeyLengthIterator verifies that KeyLengthIterator yields exactly the keys
// of the requested length and silently skips longer ones.
func TestKeyLengthIterator(t *testing.T) {
	db := NewMemoryDatabase()

	keyLen := 8
	expectedKeys := make(map[string]struct{})
	// Populate pairs of keys per index: one of the target length and one twice
	// as long; only the former must survive the iterator.
	for i := 0; i < 100; i++ {
		key := make([]byte, keyLen)
		binary.BigEndian.PutUint64(key, uint64(i))
		if err := db.Put(key, []byte{0x1}); err != nil {
			t.Fatal(err)
		}
		expectedKeys[string(key)] = struct{}{}

		longerKey := make([]byte, keyLen*2)
		binary.BigEndian.PutUint64(longerKey, uint64(i))
		if err := db.Put(longerKey, []byte{0x1}); err != nil {
			t.Fatal(err)
		}
	}

	it := NewKeyLengthIterator(db.NewIterator(nil, nil), keyLen)
	for it.Next() {
		key := it.Key()
		_, exists := expectedKeys[string(key)]
		if !exists {
			t.Fatalf("Found unexpected key %d", binary.BigEndian.Uint64(key))
		}
		delete(expectedKeys, string(key))
		if len(key) != keyLen {
			t.Fatalf("Found unexpected key in key length iterator with length %d", len(key))
		}
	}

	if len(expectedKeys) != 0 {
		t.Fatalf("Expected all keys of length %d to be removed from expected keys during iteration", keyLen)
	}
}
@ -0,0 +1,313 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package rawdb |
||||
|
||||
import ( |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
) |
||||
|
||||
// table is a wrapper around a database that prefixes each key access with a pre-
// configured string.
type table struct {
	db     ethdb.Database // underlying database
	prefix string         // prefix prepended to every key on access
}
||||
|
||||
// NewTable returns a database object that prefixes all keys with a given string.
func NewTable(db ethdb.Database, prefix string) ethdb.Database {
	return &table{
		db:     db,
		prefix: prefix,
	}
}
||||
|
||||
// Close is a noop to implement the Database interface.
// The wrapped database is shared and must be closed by its owner.
func (t *table) Close() error {
	return nil
}
||||
|
||||
// Has retrieves if a prefixed version of a key is present in the database.
func (t *table) Has(key []byte) (bool, error) {
	return t.db.Has(append([]byte(t.prefix), key...))
}
||||
|
||||
// Get retrieves the given prefixed key if it's present in the database.
func (t *table) Get(key []byte) ([]byte, error) {
	return t.db.Get(append([]byte(t.prefix), key...))
}
||||
|
||||
// HasAncient is a noop passthrough that just forwards the request to the underlying
|
||||
// database.
|
||||
func (t *table) HasAncient(kind string, number uint64) (bool, error) { |
||||
return t.db.HasAncient(kind, number) |
||||
} |
||||
|
||||
// Ancient is a noop passthrough that just forwards the request to the underlying
|
||||
// database.
|
||||
func (t *table) Ancient(kind string, number uint64) ([]byte, error) { |
||||
return t.db.Ancient(kind, number) |
||||
} |
||||
|
||||
// AncientRange is a noop passthrough that just forwards the request to the underlying
|
||||
// database.
|
||||
func (t *table) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { |
||||
return t.db.AncientRange(kind, start, count, maxBytes) |
||||
} |
||||
|
||||
// Ancients is a noop passthrough that just forwards the request to the underlying
|
||||
// database.
|
||||
func (t *table) Ancients() (uint64, error) { |
||||
return t.db.Ancients() |
||||
} |
||||
|
||||
// Tail is a noop passthrough that just forwards the request to the underlying
|
||||
// database.
|
||||
func (t *table) Tail() (uint64, error) { |
||||
return t.db.Tail() |
||||
} |
||||
|
||||
// AncientSize is a noop passthrough that just forwards the request to the underlying
|
||||
// database.
|
||||
func (t *table) AncientSize(kind string) (uint64, error) { |
||||
return t.db.AncientSize(kind) |
||||
} |
||||
|
||||
// ModifyAncients runs an ancient write operation on the underlying database.
|
||||
func (t *table) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, error) { |
||||
return t.db.ModifyAncients(fn) |
||||
} |
||||
|
||||
func (t *table) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) { |
||||
return t.db.ReadAncients(fn) |
||||
} |
||||
|
||||
// TruncateHead is a noop passthrough that just forwards the request to the underlying
|
||||
// database.
|
||||
func (t *table) TruncateHead(items uint64) error { |
||||
return t.db.TruncateHead(items) |
||||
} |
||||
|
||||
// TruncateTail is a noop passthrough that just forwards the request to the underlying
|
||||
// database.
|
||||
func (t *table) TruncateTail(items uint64) error { |
||||
return t.db.TruncateTail(items) |
||||
} |
||||
|
||||
// Sync is a noop passthrough that just forwards the request to the underlying
|
||||
// database.
|
||||
func (t *table) Sync() error { |
||||
return t.db.Sync() |
||||
} |
||||
|
||||
// MigrateTable processes the entries in a given table in sequence
|
||||
// converting them to a new format if they're of an old format.
|
||||
func (t *table) MigrateTable(kind string, convert convertLegacyFn) error { |
||||
return t.db.MigrateTable(kind, convert) |
||||
} |
||||
|
||||
// AncientDatadir returns the ancient datadir of the underlying database.
|
||||
func (t *table) AncientDatadir() (string, error) { |
||||
return t.db.AncientDatadir() |
||||
} |
||||
|
||||
// Put inserts the given value into the database at a prefixed version of the
|
||||
// provided key.
|
||||
func (t *table) Put(key []byte, value []byte) error { |
||||
return t.db.Put(append([]byte(t.prefix), key...), value) |
||||
} |
||||
|
||||
// Delete removes the given prefixed key from the database.
|
||||
func (t *table) Delete(key []byte) error { |
||||
return t.db.Delete(append([]byte(t.prefix), key...)) |
||||
} |
||||
|
||||
// NewIterator creates a binary-alphabetical iterator over a subset
|
||||
// of database content with a particular key prefix, starting at a particular
|
||||
// initial key (or after, if it does not exist).
|
||||
func (t *table) NewIterator(prefix []byte, start []byte) ethdb.Iterator { |
||||
innerPrefix := append([]byte(t.prefix), prefix...) |
||||
iter := t.db.NewIterator(innerPrefix, start) |
||||
return &tableIterator{ |
||||
iter: iter, |
||||
prefix: t.prefix, |
||||
} |
||||
} |
||||
|
||||
// NewIteratorWithPrefix creates a binary-alphabetical iterator over a subset
|
||||
// of database content with a particular key prefix.
|
||||
func (t *table) NewIteratorWithPrefix(prefix []byte) ethdb.Iterator { |
||||
return t.NewIterator(prefix, nil) |
||||
} |
||||
|
||||
// Stat returns a particular internal stat of the database.
|
||||
func (t *table) Stat(property string) (string, error) { |
||||
return t.db.Stat(property) |
||||
} |
||||
|
||||
// Compact flattens the underlying data store for the given key range. In essence,
// deleted and overwritten versions are discarded, and the data is rearranged to
// reduce the cost of operations needed to access them.
//
// A nil start is treated as a key before all keys in the data store; a nil limit
// is treated as a key after all keys in the data store. If both is nil then it
// will compact entire data store.
func (t *table) Compact(start []byte, limit []byte) error {
	// If no start was specified, use the table prefix as the first value
	if start == nil {
		start = []byte(t.prefix)
	} else {
		start = append([]byte(t.prefix), start...)
	}
	// If no limit was specified, use the first element not matching the prefix
	// as the limit. That element is computed by treating the prefix as a
	// big-endian number and incrementing it by one, propagating carries.
	if limit == nil {
		limit = []byte(t.prefix)
		for i := len(limit) - 1; i >= 0; i-- {
			// Bump the current character, stopping if it doesn't overflow
			limit[i]++
			if limit[i] > 0 {
				break
			}
			// Character overflown (wrapped to 0), proceed to the next or nil
			// if this was the last: a prefix of all 0xff bytes has no upper
			// bound, so compact to the end of the data store.
			if i == 0 {
				limit = nil
			}
		}
	}
	// Range correctly calculated based on table prefix, delegate down
	return t.db.Compact(start, limit)
}
||||
|
||||
// NewBatch creates a write-only database that buffers changes to its host db
|
||||
// until a final write is called, each operation prefixing all keys with the
|
||||
// pre-configured string.
|
||||
func (t *table) NewBatch() ethdb.Batch { |
||||
return &tableBatch{t.db.NewBatch(), t.prefix} |
||||
} |
||||
|
||||
// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
|
||||
func (t *table) NewBatchWithSize(size int) ethdb.Batch { |
||||
return &tableBatch{t.db.NewBatchWithSize(size), t.prefix} |
||||
} |
||||
|
||||
// NewSnapshot creates a database snapshot based on the current state.
|
||||
// The created snapshot will not be affected by all following mutations
|
||||
// happened on the database.
|
||||
func (t *table) NewSnapshot() (ethdb.Snapshot, error) { |
||||
return t.db.NewSnapshot() |
||||
} |
||||
|
||||
// tableBatch is a wrapper around a database batch that prefixes each key access
// with a pre-configured string before handing it to the wrapped batch.
type tableBatch struct {
	batch  ethdb.Batch // Underlying batch receiving the prefixed operations
	prefix string      // String prepended to every key before delegation
}
||||
|
||||
// Put inserts the given value into the batch for later committing.
|
||||
func (b *tableBatch) Put(key, value []byte) error { |
||||
return b.batch.Put(append([]byte(b.prefix), key...), value) |
||||
} |
||||
|
||||
// Delete inserts the a key removal into the batch for later committing.
|
||||
func (b *tableBatch) Delete(key []byte) error { |
||||
return b.batch.Delete(append([]byte(b.prefix), key...)) |
||||
} |
||||
|
||||
// ValueSize retrieves the amount of data queued up for writing.
|
||||
func (b *tableBatch) ValueSize() int { |
||||
return b.batch.ValueSize() |
||||
} |
||||
|
||||
// Write flushes any accumulated data to disk.
|
||||
func (b *tableBatch) Write() error { |
||||
return b.batch.Write() |
||||
} |
||||
|
||||
// Reset resets the batch for reuse.
|
||||
func (b *tableBatch) Reset() { |
||||
b.batch.Reset() |
||||
} |
||||
|
||||
// tableReplayer is a wrapper around a batch replayer which truncates
// the added prefix from every replayed key before forwarding it, so the
// consumer sees the same keys the caller originally supplied.
type tableReplayer struct {
	w      ethdb.KeyValueWriter // Destination writer receiving unprefixed keys
	prefix string               // Prefix stripped from every replayed key
}
||||
|
||||
// Put implements the interface KeyValueWriter.
|
||||
func (r *tableReplayer) Put(key []byte, value []byte) error { |
||||
trimmed := key[len(r.prefix):] |
||||
return r.w.Put(trimmed, value) |
||||
} |
||||
|
||||
// Delete implements the interface KeyValueWriter.
|
||||
func (r *tableReplayer) Delete(key []byte) error { |
||||
trimmed := key[len(r.prefix):] |
||||
return r.w.Delete(trimmed) |
||||
} |
||||
|
||||
// Replay replays the batch contents.
|
||||
func (b *tableBatch) Replay(w ethdb.KeyValueWriter) error { |
||||
return b.batch.Replay(&tableReplayer{w: w, prefix: b.prefix}) |
||||
} |
||||
|
||||
// tableIterator is a wrapper around a database iterator that strips the
// pre-configured prefix from each key it exposes.
type tableIterator struct {
	iter   ethdb.Iterator // Underlying iterator over the prefixed keyspace
	prefix string         // Prefix stripped from every exposed key
}
||||
|
||||
// Next moves the iterator to the next key/value pair. It returns whether the
|
||||
// iterator is exhausted.
|
||||
func (iter *tableIterator) Next() bool { |
||||
return iter.iter.Next() |
||||
} |
||||
|
||||
// Error returns any accumulated error. Exhausting all the key/value pairs
|
||||
// is not considered to be an error.
|
||||
func (iter *tableIterator) Error() error { |
||||
return iter.iter.Error() |
||||
} |
||||
|
||||
// Key returns the key of the current key/value pair, or nil if done. The caller
|
||||
// should not modify the contents of the returned slice, and its contents may
|
||||
// change on the next call to Next.
|
||||
func (iter *tableIterator) Key() []byte { |
||||
key := iter.iter.Key() |
||||
if key == nil { |
||||
return nil |
||||
} |
||||
return key[len(iter.prefix):] |
||||
} |
||||
|
||||
// Value returns the value of the current key/value pair, or nil if done. The
|
||||
// caller should not modify the contents of the returned slice, and its contents
|
||||
// may change on the next call to Next.
|
||||
func (iter *tableIterator) Value() []byte { |
||||
return iter.iter.Value() |
||||
} |
||||
|
||||
// Release releases associated resources. Release should always succeed and can
|
||||
// be called multiple times without causing error.
|
||||
func (iter *tableIterator) Release() { |
||||
iter.iter.Release() |
||||
} |
@ -0,0 +1,128 @@ |
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package rawdb |
||||
|
||||
import ( |
||||
"bytes" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
) |
||||
|
||||
// TestTableDatabase exercises the prefixed table wrapper with a non-empty prefix.
func TestTableDatabase(t *testing.T) { testTableDatabase(t, "prefix") }

// TestEmptyPrefixTableDatabase exercises the wrapper with an empty prefix,
// which must behave as a transparent passthrough.
func TestEmptyPrefixTableDatabase(t *testing.T) { testTableDatabase(t, "") }
||||
|
||||
// testReplayer records the keys of replayed Put/Delete operations so tests
// can assert the table prefix was stripped during Replay.
type testReplayer struct {
	puts [][]byte // Keys seen via Put, in replay order
	dels [][]byte // Keys seen via Delete, in replay order
}
||||
|
||||
func (r *testReplayer) Put(key []byte, value []byte) error { |
||||
r.puts = append(r.puts, key) |
||||
return nil |
||||
} |
||||
|
||||
func (r *testReplayer) Delete(key []byte) error { |
||||
r.dels = append(r.dels, key) |
||||
return nil |
||||
} |
||||
|
||||
func testTableDatabase(t *testing.T, prefix string) { |
||||
db := NewTable(NewMemoryDatabase(), prefix) |
||||
|
||||
var entries = []struct { |
||||
key []byte |
||||
value []byte |
||||
}{ |
||||
{[]byte{0x01, 0x02}, []byte{0x0a, 0x0b}}, |
||||
{[]byte{0x03, 0x04}, []byte{0x0c, 0x0d}}, |
||||
{[]byte{0x05, 0x06}, []byte{0x0e, 0x0f}}, |
||||
|
||||
{[]byte{0xff, 0xff, 0x01}, []byte{0x1a, 0x1b}}, |
||||
{[]byte{0xff, 0xff, 0x02}, []byte{0x1c, 0x1d}}, |
||||
{[]byte{0xff, 0xff, 0x03}, []byte{0x1e, 0x1f}}, |
||||
} |
||||
|
||||
// Test Put/Get operation
|
||||
for _, entry := range entries { |
||||
db.Put(entry.key, entry.value) |
||||
} |
||||
for _, entry := range entries { |
||||
got, err := db.Get(entry.key) |
||||
if err != nil { |
||||
t.Fatalf("Failed to get value: %v", err) |
||||
} |
||||
if !bytes.Equal(got, entry.value) { |
||||
t.Fatalf("Value mismatch: want=%v, got=%v", entry.value, got) |
||||
} |
||||
} |
||||
|
||||
// Test batch operation
|
||||
db = NewTable(NewMemoryDatabase(), prefix) |
||||
batch := db.NewBatch() |
||||
for _, entry := range entries { |
||||
batch.Put(entry.key, entry.value) |
||||
} |
||||
batch.Write() |
||||
for _, entry := range entries { |
||||
got, err := db.Get(entry.key) |
||||
if err != nil { |
||||
t.Fatalf("Failed to get value: %v", err) |
||||
} |
||||
if !bytes.Equal(got, entry.value) { |
||||
t.Fatalf("Value mismatch: want=%v, got=%v", entry.value, got) |
||||
} |
||||
} |
||||
|
||||
// Test batch replayer
|
||||
r := &testReplayer{} |
||||
batch.Replay(r) |
||||
for index, entry := range entries { |
||||
got := r.puts[index] |
||||
if !bytes.Equal(got, entry.key) { |
||||
t.Fatalf("Key mismatch: want=%v, got=%v", entry.key, got) |
||||
} |
||||
} |
||||
|
||||
check := func(iter ethdb.Iterator, expCount, index int) { |
||||
count := 0 |
||||
for iter.Next() { |
||||
key, value := iter.Key(), iter.Value() |
||||
if !bytes.Equal(key, entries[index].key) { |
||||
t.Fatalf("Key mismatch: want=%v, got=%v", entries[index].key, key) |
||||
} |
||||
if !bytes.Equal(value, entries[index].value) { |
||||
t.Fatalf("Value mismatch: want=%v, got=%v", entries[index].value, value) |
||||
} |
||||
index += 1 |
||||
count++ |
||||
} |
||||
if count != expCount { |
||||
t.Fatalf("Wrong number of elems, exp %d got %d", expCount, count) |
||||
} |
||||
iter.Release() |
||||
} |
||||
// Test iterators
|
||||
check(db.NewIterator(nil, nil), 6, 0) |
||||
// Test iterators with prefix
|
||||
check(db.NewIterator([]byte{0xff, 0xff}, nil), 3, 3) |
||||
// Test iterators with start point
|
||||
check(db.NewIterator(nil, []byte{0xff, 0xff, 0x02}), 2, 4) |
||||
// Test iterators with prefix and start point
|
||||
check(db.NewIterator([]byte{0xee}, nil), 0, 0) |
||||
check(db.NewIterator(nil, []byte{0x00}), 6, 0) |
||||
} |
Binary file not shown.
@ -0,0 +1,136 @@ |
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package state |
||||
|
||||
import ( |
||||
"github.com/ethereum/go-ethereum/common" |
||||
) |
||||
|
||||
// accessList is an EIP-2930 style access list: a set of addresses plus, per
// address, an optional set of storage slots. addresses maps an address to an
// index into slots, or -1 when the address has no slots.
type accessList struct {
	addresses map[common.Address]int        // Address -> index into slots, -1 if slotless
	slots     []map[common.Hash]struct{}    // Per-address storage slot sets
}
||||
|
||||
// ContainsAddress returns true if the address is in the access list.
|
||||
func (al *accessList) ContainsAddress(address common.Address) bool { |
||||
_, ok := al.addresses[address] |
||||
return ok |
||||
} |
||||
|
||||
// Contains checks if a slot within an account is present in the access list, returning
|
||||
// separate flags for the presence of the account and the slot respectively.
|
||||
func (al *accessList) Contains(address common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) { |
||||
idx, ok := al.addresses[address] |
||||
if !ok { |
||||
// no such address (and hence zero slots)
|
||||
return false, false |
||||
} |
||||
if idx == -1 { |
||||
// address yes, but no slots
|
||||
return true, false |
||||
} |
||||
_, slotPresent = al.slots[idx][slot] |
||||
return true, slotPresent |
||||
} |
||||
|
||||
// newAccessList creates a new accessList.
|
||||
func newAccessList() *accessList { |
||||
return &accessList{ |
||||
addresses: make(map[common.Address]int), |
||||
} |
||||
} |
||||
|
||||
// Copy creates an independent copy of an accessList.
|
||||
func (a *accessList) Copy() *accessList { |
||||
cp := newAccessList() |
||||
for k, v := range a.addresses { |
||||
cp.addresses[k] = v |
||||
} |
||||
cp.slots = make([]map[common.Hash]struct{}, len(a.slots)) |
||||
for i, slotMap := range a.slots { |
||||
newSlotmap := make(map[common.Hash]struct{}, len(slotMap)) |
||||
for k := range slotMap { |
||||
newSlotmap[k] = struct{}{} |
||||
} |
||||
cp.slots[i] = newSlotmap |
||||
} |
||||
return cp |
||||
} |
||||
|
||||
// AddAddress adds an address to the access list, and returns 'true' if the operation
|
||||
// caused a change (addr was not previously in the list).
|
||||
func (al *accessList) AddAddress(address common.Address) bool { |
||||
if _, present := al.addresses[address]; present { |
||||
return false |
||||
} |
||||
al.addresses[address] = -1 |
||||
return true |
||||
} |
||||
|
||||
// AddSlot adds the specified (addr, slot) combo to the access list.
// Return values are:
//   - address added
//   - slot added
//
// For any 'true' value returned, a corresponding journal entry must be made.
func (al *accessList) AddSlot(address common.Address, slot common.Hash) (addrChange bool, slotChange bool) {
	idx, addrPresent := al.addresses[address]
	if !addrPresent || idx == -1 {
		// Address not present, or addr present but no slots there: allocate a
		// fresh slot set at the end of al.slots and point the address at it.
		al.addresses[address] = len(al.slots)
		slotmap := map[common.Hash]struct{}{slot: {}}
		al.slots = append(al.slots, slotmap)
		// addrChange is true only when the address itself was newly added.
		return !addrPresent, true
	}
	// There is already an (address,slot) mapping
	slotmap := al.slots[idx]
	if _, ok := slotmap[slot]; !ok {
		slotmap[slot] = struct{}{}
		// Journal add slot change
		return false, true
	}
	// No changes required
	return false, false
}
||||
|
||||
// DeleteSlot removes an (address, slot)-tuple from the access list.
// This operation needs to be performed in the same order as the addition happened.
// This method is meant to be used by the journal, which maintains ordering of
// operations.
func (al *accessList) DeleteSlot(address common.Address, slot common.Hash) {
	idx, addrOk := al.addresses[address]
	// There are two ways this can fail
	if !addrOk {
		panic("reverting slot change, address not present in list")
	}
	slotmap := al.slots[idx]
	delete(slotmap, slot)
	// If that was the last (first) slot, remove it
	// Since additions and rollbacks are always performed in order,
	// we can delete the item without worrying about screwing up later indices:
	// the emptied slot set is necessarily the final element of al.slots, so
	// truncating at idx drops exactly that entry.
	if len(slotmap) == 0 {
		al.slots = al.slots[:idx]
		// Keep the address itself, but mark it slotless again.
		al.addresses[address] = -1
	}
}
||||
|
||||
// DeleteAddress removes an address from the access list. This operation
|
||||
// needs to be performed in the same order as the addition happened.
|
||||
// This method is meant to be used by the journal, which maintains ordering of
|
||||
// operations.
|
||||
func (al *accessList) DeleteAddress(address common.Address) { |
||||
delete(al.addresses, address) |
||||
} |
@ -0,0 +1,161 @@ |
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package state |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/ethereum/go-ethereum/trie" |
||||
) |
||||
|
||||
// NodeIterator is an iterator to traverse the entire state trie post-order,
// including all of the contract code and contract state tries.
type NodeIterator struct {
	state *DB // State being iterated

	stateIt trie.NodeIterator // Primary iterator for the global state trie
	dataIt  trie.NodeIterator // Secondary iterator for the data trie of a contract

	accountHash common.Hash // Hash of the node containing the account
	codeHash    common.Hash // Hash of the contract source code
	code        []byte      // Source code associated with a contract

	Hash   common.Hash // Hash of the current entry being iterated (nil if not standalone)
	Parent common.Hash // Hash of the first full ancestor node (nil if current is the root)

	Error error // Failure set in case of an internal error in the iterator
}
||||
|
||||
// NewNodeIterator creates an post-order state node iterator.
|
||||
func NewNodeIterator(state *DB) *NodeIterator { |
||||
return &NodeIterator{ |
||||
state: state, |
||||
} |
||||
} |
||||
|
||||
// Next moves the iterator to the next node, returning whether there are any
|
||||
// further nodes. In case of an internal error this method returns false and
|
||||
// sets the Error field to the encountered failure.
|
||||
func (it *NodeIterator) Next() bool { |
||||
// If the iterator failed previously, don't do anything
|
||||
if it.Error != nil { |
||||
return false |
||||
} |
||||
// Otherwise step forward with the iterator and report any errors
|
||||
if err := it.step(); err != nil { |
||||
it.Error = err |
||||
return false |
||||
} |
||||
return it.retrieve() |
||||
} |
||||
|
||||
// step moves the iterator to the next entry of the state trie.
|
||||
func (it *NodeIterator) step() error { |
||||
// Abort if we reached the end of the iteration
|
||||
if it.state == nil { |
||||
return nil |
||||
} |
||||
// Initialize the iterator if we've just started
|
||||
if it.stateIt == nil { |
||||
it.stateIt = it.state.trie.NodeIterator(nil) |
||||
} |
||||
// If we had data nodes previously, we surely have at least state nodes
|
||||
if it.dataIt != nil { |
||||
if cont := it.dataIt.Next(true); !cont { |
||||
if it.dataIt.Error() != nil { |
||||
return it.dataIt.Error() |
||||
} |
||||
it.dataIt = nil |
||||
} |
||||
return nil |
||||
} |
||||
// If we had source code previously, discard that
|
||||
if it.code != nil { |
||||
it.code = nil |
||||
return nil |
||||
} |
||||
// Step to the next state trie node, terminating if we're out of nodes
|
||||
if cont := it.stateIt.Next(true); !cont { |
||||
if it.stateIt.Error() != nil { |
||||
return it.stateIt.Error() |
||||
} |
||||
it.state, it.stateIt = nil, nil |
||||
return nil |
||||
} |
||||
// If the state trie node is an internal entry, leave as is
|
||||
if !it.stateIt.Leaf() { |
||||
return nil |
||||
} |
||||
// Otherwise we've reached an account node, initiate data iteration
|
||||
var account Account |
||||
if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil { |
||||
return err |
||||
} |
||||
dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, common.BytesToHash(it.stateIt.LeafKey()), account.Root) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
it.dataIt = dataTrie.NodeIterator(nil) |
||||
if !it.dataIt.Next(true) { |
||||
it.dataIt = nil |
||||
} |
||||
if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) { |
||||
it.codeHash = common.BytesToHash(account.CodeHash) |
||||
addrHash := common.BytesToHash(it.stateIt.LeafKey()) |
||||
it.code, err = it.state.db.ContractCode(addrHash, common.BytesToHash(account.CodeHash)) |
||||
if err != nil { |
||||
return fmt.Errorf("code %x: %v", account.CodeHash, err) |
||||
} |
||||
if it.code == nil || len(it.code) == 0 { |
||||
it.code, err = it.state.db.ValidatorCode(addrHash, common.BytesToHash(account.CodeHash)) |
||||
if err != nil { |
||||
return fmt.Errorf("code %x: %v", account.CodeHash, err) |
||||
} |
||||
} |
||||
} |
||||
it.accountHash = it.stateIt.Parent() |
||||
return nil |
||||
} |
||||
|
||||
// retrieve pulls and caches the current state entry the iterator is traversing.
|
||||
// The method returns whether there are any more data left for inspection.
|
||||
func (it *NodeIterator) retrieve() bool { |
||||
// Clear out any previously set values
|
||||
it.Hash = common.Hash{} |
||||
|
||||
// If the iteration's done, return no available data
|
||||
if it.state == nil { |
||||
return false |
||||
} |
||||
// Otherwise retrieve the current entry
|
||||
switch { |
||||
case it.dataIt != nil: |
||||
it.Hash, it.Parent = it.dataIt.Hash(), it.dataIt.Parent() |
||||
if it.Parent == (common.Hash{}) { |
||||
it.Parent = it.accountHash |
||||
} |
||||
case it.code != nil: |
||||
it.Hash, it.Parent = it.codeHash, it.accountHash |
||||
case it.stateIt != nil: |
||||
it.Hash, it.Parent = it.stateIt.Hash(), it.stateIt.Parent() |
||||
} |
||||
return true |
||||
} |
@ -0,0 +1,142 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package state |
||||
|
||||
import ( |
||||
"math/big" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/harmony-one/harmony/core/rawdb" |
||||
) |
||||
|
||||
// testAccount is the data associated with an account used by the state tests.
type testAccount struct {
	address common.Address // Account address
	balance *big.Int       // Expected balance after state construction
	nonce   uint64         // Expected nonce after state construction
	code    []byte         // Expected contract code (nil for EOAs)
}
||||
|
||||
// makeTestState create a sample test state to test node-wise reconstruction.
|
||||
func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) { |
||||
// Create an empty state
|
||||
db := rawdb.NewMemoryDatabase() |
||||
sdb := NewDatabase(db) |
||||
state, _ := New(common.Hash{}, sdb, nil) |
||||
|
||||
// Fill it with some arbitrary data
|
||||
var accounts []*testAccount |
||||
for i := byte(0); i < 96; i++ { |
||||
obj := state.GetOrNewStateObject(common.BytesToAddress([]byte{i})) |
||||
acc := &testAccount{address: common.BytesToAddress([]byte{i})} |
||||
|
||||
obj.AddBalance(big.NewInt(int64(11 * i))) |
||||
acc.balance = big.NewInt(int64(11 * i)) |
||||
|
||||
obj.SetNonce(uint64(42 * i)) |
||||
acc.nonce = uint64(42 * i) |
||||
|
||||
if i%3 == 0 { |
||||
obj.SetCode(crypto.Keccak256Hash([]byte{i, i, i, i, i}), []byte{i, i, i, i, i}, false) |
||||
acc.code = []byte{i, i, i, i, i} |
||||
} |
||||
if i%5 == 0 { |
||||
for j := byte(0); j < 5; j++ { |
||||
hash := crypto.Keccak256Hash([]byte{i, i, i, i, i, j, j}) |
||||
obj.SetState(sdb, hash, hash) |
||||
} |
||||
} |
||||
state.updateStateObject(obj) |
||||
accounts = append(accounts, acc) |
||||
} |
||||
root, _ := state.Commit(false) |
||||
|
||||
// Return the generated state
|
||||
return db, sdb, root, accounts |
||||
} |
||||
|
||||
// TestNodeIteratorCoverage tests that the node iterator indeed walks over the
// entire database contents: every hash the iterator reports must exist on
// disk (as a trie node or code blob), and every code blob on disk must have
// been reported by the iterator.
func TestNodeIteratorCoverage(t *testing.T) {
	// Create some arbitrary test state to iterate and flush it to disk.
	db, sdb, root, _ := makeTestState()
	sdb.TrieDB().Commit(root, false)

	state, err := New(root, sdb, nil)
	if err != nil {
		t.Fatalf("failed to create state trie at %x: %v", root, err)
	}
	// Gather all the node hashes found by the iterator
	hashes := make(map[common.Hash]struct{})
	for it := NewNodeIterator(state); it.Next(); {
		if it.Hash != (common.Hash{}) {
			hashes[it.Hash] = struct{}{}
		}
	}
	// Check in-disk nodes: collect every trie-node hash present in the raw DB.
	var (
		seenNodes = make(map[common.Hash]struct{})
		seenCodes = make(map[common.Hash]struct{})
	)
	it := db.NewIterator(nil, nil)
	for it.Next() {
		ok, hash := isTrieNode(sdb.TrieDB().Scheme(), it.Key(), it.Value())
		if !ok {
			continue
		}
		seenNodes[hash] = struct{}{}
	}
	it.Release()

	// Check in-disk codes: every stored code blob must have been reported.
	it = db.NewIterator(nil, nil)
	for it.Next() {
		ok, hash := rawdb.IsCodeKey(it.Key())
		if !ok {
			continue
		}
		if _, ok := hashes[common.BytesToHash(hash)]; !ok {
			t.Errorf("state entry not reported %x", it.Key())
		}
		seenCodes[common.BytesToHash(hash)] = struct{}{}
	}
	it.Release()

	// Cross check the iterated hashes and the database/nodepool content:
	// every reported hash must be either a disk trie node or a code blob.
	for hash := range hashes {
		_, ok := seenNodes[hash]
		if !ok {
			_, ok = seenCodes[hash]
		}
		if !ok {
			t.Errorf("failed to retrieve reported node %x", hash)
		}
	}
}
||||
|
||||
// isTrieNode is a helper function which reports if the provided
|
||||
// database entry belongs to a trie node or not.
|
||||
func isTrieNode(scheme string, key, val []byte) (bool, common.Hash) { |
||||
if scheme == rawdb.HashScheme { |
||||
if len(key) == common.HashLength { |
||||
return true, common.BytesToHash(key) |
||||
} |
||||
} |
||||
return false, common.Hash{} |
||||
} |
@ -0,0 +1,30 @@ |
||||
// Copyright 2021 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package state |
||||
|
||||
import "github.com/ethereum/go-ethereum/metrics" |
||||
|
||||
var ( |
||||
accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil) |
||||
storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil) |
||||
accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil) |
||||
storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil) |
||||
accountTrieUpdatedMeter = metrics.NewRegisteredMeter("state/update/accountnodes", nil) |
||||
storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil) |
||||
accountTrieDeletedMeter = metrics.NewRegisteredMeter("state/delete/accountnodes", nil) |
||||
storageTriesDeletedMeter = metrics.NewRegisteredMeter("state/delete/storagenodes", nil) |
||||
) |
@ -0,0 +1,87 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import ( |
||||
"bytes" |
||||
"math/big" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
) |
||||
|
||||
// Account is a modified version of a state.Account, where the root is replaced
|
||||
// with a byte slice. This format can be used to represent full-consensus format
|
||||
// or slim-snapshot format which replaces the empty root and code hash as nil
|
||||
// byte slice.
|
||||
type Account struct { |
||||
Nonce uint64 |
||||
Balance *big.Int |
||||
Root []byte |
||||
CodeHash []byte |
||||
} |
||||
|
||||
// SlimAccount converts a state.Account content into a slim snapshot account
|
||||
func SlimAccount(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) Account { |
||||
slim := Account{ |
||||
Nonce: nonce, |
||||
Balance: balance, |
||||
} |
||||
if root != types.EmptyRootHash { |
||||
slim.Root = root[:] |
||||
} |
||||
if !bytes.Equal(codehash, types.EmptyCodeHash[:]) { |
||||
slim.CodeHash = codehash |
||||
} |
||||
return slim |
||||
} |
||||
|
||||
// SlimAccountRLP converts a state.Account content into a slim snapshot
|
||||
// version RLP encoded.
|
||||
func SlimAccountRLP(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) []byte { |
||||
data, err := rlp.EncodeToBytes(SlimAccount(nonce, balance, root, codehash)) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
return data |
||||
} |
||||
|
||||
// FullAccount decodes the data on the 'slim RLP' format and return
|
||||
// the consensus format account.
|
||||
func FullAccount(data []byte) (Account, error) { |
||||
var account Account |
||||
if err := rlp.DecodeBytes(data, &account); err != nil { |
||||
return Account{}, err |
||||
} |
||||
if len(account.Root) == 0 { |
||||
account.Root = types.EmptyRootHash[:] |
||||
} |
||||
if len(account.CodeHash) == 0 { |
||||
account.CodeHash = types.EmptyCodeHash[:] |
||||
} |
||||
return account, nil |
||||
} |
||||
|
||||
// FullAccountRLP converts data on the 'slim RLP' format into the full RLP-format.
|
||||
func FullAccountRLP(data []byte) ([]byte, error) { |
||||
account, err := FullAccount(data) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return rlp.EncodeToBytes(account) |
||||
} |
@ -0,0 +1,241 @@ |
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/binary" |
||||
"errors" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/common/math" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/ethdb/memorydb" |
||||
"github.com/harmony-one/harmony/core/rawdb" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
) |
||||
|
||||
const ( |
||||
snapAccount = "account" // Identifier of account snapshot generation
|
||||
snapStorage = "storage" // Identifier of storage snapshot generation
|
||||
) |
||||
|
||||
// generatorStats is a collection of statistics gathered by the snapshot generator
|
||||
// for logging purposes.
|
||||
type generatorStats struct { |
||||
origin uint64 // Origin prefix where generation started
|
||||
start time.Time // Timestamp when generation started
|
||||
accounts uint64 // Number of accounts indexed(generated or recovered)
|
||||
slots uint64 // Number of storage slots indexed(generated or recovered)
|
||||
dangling uint64 // Number of dangling storage slots
|
||||
storage common.StorageSize // Total account and storage slot size(generation or recovery)
|
||||
} |
||||
|
||||
// Log creates an contextual log with the given message and the context pulled
|
||||
// from the internally maintained statistics.
|
||||
func (gs *generatorStats) Log(msg string, root common.Hash, marker []byte) { |
||||
var ctx []interface{} |
||||
if root != (common.Hash{}) { |
||||
ctx = append(ctx, []interface{}{"root", root}...) |
||||
} |
||||
// Figure out whether we're after or within an account
|
||||
switch len(marker) { |
||||
case common.HashLength: |
||||
ctx = append(ctx, []interface{}{"at", common.BytesToHash(marker)}...) |
||||
case 2 * common.HashLength: |
||||
ctx = append(ctx, []interface{}{ |
||||
"in", common.BytesToHash(marker[:common.HashLength]), |
||||
"at", common.BytesToHash(marker[common.HashLength:]), |
||||
}...) |
||||
} |
||||
// Add the usual measurements
|
||||
ctx = append(ctx, []interface{}{ |
||||
"accounts", gs.accounts, |
||||
"slots", gs.slots, |
||||
"storage", gs.storage, |
||||
"dangling", gs.dangling, |
||||
"elapsed", common.PrettyDuration(time.Since(gs.start)), |
||||
}...) |
||||
// Calculate the estimated indexing time based on current stats
|
||||
if len(marker) > 0 { |
||||
if done := binary.BigEndian.Uint64(marker[:8]) - gs.origin; done > 0 { |
||||
left := math.MaxUint64 - binary.BigEndian.Uint64(marker[:8]) |
||||
|
||||
speed := done/uint64(time.Since(gs.start)/time.Millisecond+1) + 1 // +1s to avoid division by zero
|
||||
ctx = append(ctx, []interface{}{ |
||||
"eta", common.PrettyDuration(time.Duration(left/speed) * time.Millisecond), |
||||
}...) |
||||
} |
||||
} |
||||
utils.Logger().Info().Msg(msg) |
||||
} |
||||
|
||||
// generatorContext carries a few global values to be shared by all generation functions.
|
||||
type generatorContext struct { |
||||
stats *generatorStats // Generation statistic collection
|
||||
db ethdb.KeyValueStore // Key-value store containing the snapshot data
|
||||
account *holdableIterator // Iterator of account snapshot data
|
||||
storage *holdableIterator // Iterator of storage snapshot data
|
||||
batch ethdb.Batch // Database batch for writing batch data atomically
|
||||
logged time.Time // The timestamp when last generation progress was displayed
|
||||
} |
||||
|
||||
// newGeneratorContext initializes the context for generation.
|
||||
func newGeneratorContext(stats *generatorStats, db ethdb.KeyValueStore, accMarker []byte, storageMarker []byte) *generatorContext { |
||||
ctx := &generatorContext{ |
||||
stats: stats, |
||||
db: db, |
||||
batch: db.NewBatch(), |
||||
logged: time.Now(), |
||||
} |
||||
ctx.openIterator(snapAccount, accMarker) |
||||
ctx.openIterator(snapStorage, storageMarker) |
||||
return ctx |
||||
} |
||||
|
||||
// openIterator constructs global account and storage snapshot iterators
|
||||
// at the interrupted position. These iterators should be reopened from time
|
||||
// to time to avoid blocking leveldb compaction for a long time.
|
||||
func (ctx *generatorContext) openIterator(kind string, start []byte) { |
||||
if kind == snapAccount { |
||||
iter := ctx.db.NewIterator(rawdb.SnapshotAccountPrefix, start) |
||||
ctx.account = newHoldableIterator(rawdb.NewKeyLengthIterator(iter, 1+common.HashLength)) |
||||
return |
||||
} |
||||
iter := ctx.db.NewIterator(rawdb.SnapshotStoragePrefix, start) |
||||
ctx.storage = newHoldableIterator(rawdb.NewKeyLengthIterator(iter, 1+2*common.HashLength)) |
||||
} |
||||
|
||||
// reopenIterator releases the specified snapshot iterator and re-open it
|
||||
// in the next position. It's aimed for not blocking leveldb compaction.
|
||||
func (ctx *generatorContext) reopenIterator(kind string) { |
||||
// Shift iterator one more step, so that we can reopen
|
||||
// the iterator at the right position.
|
||||
var iter = ctx.account |
||||
if kind == snapStorage { |
||||
iter = ctx.storage |
||||
} |
||||
hasNext := iter.Next() |
||||
if !hasNext { |
||||
// Iterator exhausted, release forever and create an already exhausted virtual iterator
|
||||
iter.Release() |
||||
if kind == snapAccount { |
||||
ctx.account = newHoldableIterator(memorydb.New().NewIterator(nil, nil)) |
||||
return |
||||
} |
||||
ctx.storage = newHoldableIterator(memorydb.New().NewIterator(nil, nil)) |
||||
return |
||||
} |
||||
next := iter.Key() |
||||
iter.Release() |
||||
ctx.openIterator(kind, next[1:]) |
||||
} |
||||
|
||||
// close releases all the held resources.
|
||||
func (ctx *generatorContext) close() { |
||||
ctx.account.Release() |
||||
ctx.storage.Release() |
||||
} |
||||
|
||||
// iterator returns the corresponding iterator specified by the kind.
|
||||
func (ctx *generatorContext) iterator(kind string) *holdableIterator { |
||||
if kind == snapAccount { |
||||
return ctx.account |
||||
} |
||||
return ctx.storage |
||||
} |
||||
|
||||
// removeStorageBefore deletes all storage entries which are located before
|
||||
// the specified account. When the iterator touches the storage entry which
|
||||
// is located in or outside the given account, it stops and holds the current
|
||||
// iterated element locally.
|
||||
func (ctx *generatorContext) removeStorageBefore(account common.Hash) { |
||||
var ( |
||||
count uint64 |
||||
start = time.Now() |
||||
iter = ctx.storage |
||||
) |
||||
for iter.Next() { |
||||
key := iter.Key() |
||||
if bytes.Compare(key[1:1+common.HashLength], account.Bytes()) >= 0 { |
||||
iter.Hold() |
||||
break |
||||
} |
||||
count++ |
||||
ctx.batch.Delete(key) |
||||
if ctx.batch.ValueSize() > ethdb.IdealBatchSize { |
||||
ctx.batch.Write() |
||||
ctx.batch.Reset() |
||||
} |
||||
} |
||||
ctx.stats.dangling += count |
||||
snapStorageCleanCounter.Inc(time.Since(start).Nanoseconds()) |
||||
} |
||||
|
||||
// removeStorageAt deletes all storage entries which are located in the specified
|
||||
// account. When the iterator touches the storage entry which is outside the given
|
||||
// account, it stops and holds the current iterated element locally. An error will
|
||||
// be returned if the initial position of iterator is not in the given account.
|
||||
func (ctx *generatorContext) removeStorageAt(account common.Hash) error { |
||||
var ( |
||||
count int64 |
||||
start = time.Now() |
||||
iter = ctx.storage |
||||
) |
||||
for iter.Next() { |
||||
key := iter.Key() |
||||
cmp := bytes.Compare(key[1:1+common.HashLength], account.Bytes()) |
||||
if cmp < 0 { |
||||
return errors.New("invalid iterator position") |
||||
} |
||||
if cmp > 0 { |
||||
iter.Hold() |
||||
break |
||||
} |
||||
count++ |
||||
ctx.batch.Delete(key) |
||||
if ctx.batch.ValueSize() > ethdb.IdealBatchSize { |
||||
ctx.batch.Write() |
||||
ctx.batch.Reset() |
||||
} |
||||
} |
||||
snapWipedStorageMeter.Mark(count) |
||||
snapStorageCleanCounter.Inc(time.Since(start).Nanoseconds()) |
||||
return nil |
||||
} |
||||
|
||||
// removeStorageLeft deletes all storage entries which are located after
|
||||
// the current iterator position.
|
||||
func (ctx *generatorContext) removeStorageLeft() { |
||||
var ( |
||||
count uint64 |
||||
start = time.Now() |
||||
iter = ctx.storage |
||||
) |
||||
for iter.Next() { |
||||
count++ |
||||
ctx.batch.Delete(iter.Key()) |
||||
if ctx.batch.ValueSize() > ethdb.IdealBatchSize { |
||||
ctx.batch.Write() |
||||
ctx.batch.Reset() |
||||
} |
||||
} |
||||
ctx.stats.dangling += count |
||||
snapDanglingStorageMeter.Mark(int64(count)) |
||||
snapStorageCleanCounter.Inc(time.Since(start).Nanoseconds()) |
||||
} |
@ -0,0 +1,383 @@ |
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/binary" |
||||
"errors" |
||||
"fmt" |
||||
"math" |
||||
"runtime" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/ethereum/go-ethereum/trie" |
||||
"github.com/harmony-one/harmony/core/rawdb" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
) |
||||
|
||||
// trieKV represents a trie key-value pair
|
||||
type trieKV struct { |
||||
key common.Hash |
||||
value []byte |
||||
} |
||||
|
||||
type ( |
||||
// trieGeneratorFn is the interface of trie generation which can
|
||||
// be implemented by different trie algorithm.
|
||||
trieGeneratorFn func(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan (trieKV), out chan (common.Hash)) |
||||
|
||||
// leafCallbackFn is the callback invoked at the leaves of the trie,
|
||||
// returns the subtrie root with the specified subtrie identifier.
|
||||
leafCallbackFn func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) |
||||
) |
||||
|
||||
// GenerateAccountTrieRoot takes an account iterator and reproduces the root hash.
|
||||
func GenerateAccountTrieRoot(it AccountIterator) (common.Hash, error) { |
||||
return generateTrieRoot(nil, "", it, common.Hash{}, stackTrieGenerate, nil, newGenerateStats(), true) |
||||
} |
||||
|
||||
// GenerateStorageTrieRoot takes a storage iterator and reproduces the root hash.
|
||||
func GenerateStorageTrieRoot(account common.Hash, it StorageIterator) (common.Hash, error) { |
||||
return generateTrieRoot(nil, "", it, account, stackTrieGenerate, nil, newGenerateStats(), true) |
||||
} |
||||
|
||||
// GenerateTrie takes the whole snapshot tree as the input, traverses all the
|
||||
// accounts as well as the corresponding storages and regenerate the whole state
|
||||
// (account trie + all storage tries).
|
||||
func GenerateTrie(snaptree *Tree, root common.Hash, src ethdb.Database, dst ethdb.KeyValueWriter) error { |
||||
// Traverse all state by snapshot, re-generate the whole state trie
|
||||
acctIt, err := snaptree.AccountIterator(root, common.Hash{}) |
||||
if err != nil { |
||||
return err // The required snapshot might not exist.
|
||||
} |
||||
defer acctIt.Release() |
||||
|
||||
scheme := snaptree.triedb.Scheme() |
||||
got, err := generateTrieRoot(dst, scheme, acctIt, common.Hash{}, stackTrieGenerate, func(dst ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { |
||||
// Migrate the code first, commit the contract code into the tmp db.
|
||||
if codeHash != types.EmptyCodeHash { |
||||
code := rawdb.ReadCode(src, codeHash) |
||||
if len(code) == 0 { |
||||
return common.Hash{}, errors.New("failed to read code") |
||||
} |
||||
rawdb.WriteCode(dst, codeHash, code) |
||||
} |
||||
// Then migrate all storage trie nodes into the tmp db.
|
||||
storageIt, err := snaptree.StorageIterator(root, accountHash, common.Hash{}) |
||||
if err != nil { |
||||
return common.Hash{}, err |
||||
} |
||||
defer storageIt.Release() |
||||
|
||||
hash, err := generateTrieRoot(dst, scheme, storageIt, accountHash, stackTrieGenerate, nil, stat, false) |
||||
if err != nil { |
||||
return common.Hash{}, err |
||||
} |
||||
return hash, nil |
||||
}, newGenerateStats(), true) |
||||
|
||||
if err != nil { |
||||
return err |
||||
} |
||||
if got != root { |
||||
return fmt.Errorf("state root hash mismatch: got %x, want %x", got, root) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// generateStats is a collection of statistics gathered by the trie generator
|
||||
// for logging purposes.
|
||||
type generateStats struct { |
||||
head common.Hash |
||||
start time.Time |
||||
|
||||
accounts uint64 // Number of accounts done (including those being crawled)
|
||||
slots uint64 // Number of storage slots done (including those being crawled)
|
||||
|
||||
slotsStart map[common.Hash]time.Time // Start time for account slot crawling
|
||||
slotsHead map[common.Hash]common.Hash // Slot head for accounts being crawled
|
||||
|
||||
lock sync.RWMutex |
||||
} |
||||
|
||||
// newGenerateStats creates a new generator stats.
|
||||
func newGenerateStats() *generateStats { |
||||
return &generateStats{ |
||||
slotsStart: make(map[common.Hash]time.Time), |
||||
slotsHead: make(map[common.Hash]common.Hash), |
||||
start: time.Now(), |
||||
} |
||||
} |
||||
|
||||
// progressAccounts updates the generator stats for the account range.
|
||||
func (stat *generateStats) progressAccounts(account common.Hash, done uint64) { |
||||
stat.lock.Lock() |
||||
defer stat.lock.Unlock() |
||||
|
||||
stat.accounts += done |
||||
stat.head = account |
||||
} |
||||
|
||||
// finishAccounts updates the generator stats for the finished account range.
|
||||
func (stat *generateStats) finishAccounts(done uint64) { |
||||
stat.lock.Lock() |
||||
defer stat.lock.Unlock() |
||||
|
||||
stat.accounts += done |
||||
} |
||||
|
||||
// progressContract updates the generator stats for a specific in-progress contract.
|
||||
func (stat *generateStats) progressContract(account common.Hash, slot common.Hash, done uint64) { |
||||
stat.lock.Lock() |
||||
defer stat.lock.Unlock() |
||||
|
||||
stat.slots += done |
||||
stat.slotsHead[account] = slot |
||||
if _, ok := stat.slotsStart[account]; !ok { |
||||
stat.slotsStart[account] = time.Now() |
||||
} |
||||
} |
||||
|
||||
// finishContract updates the generator stats for a specific just-finished contract.
|
||||
func (stat *generateStats) finishContract(account common.Hash, done uint64) { |
||||
stat.lock.Lock() |
||||
defer stat.lock.Unlock() |
||||
|
||||
stat.slots += done |
||||
delete(stat.slotsHead, account) |
||||
delete(stat.slotsStart, account) |
||||
} |
||||
|
||||
// report prints the cumulative progress statistic smartly.
|
||||
func (stat *generateStats) report() { |
||||
stat.lock.RLock() |
||||
defer stat.lock.RUnlock() |
||||
|
||||
ctx := []interface{}{ |
||||
"accounts", stat.accounts, |
||||
"slots", stat.slots, |
||||
"elapsed", common.PrettyDuration(time.Since(stat.start)), |
||||
} |
||||
if stat.accounts > 0 { |
||||
// If there's progress on the account trie, estimate the time to finish crawling it
|
||||
if done := binary.BigEndian.Uint64(stat.head[:8]) / stat.accounts; done > 0 { |
||||
var ( |
||||
left = (math.MaxUint64 - binary.BigEndian.Uint64(stat.head[:8])) / stat.accounts |
||||
speed = done/uint64(time.Since(stat.start)/time.Millisecond+1) + 1 // +1s to avoid division by zero
|
||||
eta = time.Duration(left/speed) * time.Millisecond |
||||
) |
||||
// If there are large contract crawls in progress, estimate their finish time
|
||||
for acc, head := range stat.slotsHead { |
||||
start := stat.slotsStart[acc] |
||||
if done := binary.BigEndian.Uint64(head[:8]); done > 0 { |
||||
var ( |
||||
left = math.MaxUint64 - binary.BigEndian.Uint64(head[:8]) |
||||
speed = done/uint64(time.Since(start)/time.Millisecond+1) + 1 // +1s to avoid division by zero
|
||||
) |
||||
// Override the ETA if larger than the largest until now
|
||||
if slotETA := time.Duration(left/speed) * time.Millisecond; eta < slotETA { |
||||
eta = slotETA |
||||
} |
||||
} |
||||
} |
||||
ctx = append(ctx, []interface{}{ |
||||
"eta", common.PrettyDuration(eta), |
||||
}...) |
||||
} |
||||
} |
||||
utils.Logger().Info().Msg("Iterating state snapshot") |
||||
} |
||||
|
||||
// reportDone prints the last log when the whole generation is finished.
|
||||
func (stat *generateStats) reportDone() { |
||||
stat.lock.RLock() |
||||
defer stat.lock.RUnlock() |
||||
|
||||
var ctx []interface{} |
||||
ctx = append(ctx, []interface{}{"accounts", stat.accounts}...) |
||||
if stat.slots != 0 { |
||||
ctx = append(ctx, []interface{}{"slots", stat.slots}...) |
||||
} |
||||
ctx = append(ctx, []interface{}{"elapsed", common.PrettyDuration(time.Since(stat.start))}...) |
||||
utils.Logger().Info().Msg("Iterated snapshot") |
||||
} |
||||
|
||||
// runReport periodically prints the progress information.
|
||||
func runReport(stats *generateStats, stop chan bool) { |
||||
timer := time.NewTimer(0) |
||||
defer timer.Stop() |
||||
|
||||
for { |
||||
select { |
||||
case <-timer.C: |
||||
stats.report() |
||||
timer.Reset(time.Second * 8) |
||||
case success := <-stop: |
||||
if success { |
||||
stats.reportDone() |
||||
} |
||||
return |
||||
} |
||||
} |
||||
} |
||||
|
||||
// generateTrieRoot generates the trie hash based on the snapshot iterator.
|
||||
// It can be used for generating account trie, storage trie or even the
|
||||
// whole state which connects the accounts and the corresponding storages.
|
||||
func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) { |
||||
var ( |
||||
in = make(chan trieKV) // chan to pass leaves
|
||||
out = make(chan common.Hash, 1) // chan to collect result
|
||||
stoplog = make(chan bool, 1) // 1-size buffer, works when logging is not enabled
|
||||
wg sync.WaitGroup |
||||
) |
||||
// Spin up a go-routine for trie hash re-generation
|
||||
wg.Add(1) |
||||
go func() { |
||||
defer wg.Done() |
||||
generatorFn(db, scheme, account, in, out) |
||||
}() |
||||
// Spin up a go-routine for progress logging
|
||||
if report && stats != nil { |
||||
wg.Add(1) |
||||
go func() { |
||||
defer wg.Done() |
||||
runReport(stats, stoplog) |
||||
}() |
||||
} |
||||
// Create a semaphore to assign tasks and collect results through. We'll pre-
|
||||
// fill it with nils, thus using the same channel for both limiting concurrent
|
||||
// processing and gathering results.
|
||||
threads := runtime.NumCPU() |
||||
results := make(chan error, threads) |
||||
for i := 0; i < threads; i++ { |
||||
results <- nil // fill the semaphore
|
||||
} |
||||
// stop is a helper function to shutdown the background threads
|
||||
// and return the re-generated trie hash.
|
||||
stop := func(fail error) (common.Hash, error) { |
||||
close(in) |
||||
result := <-out |
||||
for i := 0; i < threads; i++ { |
||||
if err := <-results; err != nil && fail == nil { |
||||
fail = err |
||||
} |
||||
} |
||||
stoplog <- fail == nil |
||||
|
||||
wg.Wait() |
||||
return result, fail |
||||
} |
||||
var ( |
||||
logged = time.Now() |
||||
processed = uint64(0) |
||||
leaf trieKV |
||||
) |
||||
// Start to feed leaves
|
||||
for it.Next() { |
||||
if account == (common.Hash{}) { |
||||
var ( |
||||
err error |
||||
fullData []byte |
||||
) |
||||
if leafCallback == nil { |
||||
fullData, err = FullAccountRLP(it.(AccountIterator).Account()) |
||||
if err != nil { |
||||
return stop(err) |
||||
} |
||||
} else { |
||||
// Wait until the semaphore allows us to continue, aborting if
|
||||
// a sub-task failed
|
||||
if err := <-results; err != nil { |
||||
results <- nil // stop will drain the results, add a noop back for this error we just consumed
|
||||
return stop(err) |
||||
} |
||||
// Fetch the next account and process it concurrently
|
||||
account, err := FullAccount(it.(AccountIterator).Account()) |
||||
if err != nil { |
||||
return stop(err) |
||||
} |
||||
go func(hash common.Hash) { |
||||
subroot, err := leafCallback(db, hash, common.BytesToHash(account.CodeHash), stats) |
||||
if err != nil { |
||||
results <- err |
||||
return |
||||
} |
||||
if !bytes.Equal(account.Root, subroot.Bytes()) { |
||||
results <- fmt.Errorf("invalid subroot(path %x), want %x, have %x", hash, account.Root, subroot) |
||||
return |
||||
} |
||||
results <- nil |
||||
}(it.Hash()) |
||||
fullData, err = rlp.EncodeToBytes(account) |
||||
if err != nil { |
||||
return stop(err) |
||||
} |
||||
} |
||||
leaf = trieKV{it.Hash(), fullData} |
||||
} else { |
||||
leaf = trieKV{it.Hash(), common.CopyBytes(it.(StorageIterator).Slot())} |
||||
} |
||||
in <- leaf |
||||
|
||||
// Accumulate the generation statistic if it's required.
|
||||
processed++ |
||||
if time.Since(logged) > 3*time.Second && stats != nil { |
||||
if account == (common.Hash{}) { |
||||
stats.progressAccounts(it.Hash(), processed) |
||||
} else { |
||||
stats.progressContract(account, it.Hash(), processed) |
||||
} |
||||
logged, processed = time.Now(), 0 |
||||
} |
||||
} |
||||
// Commit the last part statistic.
|
||||
if processed > 0 && stats != nil { |
||||
if account == (common.Hash{}) { |
||||
stats.finishAccounts(processed) |
||||
} else { |
||||
stats.finishContract(account, processed) |
||||
} |
||||
} |
||||
return stop(nil) |
||||
} |
||||
|
||||
func stackTrieGenerate(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan trieKV, out chan common.Hash) { |
||||
var nodeWriter trie.NodeWriteFunc |
||||
if db != nil { |
||||
nodeWriter = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { |
||||
rawdb.WriteTrieNode(db, owner, path, hash, blob, scheme) |
||||
} |
||||
} |
||||
t := trie.NewStackTrieWithOwner(nodeWriter, owner) |
||||
for leaf := range in { |
||||
t.TryUpdate(leaf.key[:], leaf.value) |
||||
} |
||||
var root common.Hash |
||||
if db == nil { |
||||
root = t.Hash() |
||||
} else { |
||||
root, _ = t.Commit() |
||||
} |
||||
out <- root |
||||
} |
@ -0,0 +1,559 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import ( |
||||
"encoding/binary" |
||||
"fmt" |
||||
"math" |
||||
"math/rand" |
||||
"sort" |
||||
"sync" |
||||
"sync/atomic" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
bloomfilter "github.com/holiman/bloomfilter/v2" |
||||
) |
||||
|
||||
var ( |
||||
// aggregatorMemoryLimit is the maximum size of the bottom-most diff layer
|
||||
// that aggregates the writes from above until it's flushed into the disk
|
||||
// layer.
|
||||
//
|
||||
// Note, bumping this up might drastically increase the size of the bloom
|
||||
// filters that's stored in every diff layer. Don't do that without fully
|
||||
// understanding all the implications.
|
||||
aggregatorMemoryLimit = uint64(4 * 1024 * 1024) |
||||
|
||||
// aggregatorItemLimit is an approximate number of items that will end up
|
||||
// in the agregator layer before it's flushed out to disk. A plain account
|
||||
// weighs around 14B (+hash), a storage slot 32B (+hash), a deleted slot
|
||||
// 0B (+hash). Slots are mostly set/unset in lockstep, so that average at
|
||||
// 16B (+hash). All in all, the average entry seems to be 15+32=47B. Use a
|
||||
// smaller number to be on the safe side.
|
||||
aggregatorItemLimit = aggregatorMemoryLimit / 42 |
||||
|
||||
// bloomTargetError is the target false positive rate when the aggregator
|
||||
// layer is at its fullest. The actual value will probably move around up
|
||||
// and down from this number, it's mostly a ballpark figure.
|
||||
//
|
||||
// Note, dropping this down might drastically increase the size of the bloom
|
||||
// filters that's stored in every diff layer. Don't do that without fully
|
||||
// understanding all the implications.
|
||||
bloomTargetError = 0.02 |
||||
|
||||
// bloomSize is the ideal bloom filter size given the maximum number of items
|
||||
// it's expected to hold and the target false positive error rate.
|
||||
bloomSize = math.Ceil(float64(aggregatorItemLimit) * math.Log(bloomTargetError) / math.Log(1/math.Pow(2, math.Log(2)))) |
||||
|
||||
// bloomFuncs is the ideal number of bits a single entry should set in the
|
||||
// bloom filter to keep its size to a minimum (given it's size and maximum
|
||||
// entry count).
|
||||
bloomFuncs = math.Round((bloomSize / float64(aggregatorItemLimit)) * math.Log(2)) |
||||
|
||||
// the bloom offsets are runtime constants which determines which part of the
|
||||
// account/storage hash the hasher functions looks at, to determine the
|
||||
// bloom key for an account/slot. This is randomized at init(), so that the
|
||||
// global population of nodes do not all display the exact same behaviour with
|
||||
// regards to bloom content
|
||||
bloomDestructHasherOffset = 0 |
||||
bloomAccountHasherOffset = 0 |
||||
bloomStorageHasherOffset = 0 |
||||
) |
||||
|
||||
func init() { |
||||
// Init the bloom offsets in the range [0:24] (requires 8 bytes)
|
||||
bloomDestructHasherOffset = rand.Intn(25) |
||||
bloomAccountHasherOffset = rand.Intn(25) |
||||
bloomStorageHasherOffset = rand.Intn(25) |
||||
|
||||
// The destruct and account blooms must be different, as the storage slots
|
||||
// will check for destruction too for every bloom miss. It should not collide
|
||||
// with modified accounts.
|
||||
for bloomAccountHasherOffset == bloomDestructHasherOffset { |
||||
bloomAccountHasherOffset = rand.Intn(25) |
||||
} |
||||
} |
||||
|
||||
// diffLayer represents a collection of modifications made to a state snapshot
// after running a block on top. It contains one sorted list for the account trie
// and one-one list for each storage tries.
//
// The goal of a diff layer is to act as a journal, tracking recent modifications
// made to the state, that have not yet graduated into a semi-immutable state.
type diffLayer struct {
	origin *diskLayer // Base disk layer to directly use on bloom misses
	parent snapshot   // Parent snapshot modified by this one, never nil
	memory uint64     // Approximate guess as to how much memory we use

	root  common.Hash // Root hash to which this snapshot diff belongs to
	stale uint32      // Signals that the layer became stale (state progressed); read/written atomically

	// destructSet is a very special helper marker. If an account is marked as
	// deleted, then it's recorded in this set. However it's allowed that an account
	// is included here but still available in other sets(e.g. storageData). The
	// reason is the diff layer includes all the changes in a *block*. It can
	// happen that in the tx_1, account A is self-destructed while in the tx_2
	// it's recreated. But we still need this marker to indicate the "old" A is
	// deleted, all data in other set belongs to the "new" A.
	destructSet map[common.Hash]struct{}               // Keyed markers for deleted (and potentially) recreated accounts
	accountList []common.Hash                          // List of account for iteration. If it exists, it's sorted, otherwise it's nil
	accountData map[common.Hash][]byte                 // Keyed accounts for direct retrieval (nil means deleted)
	storageList map[common.Hash][]common.Hash          // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
	storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. one per account (nil means deleted)

	diffed *bloomfilter.Filter // Bloom filter tracking all the diffed items up to the disk layer

	lock sync.RWMutex // Guards the fields above against concurrent access (stale is handled atomically)
}
||||
|
||||
// destructBloomHasher is a wrapper around a common.Hash to satisfy the interface
// API requirements of the bloom library used. It's used to convert a destruct
// event into a 64 bit mini hash.
type destructBloomHasher common.Hash

// Only Sum64 is ever invoked by the bloom filter; the remaining hash.Hash64
// methods exist purely to satisfy the interface and panic if called.
func (h destructBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") }
func (h destructBloomHasher) Sum(b []byte) []byte               { panic("not implemented") }
func (h destructBloomHasher) Reset()                            { panic("not implemented") }
func (h destructBloomHasher) BlockSize() int                    { panic("not implemented") }
func (h destructBloomHasher) Size() int                         { return 8 }

// Sum64 extracts 8 bytes of the wrapped hash at the randomized destruct offset.
func (h destructBloomHasher) Sum64() uint64 {
	return binary.BigEndian.Uint64(h[bloomDestructHasherOffset : bloomDestructHasherOffset+8])
}
||||
|
||||
// accountBloomHasher is a wrapper around a common.Hash to satisfy the interface
// API requirements of the bloom library used. It's used to convert an account
// hash into a 64 bit mini hash.
type accountBloomHasher common.Hash

// Only Sum64 is ever invoked by the bloom filter; the remaining hash.Hash64
// methods exist purely to satisfy the interface and panic if called.
func (h accountBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") }
func (h accountBloomHasher) Sum(b []byte) []byte               { panic("not implemented") }
func (h accountBloomHasher) Reset()                            { panic("not implemented") }
func (h accountBloomHasher) BlockSize() int                    { panic("not implemented") }
func (h accountBloomHasher) Size() int                         { return 8 }

// Sum64 extracts 8 bytes of the wrapped hash at the randomized account offset.
func (h accountBloomHasher) Sum64() uint64 {
	return binary.BigEndian.Uint64(h[bloomAccountHasherOffset : bloomAccountHasherOffset+8])
}
||||
|
||||
// storageBloomHasher is a wrapper around a [2]common.Hash to satisfy the interface
// API requirements of the bloom library used. It's used to convert an account
// hash into a 64 bit mini hash.
type storageBloomHasher [2]common.Hash

// Only Sum64 is ever invoked by the bloom filter; the remaining hash.Hash64
// methods exist purely to satisfy the interface and panic if called.
func (h storageBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") }
func (h storageBloomHasher) Sum(b []byte) []byte               { panic("not implemented") }
func (h storageBloomHasher) Reset()                            { panic("not implemented") }
func (h storageBloomHasher) BlockSize() int                    { panic("not implemented") }
func (h storageBloomHasher) Size() int                         { return 8 }

// Sum64 XORs 8 bytes of the account hash with 8 bytes of the storage slot
// hash, both taken at the randomized storage offset, into one mini hash.
func (h storageBloomHasher) Sum64() uint64 {
	return binary.BigEndian.Uint64(h[0][bloomStorageHasherOffset:bloomStorageHasherOffset+8]) ^
		binary.BigEndian.Uint64(h[1][bloomStorageHasherOffset:bloomStorageHasherOffset+8])
}
||||
|
||||
// newDiffLayer creates a new diff on top of an existing snapshot, whether that's a low
// level persistent database or a hierarchical diff already.
//
// The destructs/accounts/storage maps are retained by reference (no defensive
// copy is made). A nil account blob or nil storage map triggers a panic, since
// deletions must be expressed via the destruct set / nil map values instead.
func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
	// Create the new layer with some pre-allocated data segments
	dl := &diffLayer{
		parent:      parent,
		root:        root,
		destructSet: destructs,
		accountData: accounts,
		storageData: storage,
		storageList: make(map[common.Hash][]common.Hash),
	}
	// Seed the bloom filter from the bottom-most disk layer; a diff parent
	// already knows its origin, a disk parent is the origin itself.
	switch parent := parent.(type) {
	case *diskLayer:
		dl.rebloom(parent)
	case *diffLayer:
		dl.rebloom(parent.origin)
	default:
		panic("unknown parent type")
	}
	// Sanity check that accounts or storage slots are never nil
	for accountHash, blob := range accounts {
		if blob == nil {
			panic(fmt.Sprintf("account %#x nil", accountHash))
		}
		// Determine memory size and track the dirty writes
		dl.memory += uint64(common.HashLength + len(blob))
		snapshotDirtyAccountWriteMeter.Mark(int64(len(blob)))
	}
	for accountHash, slots := range storage {
		if slots == nil {
			panic(fmt.Sprintf("storage %#x nil", accountHash))
		}
		// Determine memory size and track the dirty writes
		for _, data := range slots {
			dl.memory += uint64(common.HashLength + len(data))
			snapshotDirtyStorageWriteMeter.Mark(int64(len(data)))
		}
	}
	// Account for the destruct markers too (key only, no value payload).
	dl.memory += uint64(len(destructs) * common.HashLength)
	return dl
}
||||
|
||||
// rebloom discards the layer's current bloom and rebuilds it from scratch based
// on the parent's and the local diffs.
//
// The rebuilt filter is cumulative: it starts from a copy of the parent diff
// layer's filter (or a fresh one if the parent is the disk layer) and adds all
// of this layer's destructs, accounts and storage slots on top.
func (dl *diffLayer) rebloom(origin *diskLayer) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Meter how long the rebuild took, for ops visibility.
	defer func(start time.Time) {
		snapshotBloomIndexTimer.Update(time.Since(start))
	}(time.Now())

	// Inject the new origin that triggered the rebloom
	dl.origin = origin

	// Retrieve the parent bloom or create a fresh empty one
	if parent, ok := dl.parent.(*diffLayer); ok {
		parent.lock.RLock()
		dl.diffed, _ = parent.diffed.Copy()
		parent.lock.RUnlock()
	} else {
		dl.diffed, _ = bloomfilter.New(uint64(bloomSize), uint64(bloomFuncs))
	}
	// Iterate over all the accounts and storage slots and index them
	for hash := range dl.destructSet {
		dl.diffed.Add(destructBloomHasher(hash))
	}
	for hash := range dl.accountData {
		dl.diffed.Add(accountBloomHasher(hash))
	}
	for accountHash, slots := range dl.storageData {
		for storageHash := range slots {
			dl.diffed.Add(storageBloomHasher{accountHash, storageHash})
		}
	}
	// Calculate the current false positive rate and update the error rate meter.
	// This is a bit cheating because subsequent layers will overwrite it, but it
	// should be fine, we're only interested in ballpark figures.
	k := float64(dl.diffed.K())
	n := float64(dl.diffed.N())
	m := float64(dl.diffed.M())
	snapshotBloomErrorGauge.Update(math.Pow(1.0-math.Exp((-k)*(n+0.5)/(m-1)), k))
}
||||
|
||||
// Root returns the root hash for which this snapshot was made.
// The root is immutable after construction, so no locking is needed.
func (dl *diffLayer) Root() common.Hash {
	return dl.root
}
||||
|
||||
// Parent returns the subsequent layer of a diff layer.
// The read lock guards against a concurrent flatten swapping the parent out.
func (dl *diffLayer) Parent() snapshot {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.parent
}
||||
|
||||
// Stale return whether this layer has become stale (was flattened across) or if
// it's still live.
// The flag is set atomically by flatten (atomic.SwapUint32), hence the atomic load.
func (dl *diffLayer) Stale() bool {
	return atomic.LoadUint32(&dl.stale) != 0
}
||||
|
||||
// Account directly retrieves the account associated with a particular hash in
|
||||
// the snapshot slim data format.
|
||||
func (dl *diffLayer) Account(hash common.Hash) (*Account, error) { |
||||
data, err := dl.AccountRLP(hash) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
if len(data) == 0 { // can be both nil and []byte{}
|
||||
return nil, nil |
||||
} |
||||
account := new(Account) |
||||
if err := rlp.DecodeBytes(data, account); err != nil { |
||||
panic(err) |
||||
} |
||||
return account, nil |
||||
} |
||||
|
||||
// AccountRLP directly retrieves the account RLP associated with a particular
// hash in the snapshot slim data format.
//
// Note the returned account is not a copy, please don't modify it.
func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) {
	// Check the bloom filter first whether there's even a point in reaching into
	// all the maps in all the layers below
	dl.lock.RLock()
	hit := dl.diffed.Contains(accountBloomHasher(hash))
	if !hit {
		// The account may still have been destructed in one of the diff layers.
		hit = dl.diffed.Contains(destructBloomHasher(hash))
	}
	var origin *diskLayer
	if !hit {
		origin = dl.origin // extract origin while holding the lock
	}
	dl.lock.RUnlock()

	// If the bloom filter misses, don't even bother with traversing the memory
	// diff layers, reach straight into the bottom persistent disk layer
	if origin != nil {
		snapshotBloomAccountMissMeter.Mark(1)
		return origin.AccountRLP(hash)
	}
	// The bloom filter hit, start poking in the internal maps
	return dl.accountRLP(hash, 0)
}
||||
|
||||
// accountRLP is an internal version of AccountRLP that skips the bloom filter
// checks and uses the internal maps to try and retrieve the data. It's meant
// to be used if a higher layer's bloom filter hit already.
//
// depth counts how many diff layers were traversed, and only feeds metrics.
func (dl *diffLayer) accountRLP(hash common.Hash, depth int) ([]byte, error) {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	// If the layer was flattened into, consider it invalid (any live reference to
	// the original should be marked as unusable).
	if dl.Stale() {
		return nil, ErrSnapshotStale
	}
	// If the account is known locally, return it
	if data, ok := dl.accountData[hash]; ok {
		snapshotDirtyAccountHitMeter.Mark(1)
		snapshotDirtyAccountHitDepthHist.Update(int64(depth))
		snapshotDirtyAccountReadMeter.Mark(int64(len(data)))
		snapshotBloomAccountTrueHitMeter.Mark(1)
		return data, nil
	}
	// If the account is known locally, but deleted, return it
	if _, ok := dl.destructSet[hash]; ok {
		snapshotDirtyAccountHitMeter.Mark(1)
		snapshotDirtyAccountHitDepthHist.Update(int64(depth))
		snapshotDirtyAccountInexMeter.Mark(1)
		snapshotBloomAccountTrueHitMeter.Mark(1)
		return nil, nil
	}
	// Account unknown to this diff, resolve from parent
	if diff, ok := dl.parent.(*diffLayer); ok {
		return diff.accountRLP(hash, depth+1)
	}
	// Failed to resolve through diff layers, mark a bloom error and use the disk
	snapshotBloomAccountFalseHitMeter.Mark(1)
	return dl.parent.AccountRLP(hash)
}
||||
|
||||
// Storage directly retrieves the storage data associated with a particular hash,
// within a particular account. If the slot is unknown to this diff, it's parent
// is consulted.
//
// Note the returned slot is not a copy, please don't modify it.
func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
	// Check the bloom filter first whether there's even a point in reaching into
	// all the maps in all the layers below
	dl.lock.RLock()
	hit := dl.diffed.Contains(storageBloomHasher{accountHash, storageHash})
	if !hit {
		// A destructed owning account also answers "deleted" for all its slots.
		hit = dl.diffed.Contains(destructBloomHasher(accountHash))
	}
	var origin *diskLayer
	if !hit {
		origin = dl.origin // extract origin while holding the lock
	}
	dl.lock.RUnlock()

	// If the bloom filter misses, don't even bother with traversing the memory
	// diff layers, reach straight into the bottom persistent disk layer
	if origin != nil {
		snapshotBloomStorageMissMeter.Mark(1)
		return origin.Storage(accountHash, storageHash)
	}
	// The bloom filter hit, start poking in the internal maps
	return dl.storage(accountHash, storageHash, 0)
}
||||
|
||||
// storage is an internal version of Storage that skips the bloom filter checks
// and uses the internal maps to try and retrieve the data. It's meant to be
// used if a higher layer's bloom filter hit already.
//
// depth counts how many diff layers were traversed, and only feeds metrics.
func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([]byte, error) {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	// If the layer was flattened into, consider it invalid (any live reference to
	// the original should be marked as unusable).
	if dl.Stale() {
		return nil, ErrSnapshotStale
	}
	// If the account is known locally, try to resolve the slot locally
	if storage, ok := dl.storageData[accountHash]; ok {
		if data, ok := storage[storageHash]; ok {
			snapshotDirtyStorageHitMeter.Mark(1)
			snapshotDirtyStorageHitDepthHist.Update(int64(depth))
			if n := len(data); n > 0 {
				snapshotDirtyStorageReadMeter.Mark(int64(n))
			} else {
				// Empty blob means the slot was explicitly deleted in this layer.
				snapshotDirtyStorageInexMeter.Mark(1)
			}
			snapshotBloomStorageTrueHitMeter.Mark(1)
			return data, nil
		}
	}
	// If the account is known locally, but deleted, return an empty slot
	if _, ok := dl.destructSet[accountHash]; ok {
		snapshotDirtyStorageHitMeter.Mark(1)
		snapshotDirtyStorageHitDepthHist.Update(int64(depth))
		snapshotDirtyStorageInexMeter.Mark(1)
		snapshotBloomStorageTrueHitMeter.Mark(1)
		return nil, nil
	}
	// Storage slot unknown to this diff, resolve from parent
	if diff, ok := dl.parent.(*diffLayer); ok {
		return diff.storage(accountHash, storageHash, depth+1)
	}
	// Failed to resolve through diff layers, mark a bloom error and use the disk
	snapshotBloomStorageFalseHitMeter.Mark(1)
	return dl.parent.Storage(accountHash, storageHash)
}
||||
|
||||
// Update creates a new layer on top of the existing snapshot diff tree with
// the specified data items. The receiver becomes the new layer's parent.
func (dl *diffLayer) Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
	return newDiffLayer(dl, blockRoot, destructs, accounts, storage)
}
||||
|
||||
// flatten pushes all data from this point downwards, flattening everything into
// a single diff at the bottom. Since usually the lowermost diff is the largest,
// the flattening builds up from there in reverse.
//
// The parent is marked stale in the process; a concurrent double-flatten into
// the same parent is a programming error and panics.
func (dl *diffLayer) flatten() snapshot {
	// If the parent is not diff, we're the first in line, return unmodified
	parent, ok := dl.parent.(*diffLayer)
	if !ok {
		return dl
	}
	// Parent is a diff, flatten it first (note, apart from weird corned cases,
	// flatten will realistically only ever merge 1 layer, so there's no need to
	// be smarter about grouping flattens together).
	parent = parent.flatten().(*diffLayer)

	parent.lock.Lock()
	defer parent.lock.Unlock()

	// Before actually writing all our data to the parent, first ensure that the
	// parent hasn't been 'corrupted' by someone else already flattening into it
	if atomic.SwapUint32(&parent.stale, 1) != 0 {
		panic("parent diff layer is stale") // we've flattened into the same parent from two children, boo
	}
	// Overwrite all the updated accounts blindly, merge the sorted list
	// A destruct in this layer wipes the parent's data for that account first.
	for hash := range dl.destructSet {
		parent.destructSet[hash] = struct{}{}
		delete(parent.accountData, hash)
		delete(parent.storageData, hash)
	}
	for hash, data := range dl.accountData {
		parent.accountData[hash] = data
	}
	// Overwrite all the updated storage slots (individually)
	for accountHash, storage := range dl.storageData {
		// If storage didn't exist (or was deleted) in the parent, overwrite blindly
		if _, ok := parent.storageData[accountHash]; !ok {
			parent.storageData[accountHash] = storage
			continue
		}
		// Storage exists in both parent and child, merge the slots
		comboData := parent.storageData[accountHash]
		for storageHash, data := range storage {
			comboData[storageHash] = data
		}
	}
	// Return the combo parent. Note the merged maps are reused from the (now
	// stale) parent, while the root and bloom filter come from this layer.
	return &diffLayer{
		parent:      parent.parent,
		origin:      parent.origin,
		root:        dl.root,
		destructSet: parent.destructSet,
		accountData: parent.accountData,
		storageData: parent.storageData,
		storageList: make(map[common.Hash][]common.Hash),
		diffed:      dl.diffed,
		memory:      parent.memory + dl.memory,
	}
}
||||
|
||||
// AccountList returns a sorted list of all accounts in this diffLayer, including
// the deleted ones.
//
// Note, the returned slice is not a copy, so do not modify it.
func (dl *diffLayer) AccountList() []common.Hash {
	// If an old list already exists, return it
	dl.lock.RLock()
	list := dl.accountList
	dl.lock.RUnlock()

	if list != nil {
		return list
	}
	// No old sorted account list exists, generate a new one
	// NOTE(review): there is no re-check of dl.accountList after upgrading to
	// the write lock, so two racing callers could both rebuild the list and
	// both bump dl.memory — presumably benign here, but worth confirming.
	dl.lock.Lock()
	defer dl.lock.Unlock()

	dl.accountList = make([]common.Hash, 0, len(dl.destructSet)+len(dl.accountData))
	for hash := range dl.accountData {
		dl.accountList = append(dl.accountList, hash)
	}
	// Destructed accounts count too, unless they were recreated (already added).
	for hash := range dl.destructSet {
		if _, ok := dl.accountData[hash]; !ok {
			dl.accountList = append(dl.accountList, hash)
		}
	}
	sort.Sort(hashes(dl.accountList))
	dl.memory += uint64(len(dl.accountList) * common.HashLength)
	return dl.accountList
}
||||
|
||||
// StorageList returns a sorted list of all storage slot hashes in this diffLayer
// for the given account. If the whole storage is destructed in this layer, then
// an additional flag *destructed = true* will be returned, otherwise the flag is
// false. Besides, the returned list will include the hash of deleted storage slot.
// Note a special case is an account is deleted in a prior tx but is recreated in
// the following tx with some storage slots set. In this case the returned list is
// not empty but the flag is true.
//
// Note, the returned slice is not a copy, so do not modify it.
func (dl *diffLayer) StorageList(accountHash common.Hash) ([]common.Hash, bool) {
	dl.lock.RLock()
	_, destructed := dl.destructSet[accountHash]
	if _, ok := dl.storageData[accountHash]; !ok {
		// Account not tracked by this layer
		dl.lock.RUnlock()
		return nil, destructed
	}
	// If an old list already exists, return it
	if list, exist := dl.storageList[accountHash]; exist {
		dl.lock.RUnlock()
		return list, destructed // the cached list can't be nil
	}
	dl.lock.RUnlock()

	// No old sorted account list exists, generate a new one
	dl.lock.Lock()
	defer dl.lock.Unlock()

	storageMap := dl.storageData[accountHash]
	storageList := make([]common.Hash, 0, len(storageMap))
	for k := range storageMap {
		storageList = append(storageList, k)
	}
	sort.Sort(hashes(storageList))
	dl.storageList[accountHash] = storageList
	// NOTE(review): the memory bump scales with len(dl.storageList) (number of
	// accounts with cached lists), not len(storageList) (slots in this list) —
	// looks intentional only as a rough estimate; confirm against upstream.
	dl.memory += uint64(len(dl.storageList)*common.HashLength + common.HashLength)
	return storageList, destructed
}
@ -0,0 +1,399 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import ( |
||||
"bytes" |
||||
crand "crypto/rand" |
||||
"math/rand" |
||||
"testing" |
||||
|
||||
"github.com/VictoriaMetrics/fastcache" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/ethdb/memorydb" |
||||
) |
||||
|
||||
func copyDestructs(destructs map[common.Hash]struct{}) map[common.Hash]struct{} { |
||||
copy := make(map[common.Hash]struct{}) |
||||
for hash := range destructs { |
||||
copy[hash] = struct{}{} |
||||
} |
||||
return copy |
||||
} |
||||
|
||||
func copyAccounts(accounts map[common.Hash][]byte) map[common.Hash][]byte { |
||||
copy := make(map[common.Hash][]byte) |
||||
for hash, blob := range accounts { |
||||
copy[hash] = blob |
||||
} |
||||
return copy |
||||
} |
||||
|
||||
func copyStorage(storage map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte { |
||||
copy := make(map[common.Hash]map[common.Hash][]byte) |
||||
for accHash, slots := range storage { |
||||
copy[accHash] = make(map[common.Hash][]byte) |
||||
for slotHash, blob := range slots { |
||||
copy[accHash][slotHash] = blob |
||||
} |
||||
} |
||||
return copy |
||||
} |
||||
|
||||
// TestMergeBasics tests some simple merges
func TestMergeBasics(t *testing.T) {
	var (
		destructs = make(map[common.Hash]struct{})
		accounts  = make(map[common.Hash][]byte)
		storage   = make(map[common.Hash]map[common.Hash][]byte)
	)
	// Fill up a parent: ~25% of accounts also destructed, ~50% with one slot.
	for i := 0; i < 100; i++ {
		h := randomHash()
		data := randomAccount()

		accounts[h] = data
		if rand.Intn(4) == 0 {
			destructs[h] = struct{}{}
		}
		if rand.Intn(2) == 0 {
			accStorage := make(map[common.Hash][]byte)
			value := make([]byte, 32)
			crand.Read(value)
			accStorage[randomHash()] = value
			storage[h] = accStorage
		}
	}
	// Add some (identical) layers on top
	parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	// And flatten
	merged := (child.flatten()).(*diffLayer)

	{ // Check account lists
		// accountList is built lazily: empty before AccountList(), full after.
		if have, want := len(merged.accountList), 0; have != want {
			t.Errorf("accountList wrong: have %v, want %v", have, want)
		}
		if have, want := len(merged.AccountList()), len(accounts); have != want {
			t.Errorf("AccountList() wrong: have %v, want %v", have, want)
		}
		if have, want := len(merged.accountList), len(accounts); have != want {
			t.Errorf("accountList [2] wrong: have %v, want %v", have, want)
		}
	}
	{ // Check account drops
		if have, want := len(merged.destructSet), len(destructs); have != want {
			t.Errorf("accountDrop wrong: have %v, want %v", have, want)
		}
	}
	{ // Check storage lists
		// storageList caches grow one account at a time as StorageList is
		// called, so before each call the cache holds exactly i entries.
		i := 0
		for aHash, sMap := range storage {
			if have, want := len(merged.storageList), i; have != want {
				t.Errorf("[1] storageList wrong: have %v, want %v", have, want)
			}
			list, _ := merged.StorageList(aHash)
			if have, want := len(list), len(sMap); have != want {
				t.Errorf("[2] StorageList() wrong: have %v, want %v", have, want)
			}
			if have, want := len(merged.storageList[aHash]), len(sMap); have != want {
				t.Errorf("storageList wrong: have %v, want %v", have, want)
			}
			i++
		}
	}
}
||||
|
||||
// TestMergeDelete tests some deletion
|
||||
func TestMergeDelete(t *testing.T) { |
||||
var ( |
||||
storage = make(map[common.Hash]map[common.Hash][]byte) |
||||
) |
||||
// Fill up a parent
|
||||
h1 := common.HexToHash("0x01") |
||||
h2 := common.HexToHash("0x02") |
||||
|
||||
flipDrops := func() map[common.Hash]struct{} { |
||||
return map[common.Hash]struct{}{ |
||||
h2: {}, |
||||
} |
||||
} |
||||
flipAccs := func() map[common.Hash][]byte { |
||||
return map[common.Hash][]byte{ |
||||
h1: randomAccount(), |
||||
} |
||||
} |
||||
flopDrops := func() map[common.Hash]struct{} { |
||||
return map[common.Hash]struct{}{ |
||||
h1: {}, |
||||
} |
||||
} |
||||
flopAccs := func() map[common.Hash][]byte { |
||||
return map[common.Hash][]byte{ |
||||
h2: randomAccount(), |
||||
} |
||||
} |
||||
// Add some flipAccs-flopping layers on top
|
||||
parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage) |
||||
child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage) |
||||
child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage) |
||||
child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage) |
||||
child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage) |
||||
child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage) |
||||
child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage) |
||||
|
||||
if data, _ := child.Account(h1); data == nil { |
||||
t.Errorf("last diff layer: expected %x account to be non-nil", h1) |
||||
} |
||||
if data, _ := child.Account(h2); data != nil { |
||||
t.Errorf("last diff layer: expected %x account to be nil", h2) |
||||
} |
||||
if _, ok := child.destructSet[h1]; ok { |
||||
t.Errorf("last diff layer: expected %x drop to be missing", h1) |
||||
} |
||||
if _, ok := child.destructSet[h2]; !ok { |
||||
t.Errorf("last diff layer: expected %x drop to be present", h1) |
||||
} |
||||
// And flatten
|
||||
merged := (child.flatten()).(*diffLayer) |
||||
|
||||
if data, _ := merged.Account(h1); data == nil { |
||||
t.Errorf("merged layer: expected %x account to be non-nil", h1) |
||||
} |
||||
if data, _ := merged.Account(h2); data != nil { |
||||
t.Errorf("merged layer: expected %x account to be nil", h2) |
||||
} |
||||
if _, ok := merged.destructSet[h1]; !ok { // Note, drops stay alive until persisted to disk!
|
||||
t.Errorf("merged diff layer: expected %x drop to be present", h1) |
||||
} |
||||
if _, ok := merged.destructSet[h2]; !ok { // Note, drops stay alive until persisted to disk!
|
||||
t.Errorf("merged diff layer: expected %x drop to be present", h1) |
||||
} |
||||
// If we add more granular metering of memory, we can enable this again,
|
||||
// but it's not implemented for now
|
||||
//if have, want := merged.memory, child.memory; have != want {
|
||||
// t.Errorf("mem wrong: have %d, want %d", have, want)
|
||||
//}
|
||||
} |
||||
|
||||
// This tests that if we create a new account, and set a slot, and then merge
|
||||
// it, the lists will be correct.
|
||||
func TestInsertAndMerge(t *testing.T) { |
||||
// Fill up a parent
|
||||
var ( |
||||
acc = common.HexToHash("0x01") |
||||
slot = common.HexToHash("0x02") |
||||
parent *diffLayer |
||||
child *diffLayer |
||||
) |
||||
{ |
||||
var ( |
||||
destructs = make(map[common.Hash]struct{}) |
||||
accounts = make(map[common.Hash][]byte) |
||||
storage = make(map[common.Hash]map[common.Hash][]byte) |
||||
) |
||||
parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage) |
||||
} |
||||
{ |
||||
var ( |
||||
destructs = make(map[common.Hash]struct{}) |
||||
accounts = make(map[common.Hash][]byte) |
||||
storage = make(map[common.Hash]map[common.Hash][]byte) |
||||
) |
||||
accounts[acc] = randomAccount() |
||||
storage[acc] = make(map[common.Hash][]byte) |
||||
storage[acc][slot] = []byte{0x01} |
||||
child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) |
||||
} |
||||
// And flatten
|
||||
merged := (child.flatten()).(*diffLayer) |
||||
{ // Check that slot value is present
|
||||
have, _ := merged.Storage(acc, slot) |
||||
if want := []byte{0x01}; !bytes.Equal(have, want) { |
||||
t.Errorf("merged slot value wrong: have %x, want %x", have, want) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// emptyLayer builds a disk layer backed by a fresh in-memory database and a
// small (500 KiB) cache, to serve as the bottom of test snapshot trees.
func emptyLayer() *diskLayer {
	return &diskLayer{
		diskdb: memorydb.New(),
		cache:  fastcache.New(500 * 1024),
	}
}
||||
|
||||
// BenchmarkSearch checks how long it takes to find a non-existing key
// BenchmarkSearch-6   	  200000	     10481 ns/op (1K per layer)
// BenchmarkSearch-6   	  200000	     10760 ns/op (10K per layer)
// BenchmarkSearch-6   	  100000	     17866 ns/op
//
// BenchmarkSearch-6   	  500000	      3723 ns/op (10k per layer, only top-level RLock()
func BenchmarkSearch(b *testing.B) {
	// First, we set up 128 diff layers, with 10K items each
	fill := func(parent snapshot) *diffLayer {
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		for i := 0; i < 10000; i++ {
			accounts[randomHash()] = randomAccount()
		}
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
	}
	var layer snapshot
	layer = emptyLayer()
	for i := 0; i < 128; i++ {
		layer = fill(layer)
	}
	// The key is (virtually certainly) absent, so every lookup walks down to
	// the disk layer — this measures the miss path.
	key := crypto.Keccak256Hash([]byte{0x13, 0x38})
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		layer.AccountRLP(key)
	}
}
||||
|
||||
// BenchmarkSearchSlot checks how long it takes to find a non-existing key
|
||||
// - Number of layers: 128
|
||||
// - Each layers contains the account, with a couple of storage slots
|
||||
// BenchmarkSearchSlot-6 100000 14554 ns/op
|
||||
// BenchmarkSearchSlot-6 100000 22254 ns/op (when checking parent root using mutex)
|
||||
// BenchmarkSearchSlot-6 100000 14551 ns/op (when checking parent number using atomic)
|
||||
// With bloom filter:
|
||||
// BenchmarkSearchSlot-6 3467835 351 ns/op
|
||||
func BenchmarkSearchSlot(b *testing.B) { |
||||
// First, we set up 128 diff layers, with 1K items each
|
||||
accountKey := crypto.Keccak256Hash([]byte{0x13, 0x37}) |
||||
storageKey := crypto.Keccak256Hash([]byte{0x13, 0x37}) |
||||
accountRLP := randomAccount() |
||||
fill := func(parent snapshot) *diffLayer { |
||||
var ( |
||||
destructs = make(map[common.Hash]struct{}) |
||||
accounts = make(map[common.Hash][]byte) |
||||
storage = make(map[common.Hash]map[common.Hash][]byte) |
||||
) |
||||
accounts[accountKey] = accountRLP |
||||
|
||||
accStorage := make(map[common.Hash][]byte) |
||||
for i := 0; i < 5; i++ { |
||||
value := make([]byte, 32) |
||||
crand.Read(value) |
||||
accStorage[randomHash()] = value |
||||
storage[accountKey] = accStorage |
||||
} |
||||
return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) |
||||
} |
||||
var layer snapshot |
||||
layer = emptyLayer() |
||||
for i := 0; i < 128; i++ { |
||||
layer = fill(layer) |
||||
} |
||||
b.ResetTimer() |
||||
for i := 0; i < b.N; i++ { |
||||
layer.Storage(accountKey, storageKey) |
||||
} |
||||
} |
||||
|
||||
// With accountList and sorting
|
||||
// BenchmarkFlatten-6 50 29890856 ns/op
|
||||
//
|
||||
// Without sorting and tracking accountList
|
||||
// BenchmarkFlatten-6 300 5511511 ns/op
|
||||
func BenchmarkFlatten(b *testing.B) { |
||||
fill := func(parent snapshot) *diffLayer { |
||||
var ( |
||||
destructs = make(map[common.Hash]struct{}) |
||||
accounts = make(map[common.Hash][]byte) |
||||
storage = make(map[common.Hash]map[common.Hash][]byte) |
||||
) |
||||
for i := 0; i < 100; i++ { |
||||
accountKey := randomHash() |
||||
accounts[accountKey] = randomAccount() |
||||
|
||||
accStorage := make(map[common.Hash][]byte) |
||||
for i := 0; i < 20; i++ { |
||||
value := make([]byte, 32) |
||||
crand.Read(value) |
||||
accStorage[randomHash()] = value |
||||
} |
||||
storage[accountKey] = accStorage |
||||
} |
||||
return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) |
||||
} |
||||
b.ResetTimer() |
||||
for i := 0; i < b.N; i++ { |
||||
b.StopTimer() |
||||
var layer snapshot |
||||
layer = emptyLayer() |
||||
for i := 1; i < 128; i++ { |
||||
layer = fill(layer) |
||||
} |
||||
b.StartTimer() |
||||
|
||||
for i := 1; i < 128; i++ { |
||||
dl, ok := layer.(*diffLayer) |
||||
if !ok { |
||||
break |
||||
} |
||||
layer = dl.flatten() |
||||
} |
||||
b.StopTimer() |
||||
} |
||||
} |
||||
|
||||
// This test writes ~324M of diff layers to disk, spread over
|
||||
// - 128 individual layers,
|
||||
// - each with 200 accounts
|
||||
// - containing 200 slots
|
||||
//
|
||||
// BenchmarkJournal-6 1 1471373923 ns/ops
|
||||
// BenchmarkJournal-6 1 1208083335 ns/op // bufio writer
|
||||
func BenchmarkJournal(b *testing.B) { |
||||
fill := func(parent snapshot) *diffLayer { |
||||
var ( |
||||
destructs = make(map[common.Hash]struct{}) |
||||
accounts = make(map[common.Hash][]byte) |
||||
storage = make(map[common.Hash]map[common.Hash][]byte) |
||||
) |
||||
for i := 0; i < 200; i++ { |
||||
accountKey := randomHash() |
||||
accounts[accountKey] = randomAccount() |
||||
|
||||
accStorage := make(map[common.Hash][]byte) |
||||
for i := 0; i < 200; i++ { |
||||
value := make([]byte, 32) |
||||
crand.Read(value) |
||||
accStorage[randomHash()] = value |
||||
} |
||||
storage[accountKey] = accStorage |
||||
} |
||||
return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) |
||||
} |
||||
layer := snapshot(emptyLayer()) |
||||
for i := 1; i < 128; i++ { |
||||
layer = fill(layer) |
||||
} |
||||
b.ResetTimer() |
||||
|
||||
for i := 0; i < b.N; i++ { |
||||
layer.Journal(new(bytes.Buffer)) |
||||
} |
||||
} |
@ -0,0 +1,166 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import ( |
||||
"bytes" |
||||
"sync" |
||||
|
||||
"github.com/VictoriaMetrics/fastcache" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/ethereum/go-ethereum/trie" |
||||
"github.com/harmony-one/harmony/core/rawdb" |
||||
) |
||||
|
||||
// diskLayer is a low level persistent snapshot built on top of a key-value store.
// It is the bottom-most layer of the snapshot tree: lookups that miss every
// diff layer above it are served from the clean cache or the database here.
type diskLayer struct {
	diskdb ethdb.KeyValueStore // Key-value store containing the base snapshot
	triedb *trie.Database      // Trie node cache for reconstruction purposes
	cache  *fastcache.Cache    // Cache to avoid hitting the disk for direct access

	root  common.Hash // Root hash of the base snapshot
	stale bool        // Signals that the layer became stale (state progressed)

	genMarker  []byte                    // Marker for the state that's indexed during initial layer generation
	genPending chan struct{}             // Notification channel when generation is done (test synchronicity)
	genAbort   chan chan *generatorStats // Notification channel to abort generating the snapshot in this layer

	lock sync.RWMutex // Guards the mutable fields (stale, genMarker) against concurrent access
}
||||
|
||||
// Root returns root hash for which this snapshot was made.
func (dl *diskLayer) Root() common.Hash {
	// The root is fixed at construction time, so no locking is required.
	return dl.root
}
||||
|
||||
// Parent always returns nil as there's no layer below the disk.
func (dl *diskLayer) Parent() snapshot {
	return nil
}
||||
|
||||
// Stale return whether this layer has become stale (was flattened across) or if
|
||||
// it's still live.
|
||||
func (dl *diskLayer) Stale() bool { |
||||
dl.lock.RLock() |
||||
defer dl.lock.RUnlock() |
||||
|
||||
return dl.stale |
||||
} |
||||
|
||||
// Account directly retrieves the account associated with a particular hash in
|
||||
// the snapshot slim data format.
|
||||
func (dl *diskLayer) Account(hash common.Hash) (*Account, error) { |
||||
data, err := dl.AccountRLP(hash) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
if len(data) == 0 { // can be both nil and []byte{}
|
||||
return nil, nil |
||||
} |
||||
account := new(Account) |
||||
if err := rlp.DecodeBytes(data, account); err != nil { |
||||
panic(err) |
||||
} |
||||
return account, nil |
||||
} |
||||
|
||||
// AccountRLP directly retrieves the account RLP associated with a particular
|
||||
// hash in the snapshot slim data format.
|
||||
func (dl *diskLayer) AccountRLP(hash common.Hash) ([]byte, error) { |
||||
dl.lock.RLock() |
||||
defer dl.lock.RUnlock() |
||||
|
||||
// If the layer was flattened into, consider it invalid (any live reference to
|
||||
// the original should be marked as unusable).
|
||||
if dl.stale { |
||||
return nil, ErrSnapshotStale |
||||
} |
||||
// If the layer is being generated, ensure the requested hash has already been
|
||||
// covered by the generator.
|
||||
if dl.genMarker != nil && bytes.Compare(hash[:], dl.genMarker) > 0 { |
||||
return nil, ErrNotCoveredYet |
||||
} |
||||
// If we're in the disk layer, all diff layers missed
|
||||
snapshotDirtyAccountMissMeter.Mark(1) |
||||
|
||||
// Try to retrieve the account from the memory cache
|
||||
if blob, found := dl.cache.HasGet(nil, hash[:]); found { |
||||
snapshotCleanAccountHitMeter.Mark(1) |
||||
snapshotCleanAccountReadMeter.Mark(int64(len(blob))) |
||||
return blob, nil |
||||
} |
||||
// Cache doesn't contain account, pull from disk and cache for later
|
||||
blob := rawdb.ReadAccountSnapshot(dl.diskdb, hash) |
||||
dl.cache.Set(hash[:], blob) |
||||
|
||||
snapshotCleanAccountMissMeter.Mark(1) |
||||
if n := len(blob); n > 0 { |
||||
snapshotCleanAccountWriteMeter.Mark(int64(n)) |
||||
} else { |
||||
snapshotCleanAccountInexMeter.Mark(1) |
||||
} |
||||
return blob, nil |
||||
} |
||||
|
||||
// Storage directly retrieves the storage data associated with a particular hash,
|
||||
// within a particular account.
|
||||
func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error) { |
||||
dl.lock.RLock() |
||||
defer dl.lock.RUnlock() |
||||
|
||||
// If the layer was flattened into, consider it invalid (any live reference to
|
||||
// the original should be marked as unusable).
|
||||
if dl.stale { |
||||
return nil, ErrSnapshotStale |
||||
} |
||||
key := append(accountHash[:], storageHash[:]...) |
||||
|
||||
// If the layer is being generated, ensure the requested hash has already been
|
||||
// covered by the generator.
|
||||
if dl.genMarker != nil && bytes.Compare(key, dl.genMarker) > 0 { |
||||
return nil, ErrNotCoveredYet |
||||
} |
||||
// If we're in the disk layer, all diff layers missed
|
||||
snapshotDirtyStorageMissMeter.Mark(1) |
||||
|
||||
// Try to retrieve the storage slot from the memory cache
|
||||
if blob, found := dl.cache.HasGet(nil, key); found { |
||||
snapshotCleanStorageHitMeter.Mark(1) |
||||
snapshotCleanStorageReadMeter.Mark(int64(len(blob))) |
||||
return blob, nil |
||||
} |
||||
// Cache doesn't contain storage slot, pull from disk and cache for later
|
||||
blob := rawdb.ReadStorageSnapshot(dl.diskdb, accountHash, storageHash) |
||||
dl.cache.Set(key, blob) |
||||
|
||||
snapshotCleanStorageMissMeter.Mark(1) |
||||
if n := len(blob); n > 0 { |
||||
snapshotCleanStorageWriteMeter.Mark(int64(n)) |
||||
} else { |
||||
snapshotCleanStorageInexMeter.Mark(1) |
||||
} |
||||
return blob, nil |
||||
} |
||||
|
||||
// Update creates a new layer on top of the existing snapshot diff tree with
// the specified data items. Note, the maps are retained by the method to avoid
// copying everything.
func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
	// The disk layer itself is immutable; an update simply stacks a new diff
	// layer on top, referencing this layer as its parent.
	return newDiffLayer(dl, blockHash, destructs, accounts, storage)
}
@ -0,0 +1,574 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import ( |
||||
"bytes" |
||||
"testing" |
||||
|
||||
"github.com/VictoriaMetrics/fastcache" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/ethdb/memorydb" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/harmony-one/harmony/core/rawdb" |
||||
) |
||||
|
||||
// reverse reverses the contents of a byte slice. It's used to update random accs
|
||||
// with deterministic changes.
|
||||
func reverse(blob []byte) []byte { |
||||
res := make([]byte, len(blob)) |
||||
for i, b := range blob { |
||||
res[len(blob)-1-i] = b |
||||
} |
||||
return res |
||||
} |
||||
|
||||
// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values.
func TestDiskMerge(t *testing.T) {
	// Create some accounts in the disk layer
	db := memorydb.New()

	var (
		// Naming scheme: Mod = modified by the diff layer, Del = deleted
		// account, Nuke = destructed contract; Cache/NoCache says whether the
		// entry is pre-warmed into the clean cache before the merge, so that
		// cache invalidation can be verified separately from DB persistence.
		accNoModNoCache     = common.Hash{0x1}
		accNoModCache       = common.Hash{0x2}
		accModNoCache       = common.Hash{0x3}
		accModCache         = common.Hash{0x4}
		accDelNoCache       = common.Hash{0x5}
		accDelCache         = common.Hash{0x6}
		conNoModNoCache     = common.Hash{0x7}
		conNoModNoCacheSlot = common.Hash{0x70}
		conNoModCache       = common.Hash{0x8}
		conNoModCacheSlot   = common.Hash{0x80}
		conModNoCache       = common.Hash{0x9}
		conModNoCacheSlot   = common.Hash{0x90}
		conModCache         = common.Hash{0xa}
		conModCacheSlot     = common.Hash{0xa0}
		conDelNoCache       = common.Hash{0xb}
		conDelNoCacheSlot   = common.Hash{0xb0}
		conDelCache         = common.Hash{0xc}
		conDelCacheSlot     = common.Hash{0xc0}
		conNukeNoCache      = common.Hash{0xd}
		conNukeNoCacheSlot  = common.Hash{0xd0}
		conNukeCache        = common.Hash{0xe}
		conNukeCacheSlot    = common.Hash{0xe0}
		baseRoot            = randomHash()
		diffRoot            = randomHash()
	)

	// Seed the database with every account/slot, using the hash itself as the
	// stored blob so expectations are self-describing.
	rawdb.WriteAccountSnapshot(db, accNoModNoCache, accNoModNoCache[:])
	rawdb.WriteAccountSnapshot(db, accNoModCache, accNoModCache[:])
	rawdb.WriteAccountSnapshot(db, accModNoCache, accModNoCache[:])
	rawdb.WriteAccountSnapshot(db, accModCache, accModCache[:])
	rawdb.WriteAccountSnapshot(db, accDelNoCache, accDelNoCache[:])
	rawdb.WriteAccountSnapshot(db, accDelCache, accDelCache[:])

	rawdb.WriteAccountSnapshot(db, conNoModNoCache, conNoModNoCache[:])
	rawdb.WriteStorageSnapshot(db, conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conNoModCache, conNoModCache[:])
	rawdb.WriteStorageSnapshot(db, conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conModNoCache, conModNoCache[:])
	rawdb.WriteStorageSnapshot(db, conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conModCache, conModCache[:])
	rawdb.WriteStorageSnapshot(db, conModCache, conModCacheSlot, conModCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conDelNoCache, conDelNoCache[:])
	rawdb.WriteStorageSnapshot(db, conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conDelCache, conDelCache[:])
	rawdb.WriteStorageSnapshot(db, conDelCache, conDelCacheSlot, conDelCacheSlot[:])

	rawdb.WriteAccountSnapshot(db, conNukeNoCache, conNukeNoCache[:])
	rawdb.WriteStorageSnapshot(db, conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conNukeCache, conNukeCache[:])
	rawdb.WriteStorageSnapshot(db, conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

	rawdb.WriteSnapshotRoot(db, baseRoot)

	// Create a disk layer based on the above and cache in some data
	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			baseRoot: &diskLayer{
				diskdb: db,
				cache:  fastcache.New(500 * 1024),
				root:   baseRoot,
			},
		},
	}
	base := snaps.Snapshot(baseRoot)
	// Touch the *Cache entries so they land in the clean cache pre-merge.
	base.AccountRLP(accNoModCache)
	base.AccountRLP(accModCache)
	base.AccountRLP(accDelCache)
	base.Storage(conNoModCache, conNoModCacheSlot)
	base.Storage(conModCache, conModCacheSlot)
	base.Storage(conDelCache, conDelCacheSlot)
	base.Storage(conNukeCache, conNukeCacheSlot)

	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
		accDelNoCache:  {},
		accDelCache:    {},
		conNukeNoCache: {},
		conNukeCache:   {},
	}, map[common.Hash][]byte{
		accModNoCache: reverse(accModNoCache[:]),
		accModCache:   reverse(accModCache[:]),
	}, map[common.Hash]map[common.Hash][]byte{
		conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
		conDelNoCache: {conDelNoCacheSlot: nil},
		conDelCache:   {conDelCacheSlot: nil},
	}); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	// Cap to zero diff layers, forcing the diff to merge into the disk layer.
	if err := snaps.Cap(diffRoot, 0); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
	// Retrieve all the data through the disk layer and validate it
	base = snaps.Snapshot(diffRoot)
	if _, ok := base.(*diskLayer); !ok {
		t.Fatalf("update not flattend into the disk layer")
	}

	// assertAccount ensures that an account matches the given blob.
	assertAccount := func(account common.Hash, data []byte) {
		t.Helper()
		blob, err := base.AccountRLP(account)
		if err != nil {
			t.Errorf("account access (%x) failed: %v", account, err)
		} else if !bytes.Equal(blob, data) {
			t.Errorf("account access (%x) mismatch: have %x, want %x", account, blob, data)
		}
	}
	assertAccount(accNoModNoCache, accNoModNoCache[:])
	assertAccount(accNoModCache, accNoModCache[:])
	assertAccount(accModNoCache, reverse(accModNoCache[:]))
	assertAccount(accModCache, reverse(accModCache[:]))
	assertAccount(accDelNoCache, nil)
	assertAccount(accDelCache, nil)

	// assertStorage ensures that a storage slot matches the given blob.
	assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
		t.Helper()
		blob, err := base.Storage(account, slot)
		if err != nil {
			t.Errorf("storage access (%x:%x) failed: %v", account, slot, err)
		} else if !bytes.Equal(blob, data) {
			t.Errorf("storage access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
		}
	}
	assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
	assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
	assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
	assertStorage(conDelCache, conDelCacheSlot, nil)
	assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
	assertStorage(conNukeCache, conNukeCacheSlot, nil)

	// Retrieve all the data directly from the database and validate it

	// assertDatabaseAccount ensures that an account from the database matches the given blob.
	assertDatabaseAccount := func(account common.Hash, data []byte) {
		t.Helper()
		if blob := rawdb.ReadAccountSnapshot(db, account); !bytes.Equal(blob, data) {
			t.Errorf("account database access (%x) mismatch: have %x, want %x", account, blob, data)
		}
	}
	assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
	assertDatabaseAccount(accNoModCache, accNoModCache[:])
	assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
	assertDatabaseAccount(accModCache, reverse(accModCache[:]))
	assertDatabaseAccount(accDelNoCache, nil)
	assertDatabaseAccount(accDelCache, nil)

	// assertDatabaseStorage ensures that a storage slot from the database matches the given blob.
	assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
		t.Helper()
		if blob := rawdb.ReadStorageSnapshot(db, account, slot); !bytes.Equal(blob, data) {
			t.Errorf("storage database access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
		}
	}
	assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
	assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
	assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
	assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
	assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
	assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
}
||||
|
||||
// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values, discarding anything
// after the in-progress generation marker.
func TestDiskPartialMerge(t *testing.T) {
	// Iterate the test a few times to ensure we pick various internal orderings
	// for the data slots as well as the progress marker.
	for i := 0; i < 1024; i++ {
		// Create some accounts in the disk layer
		db := memorydb.New()

		var (
			// Same naming scheme as TestDiskMerge, but with random hashes so
			// that their ordering relative to the random genMarker varies
			// across iterations.
			accNoModNoCache     = randomHash()
			accNoModCache       = randomHash()
			accModNoCache       = randomHash()
			accModCache         = randomHash()
			accDelNoCache       = randomHash()
			accDelCache         = randomHash()
			conNoModNoCache     = randomHash()
			conNoModNoCacheSlot = randomHash()
			conNoModCache       = randomHash()
			conNoModCacheSlot   = randomHash()
			conModNoCache       = randomHash()
			conModNoCacheSlot   = randomHash()
			conModCache         = randomHash()
			conModCacheSlot     = randomHash()
			conDelNoCache       = randomHash()
			conDelNoCacheSlot   = randomHash()
			conDelCache         = randomHash()
			conDelCacheSlot     = randomHash()
			conNukeNoCache      = randomHash()
			conNukeNoCacheSlot  = randomHash()
			conNukeCache        = randomHash()
			conNukeCacheSlot    = randomHash()
			baseRoot            = randomHash()
			diffRoot            = randomHash()
			// 64-byte marker (account hash ++ slot hash), mimicking a
			// generation pass paused mid-account.
			genMarker = append(randomHash().Bytes(), randomHash().Bytes()...)
		)

		// insertAccount injects an account into the database if it's at or
		// before the generator marker, drops the op otherwise. This is needed
		// to seed the database with a valid starting snapshot.
		insertAccount := func(account common.Hash, data []byte) {
			if bytes.Compare(account[:], genMarker) <= 0 {
				rawdb.WriteAccountSnapshot(db, account, data[:])
			}
		}
		insertAccount(accNoModNoCache, accNoModNoCache[:])
		insertAccount(accNoModCache, accNoModCache[:])
		insertAccount(accModNoCache, accModNoCache[:])
		insertAccount(accModCache, accModCache[:])
		insertAccount(accDelNoCache, accDelNoCache[:])
		insertAccount(accDelCache, accDelCache[:])

		// insertStorage injects a storage slot into the database if it's at
		// or before the generator marker, drops the op otherwise. This is
		// needed to seed the database with a valid starting snapshot.
		insertStorage := func(account common.Hash, slot common.Hash, data []byte) {
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 {
				rawdb.WriteStorageSnapshot(db, account, slot, data[:])
			}
		}
		insertAccount(conNoModNoCache, conNoModNoCache[:])
		insertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		insertAccount(conNoModCache, conNoModCache[:])
		insertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		insertAccount(conModNoCache, conModNoCache[:])
		insertStorage(conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
		insertAccount(conModCache, conModCache[:])
		insertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
		insertAccount(conDelNoCache, conDelNoCache[:])
		insertStorage(conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
		insertAccount(conDelCache, conDelCache[:])
		insertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])

		insertAccount(conNukeNoCache, conNukeNoCache[:])
		insertStorage(conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
		insertAccount(conNukeCache, conNukeCache[:])
		insertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

		rawdb.WriteSnapshotRoot(db, baseRoot)

		// Create a disk layer based on the above using a random progress marker
		// and cache in some data.
		snaps := &Tree{
			layers: map[common.Hash]snapshot{
				baseRoot: &diskLayer{
					diskdb: db,
					cache:  fastcache.New(500 * 1024),
					root:   baseRoot,
				},
			},
		}
		snaps.layers[baseRoot].(*diskLayer).genMarker = genMarker
		base := snaps.Snapshot(baseRoot)

		// assertAccount ensures that an account matches the given blob if it's
		// already covered by the disk snapshot, and errors out otherwise.
		assertAccount := func(account common.Hash, data []byte) {
			t.Helper()
			blob, err := base.AccountRLP(account)
			if bytes.Compare(account[:], genMarker) > 0 && err != ErrNotCoveredYet {
				t.Fatalf("test %d: post-marker (%x) account access (%x) succeeded: %x", i, genMarker, account, blob)
			}
			if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) account access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
			}
		}
		// Touching these also warms the clean cache for covered entries.
		assertAccount(accNoModCache, accNoModCache[:])
		assertAccount(accModCache, accModCache[:])
		assertAccount(accDelCache, accDelCache[:])

		// assertStorage ensures that a storage slot matches the given blob if
		// it's already covered by the disk snapshot, and errors out otherwise.
		assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
			t.Helper()
			blob, err := base.Storage(account, slot)
			if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && err != ErrNotCoveredYet {
				t.Fatalf("test %d: post-marker (%x) storage access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
			}
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) storage access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
			}
		}
		assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
		assertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])
		assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

		// Modify or delete some accounts, flatten everything onto disk
		if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
			accDelNoCache:  {},
			accDelCache:    {},
			conNukeNoCache: {},
			conNukeCache:   {},
		}, map[common.Hash][]byte{
			accModNoCache: reverse(accModNoCache[:]),
			accModCache:   reverse(accModCache[:]),
		}, map[common.Hash]map[common.Hash][]byte{
			conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
			conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
			conDelNoCache: {conDelNoCacheSlot: nil},
			conDelCache:   {conDelCacheSlot: nil},
		}); err != nil {
			t.Fatalf("test %d: failed to update snapshot tree: %v", i, err)
		}
		if err := snaps.Cap(diffRoot, 0); err != nil {
			t.Fatalf("test %d: failed to flatten snapshot tree: %v", i, err)
		}
		// Retrieve all the data through the disk layer and validate it
		base = snaps.Snapshot(diffRoot)
		if _, ok := base.(*diskLayer); !ok {
			t.Fatalf("test %d: update not flattend into the disk layer", i)
		}
		assertAccount(accNoModNoCache, accNoModNoCache[:])
		assertAccount(accNoModCache, accNoModCache[:])
		assertAccount(accModNoCache, reverse(accModNoCache[:]))
		assertAccount(accModCache, reverse(accModCache[:]))
		assertAccount(accDelNoCache, nil)
		assertAccount(accDelCache, nil)

		assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
		assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
		assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
		assertStorage(conDelCache, conDelCacheSlot, nil)
		assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
		assertStorage(conNukeCache, conNukeCacheSlot, nil)

		// Retrieve all the data directly from the database and validate it

		// assertDatabaseAccount ensures that an account inside the database matches
		// the given blob if it's already covered by the disk snapshot, and does not
		// exist otherwise.
		assertDatabaseAccount := func(account common.Hash, data []byte) {
			t.Helper()
			blob := rawdb.ReadAccountSnapshot(db, account)
			if bytes.Compare(account[:], genMarker) > 0 && blob != nil {
				t.Fatalf("test %d: post-marker (%x) account database access (%x) succeeded: %x", i, genMarker, account, blob)
			}
			if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) account database access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
			}
		}
		assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
		assertDatabaseAccount(accNoModCache, accNoModCache[:])
		assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
		assertDatabaseAccount(accModCache, reverse(accModCache[:]))
		assertDatabaseAccount(accDelNoCache, nil)
		assertDatabaseAccount(accDelCache, nil)

		// assertDatabaseStorage ensures that a storage slot inside the database
		// matches the given blob if it's already covered by the disk snapshot,
		// and does not exist otherwise.
		assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
			t.Helper()
			blob := rawdb.ReadStorageSnapshot(db, account, slot)
			if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && blob != nil {
				t.Fatalf("test %d: post-marker (%x) storage database access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
			}
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) storage database access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
			}
		}
		assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
		assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
		assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
		assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
		assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
		assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
	}
}
||||
|
||||
// Tests that when the bottom-most diff layer is merged into the disk
|
||||
// layer whether the corresponding generator is persisted correctly.
|
||||
func TestDiskGeneratorPersistence(t *testing.T) { |
||||
var ( |
||||
accOne = randomHash() |
||||
accTwo = randomHash() |
||||
accOneSlotOne = randomHash() |
||||
accOneSlotTwo = randomHash() |
||||
|
||||
accThree = randomHash() |
||||
accThreeSlot = randomHash() |
||||
baseRoot = randomHash() |
||||
diffRoot = randomHash() |
||||
diffTwoRoot = randomHash() |
||||
genMarker = append(randomHash().Bytes(), randomHash().Bytes()...) |
||||
) |
||||
// Testing scenario 1, the disk layer is still under the construction.
|
||||
db := rawdb.NewMemoryDatabase() |
||||
|
||||
rawdb.WriteAccountSnapshot(db, accOne, accOne[:]) |
||||
rawdb.WriteStorageSnapshot(db, accOne, accOneSlotOne, accOneSlotOne[:]) |
||||
rawdb.WriteStorageSnapshot(db, accOne, accOneSlotTwo, accOneSlotTwo[:]) |
||||
rawdb.WriteSnapshotRoot(db, baseRoot) |
||||
|
||||
// Create a disk layer based on all above updates
|
||||
snaps := &Tree{ |
||||
layers: map[common.Hash]snapshot{ |
||||
baseRoot: &diskLayer{ |
||||
diskdb: db, |
||||
cache: fastcache.New(500 * 1024), |
||||
root: baseRoot, |
||||
genMarker: genMarker, |
||||
}, |
||||
}, |
||||
} |
||||
// Modify or delete some accounts, flatten everything onto disk
|
||||
if err := snaps.Update(diffRoot, baseRoot, nil, map[common.Hash][]byte{ |
||||
accTwo: accTwo[:], |
||||
}, nil); err != nil { |
||||
t.Fatalf("failed to update snapshot tree: %v", err) |
||||
} |
||||
if err := snaps.Cap(diffRoot, 0); err != nil { |
||||
t.Fatalf("failed to flatten snapshot tree: %v", err) |
||||
} |
||||
blob := rawdb.ReadSnapshotGenerator(db) |
||||
var generator journalGenerator |
||||
if err := rlp.DecodeBytes(blob, &generator); err != nil { |
||||
t.Fatalf("Failed to decode snapshot generator %v", err) |
||||
} |
||||
if !bytes.Equal(generator.Marker, genMarker) { |
||||
t.Fatalf("Generator marker is not matched") |
||||
} |
||||
// Test scenario 2, the disk layer is fully generated
|
||||
// Modify or delete some accounts, flatten everything onto disk
|
||||
if err := snaps.Update(diffTwoRoot, diffRoot, nil, map[common.Hash][]byte{ |
||||
accThree: accThree.Bytes(), |
||||
}, map[common.Hash]map[common.Hash][]byte{ |
||||
accThree: {accThreeSlot: accThreeSlot.Bytes()}, |
||||
}); err != nil { |
||||
t.Fatalf("failed to update snapshot tree: %v", err) |
||||
} |
||||
diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer) |
||||
diskLayer.genMarker = nil // Construction finished
|
||||
if err := snaps.Cap(diffTwoRoot, 0); err != nil { |
||||
t.Fatalf("failed to flatten snapshot tree: %v", err) |
||||
} |
||||
blob = rawdb.ReadSnapshotGenerator(db) |
||||
if err := rlp.DecodeBytes(blob, &generator); err != nil { |
||||
t.Fatalf("Failed to decode snapshot generator %v", err) |
||||
} |
||||
if len(generator.Marker) != 0 { |
||||
t.Fatalf("Failed to update snapshot generator") |
||||
} |
||||
} |
||||
|
||||
// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values, discarding anything
// after the in-progress generation marker.
//
// This test case is a tiny specialized case of TestDiskPartialMerge, which tests
// some very specific cornercases that random tests won't ever trigger.
func TestDiskMidAccountPartialMerge(t *testing.T) {
	// Intentionally unimplemented placeholder carried over from upstream;
	// the scenario is currently exercised only by the randomized
	// TestDiskPartialMerge above.
	// TODO(@karalabe) ?
}
||||
|
||||
// TestDiskSeek tests that seek-operations work on the disk layer
|
||||
func TestDiskSeek(t *testing.T) { |
||||
// Create some accounts in the disk layer
|
||||
db := rawdb.NewMemoryDatabase() |
||||
defer db.Close() |
||||
|
||||
// Fill even keys [0,2,4...]
|
||||
for i := 0; i < 0xff; i += 2 { |
||||
acc := common.Hash{byte(i)} |
||||
rawdb.WriteAccountSnapshot(db, acc, acc[:]) |
||||
} |
||||
// Add an 'higher' key, with incorrect (higher) prefix
|
||||
highKey := []byte{rawdb.SnapshotAccountPrefix[0] + 1} |
||||
db.Put(highKey, []byte{0xff, 0xff}) |
||||
|
||||
baseRoot := randomHash() |
||||
rawdb.WriteSnapshotRoot(db, baseRoot) |
||||
|
||||
snaps := &Tree{ |
||||
layers: map[common.Hash]snapshot{ |
||||
baseRoot: &diskLayer{ |
||||
diskdb: db, |
||||
cache: fastcache.New(500 * 1024), |
||||
root: baseRoot, |
||||
}, |
||||
}, |
||||
} |
||||
// Test some different seek positions
|
||||
type testcase struct { |
||||
pos byte |
||||
expkey byte |
||||
} |
||||
var cases = []testcase{ |
||||
{0xff, 0x55}, // this should exit immediately without checking key
|
||||
{0x01, 0x02}, |
||||
{0xfe, 0xfe}, |
||||
{0xfd, 0xfe}, |
||||
{0x00, 0x00}, |
||||
} |
||||
for i, tc := range cases { |
||||
it, err := snaps.AccountIterator(baseRoot, common.Hash{tc.pos}) |
||||
if err != nil { |
||||
t.Fatalf("case %d, error: %v", i, err) |
||||
} |
||||
count := 0 |
||||
for it.Next() { |
||||
k, v, err := it.Hash()[0], it.Account()[0], it.Error() |
||||
if err != nil { |
||||
t.Fatalf("test %d, item %d, error: %v", i, count, err) |
||||
} |
||||
// First item in iterator should have the expected key
|
||||
if count == 0 && k != tc.expkey { |
||||
t.Fatalf("test %d, item %d, got %v exp %v", i, count, k, tc.expkey) |
||||
} |
||||
count++ |
||||
if v != k { |
||||
t.Fatalf("test %d, item %d, value wrong, got %v exp %v", i, count, v, k) |
||||
} |
||||
} |
||||
} |
||||
} |
@ -0,0 +1,756 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import ( |
||||
"bytes" |
||||
"errors" |
||||
"fmt" |
||||
"math/big" |
||||
"time" |
||||
|
||||
"github.com/VictoriaMetrics/fastcache" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/common/hexutil" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/ethereum/go-ethereum/trie" |
||||
"github.com/harmony-one/harmony/core/rawdb" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
) |
||||
|
||||
var (
	// accountCheckRange is the upper limit of the number of accounts involved in
	// each range check. This is a value estimated based on experience. If this
	// range is too large, the failure rate of range proof will increase. Otherwise,
	// if the range is too small, the efficiency of the state recovery will decrease.
	// NOTE(review): declared as a variable (not a constant), presumably so it can
	// be tuned by tests — confirm before changing to const.
	accountCheckRange = 128

	// storageCheckRange is the upper limit of the number of storage slots involved
	// in each range check. This is a value estimated based on experience. If this
	// range is too large, the failure rate of range proof will increase. Otherwise,
	// if the range is too small, the efficiency of the state recovery will decrease.
	storageCheckRange = 1024

	// errMissingTrie is returned if the target trie is missing while the generation
	// is running. In this case the generation is aborted and wait the new signal.
	errMissingTrie = errors.New("missing trie")
)
||||
|
||||
// generateSnapshot regenerates a brand new snapshot based on an existing state
|
||||
// database and head block asynchronously. The snapshot is returned immediately
|
||||
// and generation is continued in the background until done.
|
||||
func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) *diskLayer { |
||||
// Create a new disk layer with an initialized state marker at zero
|
||||
var ( |
||||
stats = &generatorStats{start: time.Now()} |
||||
batch = diskdb.NewBatch() |
||||
genMarker = []byte{} // Initialized but empty!
|
||||
) |
||||
rawdb.WriteSnapshotRoot(batch, root) |
||||
journalProgress(batch, genMarker, stats) |
||||
if err := batch.Write(); err != nil { |
||||
utils.Logger().Fatal().Err(err).Msg("Failed to write initialized state marker") |
||||
} |
||||
base := &diskLayer{ |
||||
diskdb: diskdb, |
||||
triedb: triedb, |
||||
root: root, |
||||
cache: fastcache.New(cache * 1024 * 1024), |
||||
genMarker: genMarker, |
||||
genPending: make(chan struct{}), |
||||
genAbort: make(chan chan *generatorStats), |
||||
} |
||||
go base.generate(stats) |
||||
utils.Logger().Debug().Interface("root", root).Msg("Start snapshot generation") |
||||
return base |
||||
} |
||||
|
||||
// journalProgress persists the generator stats into the database to resume later.
|
||||
func journalProgress(db ethdb.KeyValueWriter, marker []byte, stats *generatorStats) { |
||||
// Write out the generator marker. Note it's a standalone disk layer generator
|
||||
// which is not mixed with journal. It's ok if the generator is persisted while
|
||||
// journal is not.
|
||||
entry := journalGenerator{ |
||||
Done: marker == nil, |
||||
Marker: marker, |
||||
} |
||||
if stats != nil { |
||||
entry.Accounts = stats.accounts |
||||
entry.Slots = stats.slots |
||||
entry.Storage = uint64(stats.storage) |
||||
} |
||||
blob, err := rlp.EncodeToBytes(entry) |
||||
if err != nil { |
||||
panic(err) // Cannot happen, here to catch dev errors
|
||||
} |
||||
var logstr string |
||||
switch { |
||||
case marker == nil: |
||||
logstr = "done" |
||||
case bytes.Equal(marker, []byte{}): |
||||
logstr = "empty" |
||||
case len(marker) == common.HashLength: |
||||
logstr = fmt.Sprintf("%#x", marker) |
||||
default: |
||||
logstr = fmt.Sprintf("%#x:%#x", marker[:common.HashLength], marker[common.HashLength:]) |
||||
} |
||||
utils.Logger().Debug().Err(err).Str("progress", logstr).Msg("Journalled generator progress") |
||||
rawdb.WriteSnapshotGenerator(db, blob) |
||||
} |
||||
|
||||
// proofResult contains the output of range proving which can be used
// for further processing regardless if it is successful or not.
type proofResult struct {
	keys     [][]byte   // The key set of all elements being iterated, even proving is failed
	vals     [][]byte   // The val set of all elements being iterated, even proving is failed
	diskMore bool       // Set when the database has extra snapshot states since last iteration
	trieMore bool       // Set when the trie has extra snapshot states(only meaningful for successful proving)
	proofErr error      // Indicator whether the given state range is valid or not
	tr       *trie.Trie // The trie, in case the trie was resolved by the prover (may be nil)
}
||||
|
||||
// valid returns the indicator that range proof is successful or not.
|
||||
func (result *proofResult) valid() bool { |
||||
return result.proofErr == nil |
||||
} |
||||
|
||||
// last returns the last verified element key regardless of whether the range proof is
|
||||
// successful or not. Nil is returned if nothing involved in the proving.
|
||||
func (result *proofResult) last() []byte { |
||||
var last []byte |
||||
if len(result.keys) > 0 { |
||||
last = result.keys[len(result.keys)-1] |
||||
} |
||||
return last |
||||
} |
||||
|
||||
// forEach iterates all the visited elements and applies the given callback on them.
|
||||
// The iteration is aborted if the callback returns non-nil error.
|
||||
func (result *proofResult) forEach(callback func(key []byte, val []byte) error) error { |
||||
for i := 0; i < len(result.keys); i++ { |
||||
key, val := result.keys[i], result.vals[i] |
||||
if err := callback(key, val); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// proveRange proves the snapshot segment with particular prefix is "valid".
// The iteration start point will be assigned if the iterator is restored from
// the last interruption. Max will be assigned in order to limit the maximum
// amount of data involved in each iteration.
//
// The proof result will be returned if the range proving is finished, otherwise
// the error will be returned to abort the entire procedure.
func (dl *diskLayer) proveRange(ctx *generatorContext, trieId *trie.ID, prefix []byte, kind string, origin []byte, max int, valueConvertFn func([]byte) ([]byte, error)) (*proofResult, error) {
	var (
		keys     [][]byte
		vals     [][]byte
		proof    = rawdb.NewMemoryDatabase()
		diskMore = false
		iter     = ctx.iterator(kind)
		start    = time.Now()
		// NOTE(review): append may write into prefix's backing array if it has
		// spare capacity; callers appear to pass freshly-built prefixes — confirm.
		min = append(prefix, origin...)
	)
	for iter.Next() {
		// Ensure the iterated item is always equal or larger than the given origin.
		key := iter.Key()
		if bytes.Compare(key, min) < 0 {
			return nil, errors.New("invalid iteration position")
		}
		// Ensure the iterated item still fall in the specified prefix. If
		// not which means the items in the specified area are all visited.
		// Move the iterator a step back since we iterate one extra element
		// out.
		if !bytes.Equal(key[:len(prefix)], prefix) {
			iter.Hold()
			break
		}
		// Break if we've reached the max size, and signal that we're not
		// done yet. Move the iterator a step back since we iterate one
		// extra element out.
		if len(keys) == max {
			iter.Hold()
			diskMore = true
			break
		}
		// Strip the prefix so keys/vals hold only the element-local key.
		keys = append(keys, common.CopyBytes(key[len(prefix):]))

		if valueConvertFn == nil {
			vals = append(vals, common.CopyBytes(iter.Value()))
		} else {
			val, err := valueConvertFn(iter.Value())
			if err != nil {
				// Special case, the state data is corrupted (invalid slim-format account),
				// don't abort the entire procedure directly. Instead, let the fallback
				// generation to heal the invalid data.
				//
				// Here append the original value to ensure that the number of key and
				// value are aligned.
				vals = append(vals, common.CopyBytes(iter.Value()))
				utils.Logger().Error().Err(err).Msg("Failed to convert account state data")
			} else {
				vals = append(vals, val)
			}
		}
	}
	// Update metrics for database iteration and merkle proving
	if kind == snapStorage {
		snapStorageSnapReadCounter.Inc(time.Since(start).Nanoseconds())
	} else {
		snapAccountSnapReadCounter.Inc(time.Since(start).Nanoseconds())
	}
	// Time everything from here to function return as "proving" work.
	defer func(start time.Time) {
		if kind == snapStorage {
			snapStorageProveCounter.Inc(time.Since(start).Nanoseconds())
		} else {
			snapAccountProveCounter.Inc(time.Since(start).Nanoseconds())
		}
	}(time.Now())

	// The snap state is exhausted, pass the entire key/val set for verification
	root := trieId.Root
	if origin == nil && !diskMore {
		// Cheap path: rebuild the sub-trie from the flat data and compare roots
		// instead of producing Merkle edge proofs.
		stackTr := trie.NewStackTrie(nil)
		for i, key := range keys {
			stackTr.TryUpdate(key, vals[i])
		}
		if gotRoot := stackTr.Hash(); gotRoot != root {
			return &proofResult{
				keys:     keys,
				vals:     vals,
				proofErr: fmt.Errorf("wrong root: have %#x want %#x", gotRoot, root),
			}, nil
		}
		return &proofResult{keys: keys, vals: vals}, nil
	}
	// Snap state is chunked, generate edge proofs for verification.
	tr, err := trie.New(trieId, dl.triedb)
	if err != nil {
		ctx.stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
		return nil, errMissingTrie
	}
	// Firstly find out the key of last iterated element.
	var last []byte
	if len(keys) > 0 {
		last = keys[len(keys)-1]
	}
	// Generate the Merkle proofs for the first and last element
	if origin == nil {
		origin = common.Hash{}.Bytes()
	}
	if err := tr.Prove(origin, 0, proof); err != nil {
		utils.Logger().Debug().Err(err).
			Msg("Failed to prove range")

		// A failed proof is not fatal: return the partial result so the caller
		// can fall back to trie iteration for this range.
		return &proofResult{
			keys:     keys,
			vals:     vals,
			diskMore: diskMore,
			proofErr: err,
			tr:       tr,
		}, nil
	}
	if last != nil {
		if err := tr.Prove(last, 0, proof); err != nil {
			utils.Logger().Debug().Err(err).Str("kind", kind).Bytes("last", last).Msg("Failed to prove range")
			return &proofResult{
				keys:     keys,
				vals:     vals,
				diskMore: diskMore,
				proofErr: err,
				tr:       tr,
			}, nil
		}
	}
	// Verify the snapshot segment with range prover, ensure that all flat states
	// in this range correspond to merkle trie.
	cont, err := trie.VerifyRangeProof(root, origin, last, keys, vals, proof)
	return &proofResult{
		keys:     keys,
		vals:     vals,
		diskMore: diskMore,
		trieMore: cont,
		proofErr: err,
		tr:       tr},
		nil
}
||||
|
||||
// onStateCallback is a function that is called by generateRange, when processing a range of
// accounts or storage slots. For each element, the callback is invoked.
//
// - If 'delete' is true, then this element (and potential slots) needs to be deleted from the snapshot.
// - If 'write' is true, then this element needs to be updated with the 'val'.
// - If 'write' is false, then this element is already correct, and needs no update.
// The 'val' is the canonical encoding of the value (not the slim format for accounts)
//
// However, for accounts, the storage trie of the account needs to be checked. Also,
// dangling storages(storage exists but the corresponding account is missing) need to
// be cleaned up.
type onStateCallback func(key []byte, val []byte, write bool, delete bool) error
||||
|
||||
// generateRange generates the state segment with particular prefix. Generation can
// either verify the correctness of existing state through range-proof and skip
// generation, or iterate trie to regenerate state on demand.
func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefix []byte, kind string, origin []byte, max int, onState onStateCallback, valueConvertFn func([]byte) ([]byte, error)) (bool, []byte, error) {
	// Use range prover to check the validity of the flat state in the range
	result, err := dl.proveRange(ctx, trieId, prefix, kind, origin, max, valueConvertFn)
	if err != nil {
		return false, nil, err
	}
	last := result.last()

	// Construct contextual logger
	// NOTE(review): logCtx is built but never attached to the logger below
	// (utils.GetLogger().New() takes no context here) — verify whether the
	// context was meant to be passed through, as in upstream geth.
	logCtx := []interface{}{"kind", kind, "prefix", hexutil.Encode(prefix)}
	if len(origin) > 0 {
		logCtx = append(logCtx, "origin", hexutil.Encode(origin))
	}
	logger := utils.GetLogger().New()

	// The range prover says the range is correct, skip trie iteration
	if result.valid() {
		snapSuccessfulRangeProofMeter.Mark(1)
		logger.Trace("Proved state range", "last", hexutil.Encode(last))

		// The verification is passed, process each state with the given
		// callback function. If this state represents a contract, the
		// corresponding storage check will be performed in the callback
		if err := result.forEach(func(key []byte, val []byte) error { return onState(key, val, false, false) }); err != nil {
			return false, nil, err
		}
		// Only abort the iteration when both database and trie are exhausted
		return !result.diskMore && !result.trieMore, last, nil
	}
	logger.Trace("Detected outdated state range", "last", hexutil.Encode(last), "err", result.proofErr)
	snapFailedRangeProofMeter.Mark(1)

	// Special case, the entire trie is missing. In the original trie scheme,
	// all the duplicated subtries will be filtered out (only one copy of data
	// will be stored). While in the snapshot model, all the storage tries
	// belong to different contracts will be kept even they are duplicated.
	// Track it to a certain extent remove the noise data used for statistics.
	if origin == nil && last == nil {
		meter := snapMissallAccountMeter
		if kind == snapStorage {
			meter = snapMissallStorageMeter
		}
		meter.Mark(1)
	}
	// We use the snap data to build up a cache which can be used by the
	// main account trie as a primary lookup when resolving hashes
	var resolver trie.NodeResolver
	if len(result.keys) > 0 {
		// Build an in-memory trie from the flat data so its nodes can serve
		// as a resolver cache during the main trie iteration below.
		mdb := rawdb.NewMemoryDatabase()
		tdb := trie.NewDatabase(mdb)
		snapTrie := trie.NewEmpty(tdb)
		for i, key := range result.keys {
			snapTrie.Update(key, result.vals[i])
		}
		root, nodes := snapTrie.Commit(false)
		if nodes != nil {
			tdb.Update(trie.NewWithNodeSet(nodes))
			tdb.Commit(root, false)
		}
		resolver = func(owner common.Hash, path []byte, hash common.Hash) []byte {
			return rawdb.ReadTrieNode(mdb, owner, path, hash, tdb.Scheme())
		}
	}
	// Construct the trie for state iteration, reuse the trie
	// if it's already opened with some nodes resolved.
	tr := result.tr
	if tr == nil {
		tr, err = trie.New(trieId, dl.triedb)
		if err != nil {
			ctx.stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
			return false, nil, errMissingTrie
		}
	}
	var (
		trieMore       bool
		nodeIt         = tr.NodeIterator(origin)
		iter           = trie.NewIterator(nodeIt)
		kvkeys, kvvals = result.keys, result.vals

		// counters
		count     = 0 // number of states delivered by iterator
		created   = 0 // states created from the trie
		updated   = 0 // states updated from the trie
		deleted   = 0 // states not in trie, but were in snapshot
		untouched = 0 // states already correct

		// timers
		start    = time.Now()
		internal time.Duration // time spent inside onState callbacks, excluded from trie-read metrics
	)
	nodeIt.AddResolver(resolver)

	// Merge-walk the trie (authoritative) against the flat snapshot data
	// (kvkeys/kvvals), emitting create/update/delete decisions via onState.
	for iter.Next() {
		if last != nil && bytes.Compare(iter.Key, last) > 0 {
			trieMore = true
			break
		}
		count++
		write := true
		created++
		for len(kvkeys) > 0 {
			if cmp := bytes.Compare(kvkeys[0], iter.Key); cmp < 0 {
				// delete the key
				istart := time.Now()
				if err := onState(kvkeys[0], nil, false, true); err != nil {
					return false, nil, err
				}
				kvkeys = kvkeys[1:]
				kvvals = kvvals[1:]
				deleted++
				internal += time.Since(istart)
				continue
			} else if cmp == 0 {
				// the snapshot key can be overwritten
				created--
				if write = !bytes.Equal(kvvals[0], iter.Value); write {
					updated++
				} else {
					untouched++
				}
				kvkeys = kvkeys[1:]
				kvvals = kvvals[1:]
			}
			break
		}
		istart := time.Now()
		if err := onState(iter.Key, iter.Value, write, false); err != nil {
			return false, nil, err
		}
		internal += time.Since(istart)
	}
	if iter.Err != nil {
		return false, nil, iter.Err
	}
	// Delete all stale snapshot states remaining
	istart := time.Now()
	for _, key := range kvkeys {
		if err := onState(key, nil, false, true); err != nil {
			return false, nil, err
		}
		deleted += 1
	}
	internal += time.Since(istart)

	// Update metrics for counting trie iteration
	if kind == snapStorage {
		snapStorageTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
	} else {
		snapAccountTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
	}
	logger.Debug("Regenerated state range", "root", trieId.Root, "last", hexutil.Encode(last),
		"count", count, "created", created, "updated", updated, "untouched", untouched, "deleted", deleted)

	// If there are either more trie items, or there are more snap items
	// (in the next segment), then we need to keep working
	return !trieMore && !result.diskMore, last, nil
}
||||
|
||||
// checkAndFlush checks if an interruption signal is received or the
// batch size has exceeded the allowance.
func (dl *diskLayer) checkAndFlush(ctx *generatorContext, current []byte) error {
	// Non-blocking poll for an abort request from the layer owner.
	var abort chan *generatorStats
	select {
	case abort = <-dl.genAbort:
	default:
	}
	if ctx.batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
		// Sanity check: the generation marker must only ever move forward.
		if bytes.Compare(current, dl.genMarker) < 0 {
			utils.Logger().Error().
				Str("current", fmt.Sprintf("%x", current)).
				Str("genMarker", fmt.Sprintf("%x", dl.genMarker)).
				Msg("Snapshot generator went backwards")
		}
		// Flush out the batch anyway no matter it's empty or not.
		// It's possible that all the states are recovered and the
		// generation indeed makes progress.
		journalProgress(ctx.batch, current, ctx.stats)

		if err := ctx.batch.Write(); err != nil {
			return err
		}
		ctx.batch.Reset()

		// Publish the new marker under the layer lock so concurrent readers
		// observe a consistent generation frontier.
		dl.lock.Lock()
		dl.genMarker = current
		dl.lock.Unlock()

		if abort != nil {
			ctx.stats.Log("Aborting state snapshot generation", dl.root, current)
			return newAbortErr(abort) // bubble up an error for interruption
		}
		// Don't hold the iterators too long, release them to let compactor works
		ctx.reopenIterator(snapAccount)
		ctx.reopenIterator(snapStorage)
	}
	// Periodic progress logging, independent of whether a flush happened.
	if time.Since(ctx.logged) > 8*time.Second {
		ctx.stats.Log("Generating state snapshot", dl.root, current)
		ctx.logged = time.Now()
	}
	return nil
}
||||
|
||||
// generateStorages generates the missing storage slots of the specific contract.
// It's supposed to restart the generation from the given origin position.
func generateStorages(ctx *generatorContext, dl *diskLayer, stateRoot common.Hash, account common.Hash, storageRoot common.Hash, storeMarker []byte) error {
	// onStorage is invoked by generateRange for every visited slot and applies
	// the requested mutation (delete / write / already-correct) to the batch.
	onStorage := func(key []byte, val []byte, write bool, delete bool) error {
		defer func(start time.Time) {
			snapStorageWriteCounter.Inc(time.Since(start).Nanoseconds())
		}(time.Now())

		if delete {
			rawdb.DeleteStorageSnapshot(ctx.batch, account, common.BytesToHash(key))
			snapWipedStorageMeter.Mark(1)
			return nil
		}
		if write {
			rawdb.WriteStorageSnapshot(ctx.batch, account, common.BytesToHash(key), val)
			snapGeneratedStorageMeter.Mark(1)
		} else {
			snapRecoveredStorageMeter.Mark(1)
		}
		// Track approximate on-disk footprint: prefix byte + two hashes + value.
		ctx.stats.storage += common.StorageSize(1 + 2*common.HashLength + len(val))
		ctx.stats.slots++

		// If we've exceeded our batch allowance or termination was requested, flush to disk
		if err := dl.checkAndFlush(ctx, append(account[:], key...)); err != nil {
			return err
		}
		return nil
	}
	// Loop for re-generating the missing storage slots.
	var origin = common.CopyBytes(storeMarker)
	for {
		id := trie.StorageTrieID(stateRoot, account, storageRoot)
		exhausted, last, err := dl.generateRange(ctx, id, append(rawdb.SnapshotStoragePrefix, account.Bytes()...), snapStorage, origin, storageCheckRange, onStorage, nil)
		if err != nil {
			return err // The procedure it aborted, either by external signal or internal error.
		}
		// Abort the procedure if the entire contract storage is generated
		if exhausted {
			break
		}
		if origin = increaseKey(last); origin == nil {
			break // special case, the last is 0xffffffff...fff
		}
	}
	return nil
}
||||
|
||||
// generateAccounts generates the missing snapshot accounts as well as their
// storage slots in the main trie. It's supposed to restart the generation
// from the given origin position.
func generateAccounts(ctx *generatorContext, dl *diskLayer, accMarker []byte) error {
	// onAccount is invoked by generateRange for every visited account and
	// applies the requested mutation, then recurses into the account storage.
	onAccount := func(key []byte, val []byte, write bool, delete bool) error {
		// Make sure to clear all dangling storages before this account
		account := common.BytesToHash(key)
		ctx.removeStorageBefore(account)

		start := time.Now()
		if delete {
			rawdb.DeleteAccountSnapshot(ctx.batch, account)
			snapWipedAccountMeter.Mark(1)
			snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())

			// Deleting the account also wipes all of its storage slots.
			ctx.removeStorageAt(account)
			return nil
		}
		// Retrieve the current account and flatten it into the internal format
		var acc struct {
			Nonce    uint64
			Balance  *big.Int
			Root     common.Hash
			CodeHash []byte
		}
		if err := rlp.DecodeBytes(val, &acc); err != nil {
			utils.Logger().Fatal().Err(err).Msg("Invalid account encountered during snapshot creation")
		}
		// If the account is not yet in-progress, write it out
		if accMarker == nil || !bytes.Equal(account[:], accMarker) {
			dataLen := len(val) // Approximate size, saves us a round of RLP-encoding
			if !write {
				// Already-correct account: adjust the approximation for the
				// fields the slim encoding omits.
				if bytes.Equal(acc.CodeHash, types.EmptyCodeHash[:]) {
					dataLen -= 32
				}
				if acc.Root == types.EmptyRootHash {
					dataLen -= 32
				}
				snapRecoveredAccountMeter.Mark(1)
			} else {
				data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)
				dataLen = len(data)
				rawdb.WriteAccountSnapshot(ctx.batch, account, data)
				snapGeneratedAccountMeter.Mark(1)
			}
			ctx.stats.storage += common.StorageSize(1 + common.HashLength + dataLen)
			ctx.stats.accounts++
		}
		// If the snap generation goes here after interrupted, genMarker may go backward
		// when last genMarker is consisted of accountHash and storageHash
		marker := account[:]
		if accMarker != nil && bytes.Equal(marker, accMarker) && len(dl.genMarker) > common.HashLength {
			marker = dl.genMarker[:]
		}
		// If we've exceeded our batch allowance or termination was requested, flush to disk
		if err := dl.checkAndFlush(ctx, marker); err != nil {
			return err
		}
		snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds()) // let's count flush time as well

		// If the iterated account is the contract, create a further loop to
		// verify or regenerate the contract storage.
		if acc.Root == types.EmptyRootHash {
			ctx.removeStorageAt(account)
		} else {
			// Resume mid-account: the tail of genMarker encodes the storage
			// position where the previous run was interrupted.
			var storeMarker []byte
			if accMarker != nil && bytes.Equal(account[:], accMarker) && len(dl.genMarker) > common.HashLength {
				storeMarker = dl.genMarker[common.HashLength:]
			}
			if err := generateStorages(ctx, dl, dl.root, account, acc.Root, storeMarker); err != nil {
				return err
			}
		}
		// Some account processed, unmark the marker
		accMarker = nil
		return nil
	}
	// Always reset the initial account range as 1 whenever recover from the
	// interruption. TODO(rjl493456442) can we remove it?
	var accountRange = accountCheckRange
	if len(accMarker) > 0 {
		accountRange = 1
	}
	origin := common.CopyBytes(accMarker)
	for {
		id := trie.StateTrieID(dl.root)
		exhausted, last, err := dl.generateRange(ctx, id, rawdb.SnapshotAccountPrefix, snapAccount, origin, accountRange, onAccount, FullAccountRLP)
		if err != nil {
			return err // The procedure it aborted, either by external signal or internal error.
		}
		origin = increaseKey(last)

		// Last step, cleanup the storages after the last account.
		// All the left storages should be treated as dangling.
		if origin == nil || exhausted {
			ctx.removeStorageLeft()
			break
		}
		accountRange = accountCheckRange
	}
	return nil
}
||||
|
||||
// generate is a background thread that iterates over the state and storage
// tries, constructing the state snapshot. All the arguments are purely for
// statistics gathering and logging, since the method surfs the blocks as they
// arrive, often being restarted.
//
// Whoever wants the generator to stop sends a chan *generatorStats on
// dl.genAbort; generate answers on that channel with the current stats (or
// nil once fully complete) before returning.
func (dl *diskLayer) generate(stats *generatorStats) {
	var (
		accMarker []byte
		abort     chan *generatorStats
	)
	if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that
		// The first HashLength bytes of the marker identify the account at
		// which a previous, interrupted run stopped.
		accMarker = dl.genMarker[:common.HashLength]
	}
	stats.Log("Resuming state snapshot generation", dl.root, dl.genMarker)

	// Initialize the global generator context. The snapshot iterators are
	// opened at the interrupted position because the assumption is held
	// that all the snapshot data are generated correctly before the marker.
	// Even if the snapshot data is updated during the interruption (before
	// or at the marker), the assumption is still held.
	// For the account or storage slot at the interruption, they will be
	// processed twice by the generator(they are already processed in the
	// last run) but it's fine.
	ctx := newGeneratorContext(stats, dl.diskdb, accMarker, dl.genMarker)
	defer ctx.close()

	if err := generateAccounts(ctx, dl, accMarker); err != nil {
		// Extract the received interruption signal if exists
		if aerr, ok := err.(*abortErr); ok {
			abort = aerr.abort
		}
		// Aborted by internal error, wait the signal
		if abort == nil {
			abort = <-dl.genAbort
		}
		// Hand the in-flight statistics to the aborter and bail out.
		abort <- stats
		return
	}
	// Snapshot fully generated, set the marker to nil.
	// Note even there is nothing to commit, persist the
	// generator anyway to mark the snapshot is complete.
	journalProgress(ctx.batch, nil, stats)
	if err := ctx.batch.Write(); err != nil {
		utils.Logger().Error().Err(err).Msg("Failed to flush batch")

		// Persisting failed: wait for an abort request and report back.
		abort = <-dl.genAbort
		abort <- stats
		return
	}
	ctx.batch.Reset()

	utils.Logger().Info().
		Uint64("accounts", stats.accounts).
		Uint64("slots", stats.slots).
		Interface("storage", stats.storage).
		Uint64("dangling", stats.dangling).
		Interface("elapsed", common.PrettyDuration(time.Since(stats.start))).
		Msg("Generated state snapshot")

	// Publish completion: clear the marker and close genPending under the
	// layer lock so readers observe a consistent "fully generated" state.
	dl.lock.Lock()
	dl.genMarker = nil
	close(dl.genPending)
	dl.lock.Unlock()

	// Someone will be looking for us, wait it out
	abort = <-dl.genAbort
	abort <- nil
}
||||
|
||||
// increaseKey increments the key by one, treating it as a big-endian integer,
// and mutates it in place. It returns the mutated key, or nil if the carry
// propagated out of the most significant byte (i.e. the addition overflowed).
func increaseKey(key []byte) []byte {
	for i := len(key); i > 0; i-- {
		key[i-1]++
		if key[i-1] != 0 {
			return key
		}
	}
	return nil
}
||||
|
||||
// abortErr wraps an interruption signal received to represent the
// generation is aborted by external processes. The channel is carried
// inside the error so the generator can hand its final statistics back to
// whoever requested the abort.
type abortErr struct {
	abort chan *generatorStats // channel the aborter waits on for the stats reply
}
||||
|
||||
func newAbortErr(abort chan *generatorStats) error { |
||||
return &abortErr{abort: abort} |
||||
} |
||||
|
||||
func (err *abortErr) Error() string { |
||||
return "aborted" |
||||
} |
// NOTE(review): a stray diff hunk header ("@ -0,0 +1,861 @@") was embedded
// here by the extraction; everything below belongs to the snapshot generator
// test file.
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import ( |
||||
"fmt" |
||||
"math/big" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/ethereum/go-ethereum/trie" |
||||
"github.com/harmony-one/harmony/core/rawdb" |
||||
"golang.org/x/crypto/sha3" |
||||
) |
||||
|
||||
func hashData(input []byte) common.Hash { |
||||
var hasher = sha3.NewLegacyKeccak256() |
||||
var hash common.Hash |
||||
hasher.Reset() |
||||
hasher.Write(input) |
||||
hasher.Sum(hash[:0]) |
||||
return hash |
||||
} |
||||
|
||||
// Tests that snapshot generation works from an empty flat database: three
// accounts are inserted into the account trie (two sharing an identical
// storage trie) and the generator must rebuild the snapshot to match.
func TestGeneration(t *testing.T) {
	// We can't use statedb to make a test trie (circular dependency), so make
	// a fake one manually. We're going with a small account trie of 3 accounts,
	// two of which also has the same 3-slot storage trie attached.
	var helper = newHelper()
	stRoot := helper.makeStorageTrie(common.Hash{}, common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, false)

	helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
	helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
	helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})

	// Commit the actual storage tries owned by acc-1 and acc-3; they carry
	// the same content as the hash-only trie above, hence the same root.
	helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
	helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)

	root, snap := helper.CommitAndGenerate()
	if have, want := root, common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"); have != want {
		t.Fatalf("have %#x want %#x", have, want)
	}
	select {
	case <-snap.genPending:
		// Snapshot generation succeeded

	case <-time.After(3 * time.Second):
		t.Errorf("Snapshot generation failed")
	}
	checkSnapRoot(t, snap, root)

	// Signal abortion to the generator and wait for it to tear down
	stop := make(chan *generatorStats)
	snap.genAbort <- stop
	<-stop
}
||||
|
||||
// Tests that snapshot generation succeeds when the flat state already exists
// on disk and is fully consistent with the trie contents.
func TestGenerateExistentState(t *testing.T) {
	// We can't use statedb to make a test trie (circular dependency), so make
	// a fake one manually. We're going with a small account trie of 3 accounts,
	// two of which also has the same 3-slot storage trie attached.
	var helper = newHelper()

	// Each account is mirrored into the trie AND the flat snapshot, so the
	// generator only needs to verify, not regenerate.
	stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
	helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
	helper.addSnapAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
	helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})

	helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
	helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})

	stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
	helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
	helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
	helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})

	root, snap := helper.CommitAndGenerate()
	select {
	case <-snap.genPending:
		// Snapshot generation succeeded

	case <-time.After(3 * time.Second):
		t.Errorf("Snapshot generation failed")
	}
	checkSnapRoot(t, snap, root)

	// Signal abortion to the generator and wait for it to tear down
	stop := make(chan *generatorStats)
	snap.genAbort <- stop
	<-stop
}
||||
|
||||
func checkSnapRoot(t *testing.T, snap *diskLayer, trieRoot common.Hash) { |
||||
t.Helper() |
||||
|
||||
accIt := snap.AccountIterator(common.Hash{}) |
||||
defer accIt.Release() |
||||
|
||||
snapRoot, err := generateTrieRoot(nil, "", accIt, common.Hash{}, stackTrieGenerate, |
||||
func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { |
||||
storageIt, _ := snap.StorageIterator(accountHash, common.Hash{}) |
||||
defer storageIt.Release() |
||||
|
||||
hash, err := generateTrieRoot(nil, "", storageIt, accountHash, stackTrieGenerate, nil, stat, false) |
||||
if err != nil { |
||||
return common.Hash{}, err |
||||
} |
||||
return hash, nil |
||||
}, newGenerateStats(), true) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if snapRoot != trieRoot { |
||||
t.Fatalf("snaproot: %#x != trieroot #%x", snapRoot, trieRoot) |
||||
} |
||||
if err := CheckDanglingStorage(snap.diskdb); err != nil { |
||||
t.Fatalf("Detected dangling storages: %v", err) |
||||
} |
||||
} |
||||
|
||||
// testHelper bundles the in-memory database, trie database, account trie and
// the set of collected trie nodes used to assemble fixture state for the
// snapshot generation tests.
type testHelper struct {
	diskdb  ethdb.Database      // in-memory key-value store backing tries and snapshot
	triedb  *trie.Database      // trie node database layered on diskdb
	accTrie *trie.StateTrie     // account trie being populated by the test
	nodes   *trie.MergedNodeSet // dirty nodes accumulated from all committed tries
}
||||
|
||||
func newHelper() *testHelper { |
||||
diskdb := rawdb.NewMemoryDatabase() |
||||
triedb := trie.NewDatabase(diskdb) |
||||
accTrie, _ := trie.NewStateTrie(trie.StateTrieID(common.Hash{}), triedb) |
||||
return &testHelper{ |
||||
diskdb: diskdb, |
||||
triedb: triedb, |
||||
accTrie: accTrie, |
||||
nodes: trie.NewMergedNodeSet(), |
||||
} |
||||
} |
||||
|
||||
func (t *testHelper) addTrieAccount(acckey string, acc *Account) { |
||||
val, _ := rlp.EncodeToBytes(acc) |
||||
t.accTrie.Update([]byte(acckey), val) |
||||
} |
||||
|
||||
func (t *testHelper) addSnapAccount(acckey string, acc *Account) { |
||||
val, _ := rlp.EncodeToBytes(acc) |
||||
key := hashData([]byte(acckey)) |
||||
rawdb.WriteAccountSnapshot(t.diskdb, key, val) |
||||
} |
||||
|
||||
// addAccount mirrors the account into both the account trie and the flat
// snapshot, keeping the two representations consistent.
func (t *testHelper) addAccount(acckey string, acc *Account) {
	t.addTrieAccount(acckey, acc)
	t.addSnapAccount(acckey, acc)
}
||||
|
||||
func (t *testHelper) addSnapStorage(accKey string, keys []string, vals []string) { |
||||
accHash := hashData([]byte(accKey)) |
||||
for i, key := range keys { |
||||
rawdb.WriteStorageSnapshot(t.diskdb, accHash, hashData([]byte(key)), []byte(vals[i])) |
||||
} |
||||
} |
||||
|
||||
func (t *testHelper) makeStorageTrie(stateRoot, owner common.Hash, keys []string, vals []string, commit bool) []byte { |
||||
id := trie.StorageTrieID(stateRoot, owner, common.Hash{}) |
||||
stTrie, _ := trie.NewStateTrie(id, t.triedb) |
||||
for i, k := range keys { |
||||
stTrie.Update([]byte(k), []byte(vals[i])) |
||||
} |
||||
if !commit { |
||||
return stTrie.Hash().Bytes() |
||||
} |
||||
root, nodes := stTrie.Commit(false) |
||||
if nodes != nil { |
||||
t.nodes.Merge(nodes) |
||||
} |
||||
return root.Bytes() |
||||
} |
||||
|
||||
func (t *testHelper) Commit() common.Hash { |
||||
root, nodes := t.accTrie.Commit(true) |
||||
if nodes != nil { |
||||
t.nodes.Merge(nodes) |
||||
} |
||||
t.triedb.Update(t.nodes) |
||||
t.triedb.Commit(root, false) |
||||
return root |
||||
} |
||||
|
||||
// CommitAndGenerate persists the fixture state and kicks off background
// snapshot generation against the resulting root, returning both the root
// and the generating disk layer.
func (t *testHelper) CommitAndGenerate() (common.Hash, *diskLayer) {
	root := t.Commit()
	snap := generateSnapshot(t.diskdb, t.triedb, 16, root)
	return root, snap
}
||||
|
||||
// Tests snapshot generation with existent flat state, where the flat state
// contains some errors:
//   - the contract with empty storage root but has storage entries in the disk
//   - the contract with non empty storage root but empty storage slots
//   - the contract(non-empty storage) misses some storage slots
//   - miss in the beginning
//   - miss in the middle
//   - miss in the end
//   - the contract(non-empty storage) has wrong storage slots
//   - wrong slots in the beginning
//   - wrong slots in the middle
//   - wrong slots in the end
//   - the contract(non-empty storage) has extra storage slots
//   - extra slots in the beginning
//   - extra slots in the middle
//   - extra slots in the end
//
// The generator must detect each inconsistency and rebuild the flat state so
// that it matches the tries again.
func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
	helper := newHelper()

	// Account one, empty root but non-empty database
	helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
	helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})

	// Account two, non empty root but empty database
	stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
	helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})

	// NOTE: every storage trie below carries the exact same key/value content,
	// so its root equals acc-2's stRoot; stRoot is therefore deliberately
	// reused for all following accounts, and the extra makeStorageTrie calls
	// only commit the per-owner trie nodes.

	// Miss slots
	{
		// Account three, non empty root but misses slots in the beginning
		helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
		helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
		helper.addSnapStorage("acc-3", []string{"key-2", "key-3"}, []string{"val-2", "val-3"})

		// Account four, non empty root but misses slots in the middle
		helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-4")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
		helper.addAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
		helper.addSnapStorage("acc-4", []string{"key-1", "key-3"}, []string{"val-1", "val-3"})

		// Account five, non empty root but misses slots in the end
		helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-5")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
		helper.addAccount("acc-5", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
		helper.addSnapStorage("acc-5", []string{"key-1", "key-2"}, []string{"val-1", "val-2"})
	}

	// Wrong storage slots
	{
		// Account six, non empty root but wrong slots in the beginning
		helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-6")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
		helper.addAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
		helper.addSnapStorage("acc-6", []string{"key-1", "key-2", "key-3"}, []string{"badval-1", "val-2", "val-3"})

		// Account seven, non empty root but wrong slots in the middle
		helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-7")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
		helper.addAccount("acc-7", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
		helper.addSnapStorage("acc-7", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "badval-2", "val-3"})

		// Account eight, non empty root but wrong slots in the end
		helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-8")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
		helper.addAccount("acc-8", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
		helper.addSnapStorage("acc-8", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "badval-3"})

		// Account 9, non empty root but rotated slots
		helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-9")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
		helper.addAccount("acc-9", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
		helper.addSnapStorage("acc-9", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-3", "val-2"})
	}

	// Extra storage slots
	{
		// Account 10, non empty root but extra slots in the beginning
		helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-10")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
		helper.addAccount("acc-10", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
		helper.addSnapStorage("acc-10", []string{"key-0", "key-1", "key-2", "key-3"}, []string{"val-0", "val-1", "val-2", "val-3"})

		// Account 11, non empty root but extra slots in the middle
		helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-11")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
		helper.addAccount("acc-11", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
		helper.addSnapStorage("acc-11", []string{"key-1", "key-2", "key-2-1", "key-3"}, []string{"val-1", "val-2", "val-2-1", "val-3"})

		// Account 12, non empty root but extra slots in the end
		helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-12")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
		helper.addAccount("acc-12", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
		helper.addSnapStorage("acc-12", []string{"key-1", "key-2", "key-3", "key-4"}, []string{"val-1", "val-2", "val-3", "val-4"})
	}

	root, snap := helper.CommitAndGenerate()
	t.Logf("Root: %#x\n", root) // Root = 0x8746cce9fd9c658b2cfd639878ed6584b7a2b3e73bb40f607fcfa156002429a0

	select {
	case <-snap.genPending:
		// Snapshot generation succeeded

	case <-time.After(3 * time.Second):
		t.Errorf("Snapshot generation failed")
	}
	checkSnapRoot(t, snap, root)
	// Signal abortion to the generator and wait for it to tear down
	stop := make(chan *generatorStats)
	snap.genAbort <- stop
	<-stop
}
||||
|
||||
// Tests snapshot generation with existent flat state, where the flat state
// contains account-level errors the generator has to repair:
//   - miss accounts (present in the trie, absent from the snapshot)
//   - wrong accounts (snapshot value disagrees with the trie)
//   - extra accounts (present only in the snapshot)
func TestGenerateExistentStateWithWrongAccounts(t *testing.T) {
	helper := newHelper()

	// All storage tries share identical content, so the final stRoot is valid
	// for every contract account below.
	helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
	helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
	helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
	helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-4")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
	stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-6")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)

	// Trie accounts [acc-1, acc-2, acc-3, acc-4, acc-6]
	// Extra accounts [acc-0, acc-5, acc-7]

	// Missing accounts, only in the trie
	{
		helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Beginning
		helper.addTrieAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Middle
		helper.addTrieAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // End
	}

	// Wrong accounts
	// NOTE(review): common.Hex2Bytes stops at the first non-hex rune, so
	// "0x1234" decodes to an empty slice rather than 0x1234 — still a code
	// hash that differs from the trie's, which is all the test needs, but
	// confirm whether "1234" (no prefix) was intended.
	{
		helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
		helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")})

		helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
		helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
	}

	// Extra accounts, only in the snap
	{
		helper.addSnapAccount("acc-0", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})                     // before the beginning
		helper.addSnapAccount("acc-5", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: common.Hex2Bytes("0x1234")}) // Middle
		helper.addSnapAccount("acc-7", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // after the end
	}

	root, snap := helper.CommitAndGenerate()
	t.Logf("Root: %#x\n", root) // Root = 0x825891472281463511e7ebcc7f109e4f9200c20fa384754e11fd605cd98464e8

	select {
	case <-snap.genPending:
		// Snapshot generation succeeded

	case <-time.After(3 * time.Second):
		t.Errorf("Snapshot generation failed")
	}
	checkSnapRoot(t, snap, root)

	// Signal abortion to the generator and wait for it to tear down
	stop := make(chan *generatorStats)
	snap.genAbort <- stop
	<-stop
}
||||
|
||||
// Tests that snapshot generation errors out correctly in case of a missing trie
// node in the account trie: the generator must stall (never close genPending)
// rather than produce a snapshot from corrupt state.
func TestGenerateCorruptAccountTrie(t *testing.T) {
	// We can't use statedb to make a test trie (circular dependency), so make
	// a fake one manually. We're going with a small account trie of 3 accounts,
	// without any storage slots to keep the test smaller.
	helper := newHelper()

	helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074
	helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
	helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4

	root := helper.Commit() // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978

	// Delete an account trie leaf and ensure the generator chokes
	helper.triedb.Commit(root, false)
	helper.diskdb.Delete(common.HexToHash("0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7").Bytes())

	snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
	select {
	case <-snap.genPending:
		// Snapshot generation succeeded
		t.Errorf("Snapshot generated against corrupt account trie")

	case <-time.After(time.Second):
		// Not generated fast enough, hopefully blocked inside on missing trie node fail
	}
	// Signal abortion to the generator and wait for it to tear down
	stop := make(chan *generatorStats)
	snap.genAbort <- stop
	<-stop
}
||||
|
||||
// Tests that snapshot generation errors out correctly in case of a missing root
// trie node for a storage trie. It's similar to internal corruption but it is
// handled differently inside the generator: the run must stall rather than
// complete.
func TestGenerateMissingStorageTrie(t *testing.T) {
	// We can't use statedb to make a test trie (circular dependency), so make
	// a fake one manually. We're going with a small account trie of 3 accounts,
	// two of which also has the same 3-slot storage trie attached.
	helper := newHelper()

	stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
	helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
	helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
	stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
	helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2

	root := helper.Commit()

	// Delete a storage trie root and ensure the generator chokes
	helper.diskdb.Delete(stRoot)

	snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
	select {
	case <-snap.genPending:
		// Snapshot generation succeeded
		t.Errorf("Snapshot generated against corrupt storage trie")

	case <-time.After(time.Second):
		// Not generated fast enough, hopefully blocked inside on missing trie node fail
	}
	// Signal abortion to the generator and wait for it to tear down
	stop := make(chan *generatorStats)
	snap.genAbort <- stop
	<-stop
}
||||
|
||||
// Tests that snapshot generation errors out correctly in case of a missing trie
// node in a storage trie (an interior/leaf node rather than the root): the
// run must stall rather than complete.
func TestGenerateCorruptStorageTrie(t *testing.T) {
	// We can't use statedb to make a test trie (circular dependency), so make
	// a fake one manually. We're going with a small account trie of 3 accounts,
	// two of which also has the same 3-slot storage trie attached.
	helper := newHelper()

	stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
	helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
	helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
	stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
	helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2

	root := helper.Commit()

	// Delete a storage trie leaf and ensure the generator chokes
	helper.diskdb.Delete(common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371").Bytes())

	snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
	select {
	case <-snap.genPending:
		// Snapshot generation succeeded
		t.Errorf("Snapshot generated against corrupt storage trie")

	case <-time.After(time.Second):
		// Not generated fast enough, hopefully blocked inside on missing trie node fail
	}
	// Signal abortion to the generator and wait for it to tear down
	stop := make(chan *generatorStats)
	snap.genAbort <- stop
	<-stop
}
||||
|
||||
// Tests snapshot generation when an extra account with storage exists only in
// the snap state: the generator must delete the account and all of its
// storage slots from the flat state.
func TestGenerateWithExtraAccounts(t *testing.T) {
	helper := newHelper()
	{
		// Account one in the trie
		stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")),
			[]string{"key-1", "key-2", "key-3", "key-4", "key-5"},
			[]string{"val-1", "val-2", "val-3", "val-4", "val-5"},
			true,
		)
		acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
		val, _ := rlp.EncodeToBytes(acc)
		helper.accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e

		// Identical in the snap
		key := hashData([]byte("acc-1"))
		rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
		rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-1")), []byte("val-1"))
		rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-2")), []byte("val-2"))
		rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-3")), []byte("val-3"))
		rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-4")), []byte("val-4"))
		rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-5")), []byte("val-5"))
	}
	{
		// Account two exists only in the snapshot
		stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-2")),
			[]string{"key-1", "key-2", "key-3", "key-4", "key-5"},
			[]string{"val-1", "val-2", "val-3", "val-4", "val-5"},
			true,
		)
		acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
		val, _ := rlp.EncodeToBytes(acc)
		key := hashData([]byte("acc-2"))
		rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
		rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("b-key-1")), []byte("b-val-1"))
		rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("b-key-2")), []byte("b-val-2"))
		rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("b-key-3")), []byte("b-val-3"))
	}
	root := helper.Commit()

	// To verify the test: If we now inspect the snap db, there should exist extraneous storage items
	if data := rawdb.ReadStorageSnapshot(helper.diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data == nil {
		t.Fatalf("expected snap storage to exist")
	}
	snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
	select {
	case <-snap.genPending:
		// Snapshot generation succeeded

	case <-time.After(3 * time.Second):
		t.Errorf("Snapshot generation failed")
	}
	checkSnapRoot(t, snap, root)

	// Signal abortion to the generator and wait for it to tear down
	stop := make(chan *generatorStats)
	snap.genAbort <- stop
	<-stop
	// If we now inspect the snap db, there should exist no extraneous storage items
	if data := rawdb.ReadStorageSnapshot(helper.diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data != nil {
		t.Fatalf("expected slot to be removed, got %v", string(data))
	}
}
||||
|
||||
// Tests snapshot generation when many extra accounts exist only in the snap
// state (flat database) but not in the account trie; generation must remove them.
func TestGenerateWithManyExtraAccounts(t *testing.T) {
	helper := newHelper()
	{
		// Account one in the trie
		stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")),
			[]string{"key-1", "key-2", "key-3"},
			[]string{"val-1", "val-2", "val-3"},
			true,
		)
		acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
		val, _ := rlp.EncodeToBytes(acc)
		helper.accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e

		// Identical in the snap
		key := hashData([]byte("acc-1"))
		rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
		rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-1")), []byte("val-1"))
		rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-2")), []byte("val-2"))
		rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-3")), []byte("val-3"))
	}
	{
		// 1000 accounts exist only in the snapshot (the previous comment
		// said 100, but the loop below writes 1000).
		for i := 0; i < 1000; i++ {
			acc := &Account{Balance: big.NewInt(int64(i)), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}
			val, _ := rlp.EncodeToBytes(acc)
			key := hashData([]byte(fmt.Sprintf("acc-%d", i)))
			rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
		}
	}
	root, snap := helper.CommitAndGenerate()
	select {
	case <-snap.genPending:
		// Snapshot generation succeeded

	case <-time.After(3 * time.Second):
		t.Errorf("Snapshot generation failed")
	}
	checkSnapRoot(t, snap, root)
	// Signal abortion to the generator and wait for it to tear down
	stop := make(chan *generatorStats)
	snap.genAbort <- stop
	<-stop
}
||||
|
||||
// Tests this case
// maxAccountRange 3
// snapshot-accounts: 01, 02, 03, 04, 05, 06, 07
// trie-accounts: 03, 07
//
// We iterate three snapshot storage slots (max = 3) from the database. They are 0x01, 0x02, 0x03.
// The trie has a lot of deletions.
// So in trie, we iterate 2 entries 0x03, 0x07. We create the 0x07 in the database and abort the procedure, because the trie is exhausted.
// But in the database, we still have the stale storage slots 0x04, 0x05. They are not iterated yet, but the procedure is finished.
func TestGenerateWithExtraBeforeAndAfter(t *testing.T) {
	// NOTE(review): mutates a package-global tuning knob; this test must not
	// run in parallel with tests relying on the default value.
	accountCheckRange = 3
	helper := newHelper()
	{
		// Two accounts in the trie (0x03 and 0x07)...
		acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}
		val, _ := rlp.EncodeToBytes(acc)
		helper.accTrie.Update(common.HexToHash("0x03").Bytes(), val)
		helper.accTrie.Update(common.HexToHash("0x07").Bytes(), val)

		// ...but seven entries in the flat snapshot (0x01..0x07); the five
		// extras must be cleaned up by generation.
		rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x01"), val)
		rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x02"), val)
		rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x03"), val)
		rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x04"), val)
		rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x05"), val)
		rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x06"), val)
		rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x07"), val)
	}
	root, snap := helper.CommitAndGenerate()
	select {
	case <-snap.genPending:
		// Snapshot generation succeeded

	case <-time.After(3 * time.Second):
		t.Errorf("Snapshot generation failed")
	}
	checkSnapRoot(t, snap, root)
	// Signal abortion to the generator and wait for it to tear down
	stop := make(chan *generatorStats)
	snap.genAbort <- stop
	<-stop
}
||||
|
||||
// TestGenerateWithMalformedSnapdata tests what happens if we have some junk
// in the snapshot database, which cannot be parsed back to an account
func TestGenerateWithMalformedSnapdata(t *testing.T) {
	// NOTE(review): mutates a package-global tuning knob; keep out of t.Parallel().
	accountCheckRange = 3
	helper := newHelper()
	{
		// One valid account in the trie...
		acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}
		val, _ := rlp.EncodeToBytes(acc)
		helper.accTrie.Update(common.HexToHash("0x03").Bytes(), val)

		// ...and four unparseable junk blobs written into the flat snapshot.
		junk := make([]byte, 100)
		copy(junk, []byte{0xde, 0xad})
		rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x02"), junk)
		rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x03"), junk)
		rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x04"), junk)
		rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x05"), junk)
	}
	root, snap := helper.CommitAndGenerate()
	select {
	case <-snap.genPending:
		// Snapshot generation succeeded

	case <-time.After(3 * time.Second):
		t.Errorf("Snapshot generation failed")
	}
	checkSnapRoot(t, snap, root)
	// Signal abortion to the generator and wait for it to tear down
	stop := make(chan *generatorStats)
	snap.genAbort <- stop
	<-stop
	// If we now inspect the snap db, there should exist no extraneous storage items
	// NOTE(review): "acc-2"/"b-key-1" is never written by this test, so this
	// check is vacuous — it appears copy-pasted from the extra-storage test;
	// confirm whether a check on the junk account keys was intended instead.
	if data := rawdb.ReadStorageSnapshot(helper.diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data != nil {
		t.Fatalf("expected slot to be removed, got %v", string(data))
	}
}
||||
|
||||
// TestGenerateFromEmptySnap checks generation when the account trie is fully
// populated but the flat snapshot starts out completely empty.
func TestGenerateFromEmptySnap(t *testing.T) {
	//enableLogging()
	// NOTE(review): mutates package-global tuning knobs; keep out of t.Parallel().
	accountCheckRange = 10
	storageCheckRange = 20
	helper := newHelper()
	// Add 400 accounts (each with a small storage trie) to the account trie.
	// (The previous comment said "1K accounts"; the loop below adds 400.)
	for i := 0; i < 400; i++ {
		stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte(fmt.Sprintf("acc-%d", i))), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
		helper.addTrieAccount(fmt.Sprintf("acc-%d", i),
			&Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
	}
	root, snap := helper.CommitAndGenerate()
	t.Logf("Root: %#x\n", root) // Root: 0x6f7af6d2e1a1bf2b84a3beb3f8b64388465fbc1e274ca5d5d3fc787ca78f59e4

	select {
	case <-snap.genPending:
		// Snapshot generation succeeded

	case <-time.After(3 * time.Second):
		t.Errorf("Snapshot generation failed")
	}
	checkSnapRoot(t, snap, root)
	// Signal abortion to the generator and wait for it to tear down
	stop := make(chan *generatorStats)
	snap.genAbort <- stop
	<-stop
}
||||
|
||||
// Tests that snapshot generation with existent flat state, where the flat state
// storage is correct, but incomplete.
// The incomplete part is on the second range
// snap: [ 0x01, 0x02, 0x03, 0x04] , [ 0x05, 0x06, 0x07, {missing}] (with storageCheck = 4)
// trie: 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08
// This hits a case where the snap verification passes, but there are more elements in the trie
// which we must also add.
func TestGenerateWithIncompleteStorage(t *testing.T) {
	// NOTE(review): mutates a package-global tuning knob; keep out of t.Parallel().
	storageCheckRange = 4
	helper := newHelper()
	stKeys := []string{"1", "2", "3", "4", "5", "6", "7", "8"}
	stVals := []string{"v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8"}
	// We add 8 accounts, each one is missing exactly one of the storage slots. This means
	// we don't have to order the keys and figure out exactly which hash-key winds up
	// on the sensitive spots at the boundaries
	for i := 0; i < 8; i++ {
		accKey := fmt.Sprintf("acc-%d", i)
		stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte(accKey)), stKeys, stVals, true)
		helper.addAccount(accKey, &Account{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
		var moddedKeys []string
		var moddedVals []string
		for ii := 0; ii < 8; ii++ {
			if ii != i {
				moddedKeys = append(moddedKeys, stKeys[ii])
				moddedVals = append(moddedVals, stVals[ii])
			}
		}
		// The flat snapshot sees every slot except the i-th one.
		helper.addSnapStorage(accKey, moddedKeys, moddedVals)
	}
	root, snap := helper.CommitAndGenerate()
	t.Logf("Root: %#x\n", root) // Root: 0xca73f6f05ba4ca3024ef340ef3dfca8fdabc1b677ff13f5a9571fd49c16e67ff

	select {
	case <-snap.genPending:
		// Snapshot generation succeeded

	case <-time.After(3 * time.Second):
		t.Errorf("Snapshot generation failed")
	}
	checkSnapRoot(t, snap, root)
	// Signal abortion to the generator and wait for it to tear down
	stop := make(chan *generatorStats)
	snap.genAbort <- stop
	<-stop
}
||||
|
||||
// incKey increments key in place, treating it as a big-endian unsigned
// integer: the last byte is bumped and any carry propagates towards the
// front. An all-0xff key wraps around to all zeroes. The (mutated) input
// slice is returned for caller convenience.
func incKey(key []byte) []byte {
	for i := len(key); i > 0; i-- {
		key[i-1]++
		if key[i-1] != 0x0 {
			break // no carry, done
		}
	}
	return key
}
||||
|
||||
// decKey decrements key in place, treating it as a big-endian unsigned
// integer: the last byte is decreased and any borrow propagates towards the
// front. An all-zero key wraps around to all 0xff. The (mutated) input
// slice is returned for caller convenience.
func decKey(key []byte) []byte {
	for i := len(key); i > 0; i-- {
		key[i-1]--
		if key[i-1] != 0xff {
			break // no borrow, done
		}
	}
	return key
}
||||
|
||||
func populateDangling(disk ethdb.KeyValueStore) { |
||||
populate := func(accountHash common.Hash, keys []string, vals []string) { |
||||
for i, key := range keys { |
||||
rawdb.WriteStorageSnapshot(disk, accountHash, hashData([]byte(key)), []byte(vals[i])) |
||||
} |
||||
} |
||||
// Dangling storages of the "first" account
|
||||
populate(common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) |
||||
|
||||
// Dangling storages of the "last" account
|
||||
populate(common.HexToHash("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) |
||||
|
||||
// Dangling storages around the account 1
|
||||
hash := decKey(hashData([]byte("acc-1")).Bytes()) |
||||
populate(common.BytesToHash(hash), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) |
||||
hash = incKey(hashData([]byte("acc-1")).Bytes()) |
||||
populate(common.BytesToHash(hash), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) |
||||
|
||||
// Dangling storages around the account 2
|
||||
hash = decKey(hashData([]byte("acc-2")).Bytes()) |
||||
populate(common.BytesToHash(hash), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) |
||||
hash = incKey(hashData([]byte("acc-2")).Bytes()) |
||||
populate(common.BytesToHash(hash), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) |
||||
|
||||
// Dangling storages around the account 3
|
||||
hash = decKey(hashData([]byte("acc-3")).Bytes()) |
||||
populate(common.BytesToHash(hash), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) |
||||
hash = incKey(hashData([]byte("acc-3")).Bytes()) |
||||
populate(common.BytesToHash(hash), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) |
||||
|
||||
// Dangling storages of the random account
|
||||
populate(randomHash(), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) |
||||
populate(randomHash(), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) |
||||
populate(randomHash(), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) |
||||
} |
||||
|
||||
// Tests that snapshot generation with dangling storages. Dangling storage means
// the storage data is existent while the corresponding account data is missing.
//
// This test will populate some dangling storages to see if they can be cleaned up.
func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) {
	var helper = newHelper()

	stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
	helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
	helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})

	// acc-3 reuses stRoot from acc-1: both storage tries contain the same
	// key/value set, so their roots are identical.
	helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
	helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})

	// Flat snapshot storage is complete for both storage-bearing accounts.
	helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
	helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})

	// Inject dangling storage entries that generation must clean up.
	populateDangling(helper.diskdb)

	root, snap := helper.CommitAndGenerate()
	select {
	case <-snap.genPending:
		// Snapshot generation succeeded

	case <-time.After(3 * time.Second):
		t.Errorf("Snapshot generation failed")
	}
	checkSnapRoot(t, snap, root)

	// Signal abortion to the generator and wait for it to tear down
	stop := make(chan *generatorStats)
	snap.genAbort <- stop
	<-stop
}
||||
|
||||
// Tests that snapshot generation with dangling storages. Dangling storage means
// the storage data is existent while the corresponding account data is missing.
//
// This test will populate some dangling storages to see if they can be cleaned up.
func TestGenerateBrokenSnapshotWithDanglingStorage(t *testing.T) {
	var helper = newHelper()

	// Accounts are added to the trie only (no flat snapshot storage is
	// written for them), leaving the snapshot "broken" relative to the trie.
	stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
	helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
	helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})

	// acc-3 reuses stRoot from acc-1: both storage tries contain the same
	// key/value set, so their roots are identical.
	helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
	helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})

	// Inject dangling storage entries that generation must clean up.
	populateDangling(helper.diskdb)

	root, snap := helper.CommitAndGenerate()
	select {
	case <-snap.genPending:
		// Snapshot generation succeeded

	case <-time.After(3 * time.Second):
		t.Errorf("Snapshot generation failed")
	}
	checkSnapRoot(t, snap, root)

	// Signal abortion to the generator and wait for it to tear down
	stop := make(chan *generatorStats)
	snap.genAbort <- stop
	<-stop
}
@ -0,0 +1,97 @@ |
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import ( |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
) |
||||
|
||||
// holdableIterator is a wrapper of underlying database iterator. It extends
// the basic iterator interface by adding Hold which can hold the element
// locally where the iterator is currently located and serve it up next time.
type holdableIterator struct {
	it     ethdb.Iterator // underlying database iterator being wrapped
	key    []byte         // copy of the held element's key (nil when nothing is held)
	val    []byte         // copy of the held element's value (nil when nothing is held)
	atHeld bool           // true while the iterator is positioned on the held element
}
||||
|
||||
// newHoldableIterator initializes the holdableIterator with the given iterator.
|
||||
func newHoldableIterator(it ethdb.Iterator) *holdableIterator { |
||||
return &holdableIterator{it: it} |
||||
} |
||||
|
||||
// Hold holds the element locally where the iterator is currently located which
|
||||
// can be served up next time.
|
||||
func (it *holdableIterator) Hold() { |
||||
if it.it.Key() == nil { |
||||
return // nothing to hold
|
||||
} |
||||
it.key = common.CopyBytes(it.it.Key()) |
||||
it.val = common.CopyBytes(it.it.Value()) |
||||
it.atHeld = false |
||||
} |
||||
|
||||
// Next moves the iterator to the next key/value pair. It returns whether the
|
||||
// iterator is exhausted.
|
||||
func (it *holdableIterator) Next() bool { |
||||
if !it.atHeld && it.key != nil { |
||||
it.atHeld = true |
||||
} else if it.atHeld { |
||||
it.atHeld = false |
||||
it.key = nil |
||||
it.val = nil |
||||
} |
||||
if it.key != nil { |
||||
return true // shifted to locally held value
|
||||
} |
||||
return it.it.Next() |
||||
} |
||||
|
||||
// Error returns any accumulated error. Exhausting all the key/value pairs
|
||||
// is not considered to be an error.
|
||||
func (it *holdableIterator) Error() error { return it.it.Error() } |
||||
|
||||
// Release releases associated resources. Release should always succeed and can
|
||||
// be called multiple times without causing error.
|
||||
func (it *holdableIterator) Release() { |
||||
it.atHeld = false |
||||
it.key = nil |
||||
it.val = nil |
||||
it.it.Release() |
||||
} |
||||
|
||||
// Key returns the key of the current key/value pair, or nil if done. The caller
|
||||
// should not modify the contents of the returned slice, and its contents may
|
||||
// change on the next call to Next.
|
||||
func (it *holdableIterator) Key() []byte { |
||||
if it.key != nil { |
||||
return it.key |
||||
} |
||||
return it.it.Key() |
||||
} |
||||
|
||||
// Value returns the value of the current key/value pair, or nil if done. The
|
||||
// caller should not modify the contents of the returned slice, and its contents
|
||||
// may change on the next call to Next.
|
||||
func (it *holdableIterator) Value() []byte { |
||||
if it.val != nil { |
||||
return it.val |
||||
} |
||||
return it.it.Value() |
||||
} |
@ -0,0 +1,163 @@ |
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import ( |
||||
"bytes" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/harmony-one/harmony/core/rawdb" |
||||
) |
||||
|
||||
// TestIteratorHold verifies that a held element is re-served by the next call
// to Next, that Hold is idempotent, and that iteration still visits every
// element exactly as the plain iterator would.
func TestIteratorHold(t *testing.T) {
	// Create the key-value data store
	var (
		content = map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}
		order = []string{"k1", "k2", "k3"}
		db = rawdb.NewMemoryDatabase()
	)
	for key, val := range content {
		if err := db.Put([]byte(key), []byte(val)); err != nil {
			t.Fatalf("failed to insert item %s:%s into database: %v", key, val, err)
		}
	}
	// Iterate over the database with the given configs and verify the results
	it, idx := newHoldableIterator(db.NewIterator(nil, nil)), 0

	// Nothing should be affected by calling Hold on a non-initialized iterator
	it.Hold()

	for it.Next() {
		if len(content) <= idx {
			t.Errorf("more items than expected: checking idx=%d (key %q), expecting len=%d", idx, it.Key(), len(order))
			break
		}
		if !bytes.Equal(it.Key(), []byte(order[idx])) {
			t.Errorf("item %d: key mismatch: have %s, want %s", idx, string(it.Key()), order[idx])
		}
		if !bytes.Equal(it.Value(), []byte(content[order[idx]])) {
			t.Errorf("item %d: value mismatch: have %s, want %s", idx, string(it.Value()), content[order[idx]])
		}
		// Should be safe to call Hold multiple times
		it.Hold()
		it.Hold()

		// Shift iterator to the held element; it must be served again
		it.Next()
		if !bytes.Equal(it.Key(), []byte(order[idx])) {
			t.Errorf("item %d: key mismatch: have %s, want %s", idx, string(it.Key()), order[idx])
		}
		if !bytes.Equal(it.Value(), []byte(content[order[idx]])) {
			t.Errorf("item %d: value mismatch: have %s, want %s", idx, string(it.Value()), content[order[idx]])
		}

		// Hold/Next combo should work always
		it.Hold()
		it.Next()
		if !bytes.Equal(it.Key(), []byte(order[idx])) {
			t.Errorf("item %d: key mismatch: have %s, want %s", idx, string(it.Key()), order[idx])
		}
		if !bytes.Equal(it.Value(), []byte(content[order[idx]])) {
			t.Errorf("item %d: value mismatch: have %s, want %s", idx, string(it.Value()), content[order[idx]])
		}
		idx++
	}
	if err := it.Error(); err != nil {
		t.Errorf("iteration failed: %v", err)
	}
	if idx != len(order) {
		t.Errorf("iteration terminated prematurely: have %d, want %d", idx, len(order))
	}
	db.Close()
}
||||
|
||||
// TestReopenIterator verifies that reopenIterator resumes iteration at the
// correct position, including when the current element is held and when the
// iterator has been exhausted.
func TestReopenIterator(t *testing.T) {
	var (
		content = map[common.Hash]string{
			common.HexToHash("a1"): "v1",
			common.HexToHash("a2"): "v2",
			common.HexToHash("a3"): "v3",
			common.HexToHash("a4"): "v4",
			common.HexToHash("a5"): "v5",
			common.HexToHash("a6"): "v6",
		}
		order = []common.Hash{
			common.HexToHash("a1"),
			common.HexToHash("a2"),
			common.HexToHash("a3"),
			common.HexToHash("a4"),
			common.HexToHash("a5"),
			common.HexToHash("a6"),
		}
		db = rawdb.NewMemoryDatabase()
	)
	for key, val := range content {
		rawdb.WriteAccountSnapshot(db, key, []byte(val))
	}
	// checkVal asserts the iterator sits on the index-th expected entry.
	// Keys carry the account-snapshot prefix, hence the append.
	checkVal := func(it *holdableIterator, index int) {
		if !bytes.Equal(it.Key(), append(rawdb.SnapshotAccountPrefix, order[index].Bytes()...)) {
			t.Fatalf("Unexpected data entry key, want %v got %v", order[index], it.Key())
		}
		if !bytes.Equal(it.Value(), []byte(content[order[index]])) {
			t.Fatalf("Unexpected data entry key, want %v got %v", []byte(content[order[index]]), it.Value())
		}
	}
	// Iterate over the database with the given configs and verify the results
	ctx, idx := newGeneratorContext(&generatorStats{}, db, nil, nil), -1

	idx++
	ctx.account.Next()
	checkVal(ctx.account, idx)

	// Reopening mid-iteration must resume at the following entry.
	ctx.reopenIterator(snapAccount)
	idx++
	ctx.account.Next()
	checkVal(ctx.account, idx)

	// reopen twice
	ctx.reopenIterator(snapAccount)
	ctx.reopenIterator(snapAccount)
	idx++
	ctx.account.Next()
	checkVal(ctx.account, idx)

	// reopen iterator with held value; the held element must not be lost
	ctx.account.Next()
	ctx.account.Hold()
	ctx.reopenIterator(snapAccount)
	idx++
	ctx.account.Next()
	checkVal(ctx.account, idx)

	// reopen twice iterator with held value
	ctx.account.Next()
	ctx.account.Hold()
	ctx.reopenIterator(snapAccount)
	ctx.reopenIterator(snapAccount)
	idx++
	ctx.account.Next()
	checkVal(ctx.account, idx)

	// shift to the end and reopen
	ctx.account.Next() // the end
	ctx.reopenIterator(snapAccount)
	ctx.account.Next()
	if ctx.account.Key() != nil {
		t.Fatal("Unexpected iterated entry")
	}
}
@ -0,0 +1,400 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"sort" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/harmony-one/harmony/core/rawdb" |
||||
) |
||||
|
||||
// Iterator is an iterator to step over all the accounts or the specific
// storage in a snapshot which may or may not be composed of multiple layers.
type Iterator interface {
	// Next steps the iterator forward one element, returning false if exhausted,
	// or an error if iteration failed for some reason (e.g. root being iterated
	// becomes stale and garbage collected). After Next returns false, Error
	// should be consulted to distinguish exhaustion from failure.
	Next() bool

	// Error returns any failure that occurred during iteration, which might have
	// caused a premature iteration exit (e.g. snapshot stack becoming stale).
	Error() error

	// Hash returns the hash of the account or storage slot the iterator is
	// currently at.
	Hash() common.Hash

	// Release releases associated resources. Release should always succeed and
	// can be called multiple times without causing error.
	Release()
}
||||
|
||||
// AccountIterator is an iterator to step over all the accounts in a snapshot,
// which may or may not be composed of multiple layers.
type AccountIterator interface {
	Iterator

	// Account returns the RLP encoded slim account the iterator is currently at.
	// An error will be returned if the iterator becomes invalid
	Account() []byte
}
||||
|
||||
// StorageIterator is an iterator to step over the specific storage in a snapshot,
// which may or may not be composed of multiple layers.
type StorageIterator interface {
	Iterator

	// Slot returns the storage slot the iterator is currently at. An error will
	// be returned if the iterator becomes invalid
	Slot() []byte
}
||||
|
||||
// diffAccountIterator is an account iterator that steps over the accounts (both
// live and deleted) contained within a single diff layer. Higher order iterators
// will use the deleted accounts to skip deeper iterators.
type diffAccountIterator struct {
	// curHash is the current hash the iterator is positioned on. The field is
	// explicitly tracked since the referenced diff layer might go stale after
	// the iterator was positioned and we don't want to fail accessing the old
	// hash as long as the iterator is not touched any more.
	curHash common.Hash

	layer *diffLayer    // Live layer to retrieve values from
	keys  []common.Hash // Keys left in the layer to iterate
	fail  error         // Any failures encountered (stale)
}
||||
|
||||
// AccountIterator creates an account iterator over a single diff layer.
|
||||
func (dl *diffLayer) AccountIterator(seek common.Hash) AccountIterator { |
||||
// Seek out the requested starting account
|
||||
hashes := dl.AccountList() |
||||
index := sort.Search(len(hashes), func(i int) bool { |
||||
return bytes.Compare(seek[:], hashes[i][:]) <= 0 |
||||
}) |
||||
// Assemble and returned the already seeked iterator
|
||||
return &diffAccountIterator{ |
||||
layer: dl, |
||||
keys: hashes[index:], |
||||
} |
||||
} |
||||
|
||||
// Next steps the iterator forward one element, returning false if exhausted.
|
||||
func (it *diffAccountIterator) Next() bool { |
||||
// If the iterator was already stale, consider it a programmer error. Although
|
||||
// we could just return false here, triggering this path would probably mean
|
||||
// somebody forgot to check for Error, so lets blow up instead of undefined
|
||||
// behavior that's hard to debug.
|
||||
if it.fail != nil { |
||||
panic(fmt.Sprintf("called Next of failed iterator: %v", it.fail)) |
||||
} |
||||
// Stop iterating if all keys were exhausted
|
||||
if len(it.keys) == 0 { |
||||
return false |
||||
} |
||||
if it.layer.Stale() { |
||||
it.fail, it.keys = ErrSnapshotStale, nil |
||||
return false |
||||
} |
||||
// Iterator seems to be still alive, retrieve and cache the live hash
|
||||
it.curHash = it.keys[0] |
||||
// key cached, shift the iterator and notify the user of success
|
||||
it.keys = it.keys[1:] |
||||
return true |
||||
} |
||||
|
||||
// Error returns any failure that occurred during iteration, which might have
|
||||
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
|
||||
func (it *diffAccountIterator) Error() error { |
||||
return it.fail |
||||
} |
||||
|
||||
// Hash returns the hash of the account the iterator is currently at.
|
||||
func (it *diffAccountIterator) Hash() common.Hash { |
||||
return it.curHash |
||||
} |
||||
|
||||
// Account returns the RLP encoded slim account the iterator is currently at.
// This method may _fail_, if the underlying layer has been flattened between
// the call to Next and Account. That type of error will set it.fail (the
// earlier comment referenced "it.Err", which does not exist).
// This method assumes that flattening does not delete elements from
// the accountdata mapping (writing nil into it is fine though), and will panic
// if elements have been deleted.
//
// Note the returned account is not a copy, please don't modify it.
func (it *diffAccountIterator) Account() []byte {
	it.layer.lock.RLock()
	blob, ok := it.layer.accountData[it.curHash]
	if !ok {
		// Missing from the live data but present in the destruct set means
		// the account was deleted in this layer: report it as nil.
		if _, ok := it.layer.destructSet[it.curHash]; ok {
			it.layer.lock.RUnlock()
			return nil
		}
		// NOTE(review): this panic fires while the read lock is still held;
		// acceptable for an invariant violation, but worth confirming.
		panic(fmt.Sprintf("iterator referenced non-existent account: %x", it.curHash))
	}
	it.layer.lock.RUnlock()
	// Poison the iterator if the layer went stale while we were reading, so
	// the caller's Error() check surfaces the problem.
	if it.layer.Stale() {
		it.fail, it.keys = ErrSnapshotStale, nil
	}
	return blob
}
||||
|
||||
// Release is a noop for diff account iterators as there are no held resources:
// the iterated key list and account data live in memory, owned by the layer.
func (it *diffAccountIterator) Release() {}
||||
|
||||
// diskAccountIterator is an account iterator that steps over the live accounts
|
||||
// contained within a disk layer.
|
||||
type diskAccountIterator struct { |
||||
layer *diskLayer |
||||
it ethdb.Iterator |
||||
} |
||||
|
||||
// AccountIterator creates an account iterator over a disk layer.
|
||||
func (dl *diskLayer) AccountIterator(seek common.Hash) AccountIterator { |
||||
pos := common.TrimRightZeroes(seek[:]) |
||||
return &diskAccountIterator{ |
||||
layer: dl, |
||||
it: dl.diskdb.NewIterator(rawdb.SnapshotAccountPrefix, pos), |
||||
} |
||||
} |
||||
|
||||
// Next steps the iterator forward one element, returning false if exhausted.
|
||||
func (it *diskAccountIterator) Next() bool { |
||||
// If the iterator was already exhausted, don't bother
|
||||
if it.it == nil { |
||||
return false |
||||
} |
||||
// Try to advance the iterator and release it if we reached the end
|
||||
for { |
||||
if !it.it.Next() { |
||||
it.it.Release() |
||||
it.it = nil |
||||
return false |
||||
} |
||||
if len(it.it.Key()) == len(rawdb.SnapshotAccountPrefix)+common.HashLength { |
||||
break |
||||
} |
||||
} |
||||
return true |
||||
} |
||||
|
||||
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit.
//
// NOTE(review): the original comment here talked about diff layers; this is
// the disk iterator, whose only failure source is the underlying database
// iterator.
func (it *diskAccountIterator) Error() error {
	if it.it == nil {
		return nil // Iterator is exhausted and released
	}
	return it.it.Error()
}
||||
|
||||
// Hash returns the hash of the account the iterator is currently at.
// BytesToHash keeps the trailing 32 bytes, so the database key prefix is
// truncated away.
func (it *diskAccountIterator) Hash() common.Hash {
	return common.BytesToHash(it.it.Key()) // The prefix will be truncated
}

// Account returns the RLP encoded slim account the iterator is currently at.
func (it *diskAccountIterator) Account() []byte {
	return it.it.Value()
}
||||
|
||||
// Release releases the database snapshot held during iteration.
|
||||
func (it *diskAccountIterator) Release() { |
||||
// The iterator is auto-released on exhaustion, so make sure it's still alive
|
||||
if it.it != nil { |
||||
it.it.Release() |
||||
it.it = nil |
||||
} |
||||
} |
||||
|
||||
// diffStorageIterator is a storage iterator that steps over the specific storage
|
||||
// (both live and deleted) contained within a single diff layer. Higher order
|
||||
// iterators will use the deleted slot to skip deeper iterators.
|
||||
type diffStorageIterator struct { |
||||
// curHash is the current hash the iterator is positioned on. The field is
|
||||
// explicitly tracked since the referenced diff layer might go stale after
|
||||
// the iterator was positioned and we don't want to fail accessing the old
|
||||
// hash as long as the iterator is not touched any more.
|
||||
curHash common.Hash |
||||
account common.Hash |
||||
|
||||
layer *diffLayer // Live layer to retrieve values from
|
||||
keys []common.Hash // Keys left in the layer to iterate
|
||||
fail error // Any failures encountered (stale)
|
||||
} |
||||
|
||||
// StorageIterator creates a storage iterator over a single diff layer.
|
||||
// Except the storage iterator is returned, there is an additional flag
|
||||
// "destructed" returned. If it's true then it means the whole storage is
|
||||
// destructed in this layer(maybe recreated too), don't bother deeper layer
|
||||
// for storage retrieval.
|
||||
func (dl *diffLayer) StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool) { |
||||
// Create the storage for this account even it's marked
|
||||
// as destructed. The iterator is for the new one which
|
||||
// just has the same address as the deleted one.
|
||||
hashes, destructed := dl.StorageList(account) |
||||
index := sort.Search(len(hashes), func(i int) bool { |
||||
return bytes.Compare(seek[:], hashes[i][:]) <= 0 |
||||
}) |
||||
// Assemble and returned the already seeked iterator
|
||||
return &diffStorageIterator{ |
||||
layer: dl, |
||||
account: account, |
||||
keys: hashes[index:], |
||||
}, destructed |
||||
} |
||||
|
||||
// Next steps the iterator forward one element, returning false if exhausted.
|
||||
func (it *diffStorageIterator) Next() bool { |
||||
// If the iterator was already stale, consider it a programmer error. Although
|
||||
// we could just return false here, triggering this path would probably mean
|
||||
// somebody forgot to check for Error, so lets blow up instead of undefined
|
||||
// behavior that's hard to debug.
|
||||
if it.fail != nil { |
||||
panic(fmt.Sprintf("called Next of failed iterator: %v", it.fail)) |
||||
} |
||||
// Stop iterating if all keys were exhausted
|
||||
if len(it.keys) == 0 { |
||||
return false |
||||
} |
||||
if it.layer.Stale() { |
||||
it.fail, it.keys = ErrSnapshotStale, nil |
||||
return false |
||||
} |
||||
// Iterator seems to be still alive, retrieve and cache the live hash
|
||||
it.curHash = it.keys[0] |
||||
// key cached, shift the iterator and notify the user of success
|
||||
it.keys = it.keys[1:] |
||||
return true |
||||
} |
||||
|
||||
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
func (it *diffStorageIterator) Error() error {
	return it.fail
}

// Hash returns the hash of the storage slot the iterator is currently at.
func (it *diffStorageIterator) Hash() common.Hash {
	return it.curHash
}
||||
|
||||
// Slot returns the raw storage slot value the iterator is currently at.
// This method may _fail_, if the underlying layer has been flattened between
// the call to Next and Slot. That type of error will set it.fail.
// This method assumes that flattening does not delete elements from
// the storage mapping (writing nil into it is fine though), and will panic
// if elements have been deleted.
//
// Note the returned slot is not a copy, please don't modify it.
func (it *diffStorageIterator) Slot() []byte {
	it.layer.lock.RLock()
	storage, ok := it.layer.storageData[it.account]
	if !ok {
		// Flattening must not drop the account's storage map; missing means corruption.
		panic(fmt.Sprintf("iterator referenced non-existent account storage: %x", it.account))
	}
	// Storage slot might be nil(deleted), but it must exist
	blob, ok := storage[it.curHash]
	if !ok {
		panic(fmt.Sprintf("iterator referenced non-existent storage slot: %x", it.curHash))
	}
	it.layer.lock.RUnlock()
	// The layer may have gone stale while the value was read; record it so the
	// next call to Next fails loudly instead of serving inconsistent data.
	if it.layer.Stale() {
		it.fail, it.keys = ErrSnapshotStale, nil
	}
	return blob
}
||||
|
||||
// Release is a noop for diff storage iterators as there are no held resources:
// the iterated key list and slot data live in memory, owned by the layer.
// (The original comment said "account iterators" — copy/paste slip.)
func (it *diffStorageIterator) Release() {}
||||
|
||||
// diskStorageIterator is a storage iterator that steps over the live storage
// contained within a disk layer.
type diskStorageIterator struct {
	layer   *diskLayer     // Disk layer the storage slots are drawn from
	account common.Hash    // Account hash the storage slots belong to
	it      ethdb.Iterator // Raw database iterator over the storage key space
}
||||
|
||||
// StorageIterator creates a storage iterator over a disk layer.
|
||||
// If the whole storage is destructed, then all entries in the disk
|
||||
// layer are deleted already. So the "destructed" flag returned here
|
||||
// is always false.
|
||||
func (dl *diskLayer) StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool) { |
||||
pos := common.TrimRightZeroes(seek[:]) |
||||
return &diskStorageIterator{ |
||||
layer: dl, |
||||
account: account, |
||||
it: dl.diskdb.NewIterator(append(rawdb.SnapshotStoragePrefix, account.Bytes()...), pos), |
||||
}, false |
||||
} |
||||
|
||||
// Next steps the iterator forward one element, returning false if exhausted.
|
||||
func (it *diskStorageIterator) Next() bool { |
||||
// If the iterator was already exhausted, don't bother
|
||||
if it.it == nil { |
||||
return false |
||||
} |
||||
// Try to advance the iterator and release it if we reached the end
|
||||
for { |
||||
if !it.it.Next() { |
||||
it.it.Release() |
||||
it.it = nil |
||||
return false |
||||
} |
||||
if len(it.it.Key()) == len(rawdb.SnapshotStoragePrefix)+common.HashLength+common.HashLength { |
||||
break |
||||
} |
||||
} |
||||
return true |
||||
} |
||||
|
||||
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit.
//
// NOTE(review): the original comment here talked about diff layers; this is
// the disk iterator, whose only failure source is the underlying database
// iterator.
func (it *diskStorageIterator) Error() error {
	if it.it == nil {
		return nil // Iterator is exhausted and released
	}
	return it.it.Error()
}
||||
|
||||
// Hash returns the hash of the storage slot the iterator is currently at.
// BytesToHash keeps the trailing 32 bytes, so the prefix and account hash
// portion of the database key is truncated away.
func (it *diskStorageIterator) Hash() common.Hash {
	return common.BytesToHash(it.it.Key()) // The prefix will be truncated
}

// Slot returns the raw storage slot content the iterator is currently at.
func (it *diskStorageIterator) Slot() []byte {
	return it.it.Value()
}
||||
|
||||
// Release releases the database snapshot held during iteration.
|
||||
func (it *diskStorageIterator) Release() { |
||||
// The iterator is auto-released on exhaustion, so make sure it's still alive
|
||||
if it.it != nil { |
||||
it.it.Release() |
||||
it.it = nil |
||||
} |
||||
} |
@ -0,0 +1,213 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import ( |
||||
"bytes" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
) |
||||
|
||||
// binaryIterator is a simplistic iterator to step over the accounts or storage
// in a snapshot, which may or may not be composed of multiple layers. Performance
// wise this iterator is slow, it's meant for cross validating the fast one.
type binaryIterator struct {
	a               Iterator    // Iterator over the local (topmost) layer
	b               Iterator    // Iterator over the deeper layers; left nil when storage was destructed locally
	aDone           bool        // Whether the local iterator is exhausted
	bDone           bool        // Whether the deeper iterator is exhausted
	accountIterator bool        // Whether accounts (true) or storage slots (false) are iterated
	k               common.Hash // Hash of the entry the iterator is currently at
	account         common.Hash // Account hash the storage slots belong to (storage mode only)
	fail            error       // Any failures encountered
}
||||
|
||||
// initBinaryAccountIterator creates a simplistic iterator to step over all the
|
||||
// accounts in a slow, but easily verifiable way. Note this function is used for
|
||||
// initialization, use `newBinaryAccountIterator` as the API.
|
||||
func (dl *diffLayer) initBinaryAccountIterator() Iterator { |
||||
parent, ok := dl.parent.(*diffLayer) |
||||
if !ok { |
||||
l := &binaryIterator{ |
||||
a: dl.AccountIterator(common.Hash{}), |
||||
b: dl.Parent().AccountIterator(common.Hash{}), |
||||
accountIterator: true, |
||||
} |
||||
l.aDone = !l.a.Next() |
||||
l.bDone = !l.b.Next() |
||||
return l |
||||
} |
||||
l := &binaryIterator{ |
||||
a: dl.AccountIterator(common.Hash{}), |
||||
b: parent.initBinaryAccountIterator(), |
||||
accountIterator: true, |
||||
} |
||||
l.aDone = !l.a.Next() |
||||
l.bDone = !l.b.Next() |
||||
return l |
||||
} |
||||
|
||||
// initBinaryStorageIterator creates a simplistic iterator to step over all the
|
||||
// storage slots in a slow, but easily verifiable way. Note this function is used
|
||||
// for initialization, use `newBinaryStorageIterator` as the API.
|
||||
func (dl *diffLayer) initBinaryStorageIterator(account common.Hash) Iterator { |
||||
parent, ok := dl.parent.(*diffLayer) |
||||
if !ok { |
||||
// If the storage in this layer is already destructed, discard all
|
||||
// deeper layers but still return an valid single-branch iterator.
|
||||
a, destructed := dl.StorageIterator(account, common.Hash{}) |
||||
if destructed { |
||||
l := &binaryIterator{ |
||||
a: a, |
||||
account: account, |
||||
} |
||||
l.aDone = !l.a.Next() |
||||
l.bDone = true |
||||
return l |
||||
} |
||||
// The parent is disk layer, don't need to take care "destructed"
|
||||
// anymore.
|
||||
b, _ := dl.Parent().StorageIterator(account, common.Hash{}) |
||||
l := &binaryIterator{ |
||||
a: a, |
||||
b: b, |
||||
account: account, |
||||
} |
||||
l.aDone = !l.a.Next() |
||||
l.bDone = !l.b.Next() |
||||
return l |
||||
} |
||||
// If the storage in this layer is already destructed, discard all
|
||||
// deeper layers but still return an valid single-branch iterator.
|
||||
a, destructed := dl.StorageIterator(account, common.Hash{}) |
||||
if destructed { |
||||
l := &binaryIterator{ |
||||
a: a, |
||||
account: account, |
||||
} |
||||
l.aDone = !l.a.Next() |
||||
l.bDone = true |
||||
return l |
||||
} |
||||
l := &binaryIterator{ |
||||
a: a, |
||||
b: parent.initBinaryStorageIterator(account), |
||||
account: account, |
||||
} |
||||
l.aDone = !l.a.Next() |
||||
l.bDone = !l.b.Next() |
||||
return l |
||||
} |
||||
|
||||
// Next steps the iterator forward one element, returning false if exhausted,
// or an error if iteration failed for some reason (e.g. root being iterated
// becomes stale and garbage collected).
func (it *binaryIterator) Next() bool {
	if it.aDone && it.bDone {
		return false
	}
first:
	// If either branch ran dry, drain the remaining one linearly.
	if it.aDone {
		it.k = it.b.Hash()
		it.bDone = !it.b.Next()
		return true
	}
	if it.bDone {
		it.k = it.a.Hash()
		it.aDone = !it.a.Next()
		return true
	}
	// Both branches are live: yield the smaller of the two current hashes.
	nextA, nextB := it.a.Hash(), it.b.Hash()
	if diff := bytes.Compare(nextA[:], nextB[:]); diff < 0 {
		it.aDone = !it.a.Next()
		it.k = nextA
		return true
	} else if diff == 0 {
		// Both branches hold the same key; advance the shallower one and
		// re-dispatch so the key is emitted only once.
		it.aDone = !it.a.Next()
		goto first
	}
	it.bDone = !it.b.Next()
	it.k = nextB
	return true
}
||||
|
||||
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
func (it *binaryIterator) Error() error {
	return it.fail
}

// Hash returns the hash of the account (or storage slot) the iterator is
// currently at.
func (it *binaryIterator) Hash() common.Hash {
	return it.k
}
||||
|
||||
// Account returns the RLP encoded slim account the iterator is currently at, or
// nil if the iterated snapshot stack became stale (you can check Error after
// to see if it failed or not).
//
// Note the returned account is not a copy, please don't modify it.
func (it *binaryIterator) Account() []byte {
	// Storage-mode binary iterators never serve accounts.
	if !it.accountIterator {
		return nil
	}
	// The topmost iterator must be `diffAccountIterator`; resolve the value
	// through its layer so deleted-in-shallower-layer entries are honored.
	blob, err := it.a.(*diffAccountIterator).layer.AccountRLP(it.k)
	if err != nil {
		it.fail = err
		return nil
	}
	return blob
}
||||
|
||||
// Slot returns the raw storage slot data the iterator is currently at, or
// nil if the iterated snapshot stack became stale (you can check Error after
// to see if it failed or not).
//
// Note the returned slot is not a copy, please don't modify it.
func (it *binaryIterator) Slot() []byte {
	// Account-mode binary iterators never serve storage slots.
	if it.accountIterator {
		return nil
	}
	// The topmost iterator must be `diffStorageIterator`; resolve the value
	// through its layer so shallower-layer overwrites are honored.
	blob, err := it.a.(*diffStorageIterator).layer.Storage(it.account, it.k)
	if err != nil {
		it.fail = err
		return nil
	}
	return blob
}
||||
|
||||
// Release recursively releases all the iterators in the stack.
|
||||
func (it *binaryIterator) Release() { |
||||
it.a.Release() |
||||
it.b.Release() |
||||
} |
||||
|
||||
// newBinaryAccountIterator creates a simplistic account iterator to step over
|
||||
// all the accounts in a slow, but easily verifiable way.
|
||||
func (dl *diffLayer) newBinaryAccountIterator() AccountIterator { |
||||
iter := dl.initBinaryAccountIterator() |
||||
return iter.(AccountIterator) |
||||
} |
||||
|
||||
// newBinaryStorageIterator creates a simplistic account iterator to step over
|
||||
// all the storage slots in a slow, but easily verifiable way.
|
||||
func (dl *diffLayer) newBinaryStorageIterator(account common.Hash) StorageIterator { |
||||
iter := dl.initBinaryStorageIterator(account) |
||||
return iter.(StorageIterator) |
||||
} |
@ -0,0 +1,350 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"sort" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
) |
||||
|
||||
// weightedIterator is an iterator with an assigned weight. It is used to prioritise
// which account or storage slot is the correct one if multiple iterators find the
// same one (modified in multiple consecutive blocks).
type weightedIterator struct {
	it       Iterator // Wrapped per-layer iterator
	priority int      // Layer depth from the queried root; lower wins hash ties
}

// weightedIterators is a set of iterators implementing the sort.Interface.
type weightedIterators []*weightedIterator
||||
|
||||
// Len implements sort.Interface, returning the number of active iterators.
|
||||
func (its weightedIterators) Len() int { return len(its) } |
||||
|
||||
// Less implements sort.Interface, returning which of two iterators in the stack
|
||||
// is before the other.
|
||||
func (its weightedIterators) Less(i, j int) bool { |
||||
// Order the iterators primarily by the account hashes
|
||||
hashI := its[i].it.Hash() |
||||
hashJ := its[j].it.Hash() |
||||
|
||||
switch bytes.Compare(hashI[:], hashJ[:]) { |
||||
case -1: |
||||
return true |
||||
case 1: |
||||
return false |
||||
} |
||||
// Same account/storage-slot in multiple layers, split by priority
|
||||
return its[i].priority < its[j].priority |
||||
} |
||||
|
||||
// Swap implements sort.Interface, swapping two entries in the iterator stack.
|
||||
func (its weightedIterators) Swap(i, j int) { |
||||
its[i], its[j] = its[j], its[i] |
||||
} |
||||
|
||||
// fastIterator is a more optimized multi-layer iterator which maintains a
// direct mapping of all iterators leading down to the bottom layer.
type fastIterator struct {
	tree *Tree       // Snapshot tree to reinitialize stale sub-iterators with
	root common.Hash // Root hash to reinitialize stale sub-iterators through

	curAccount []byte // Cached account blob at the current position (account mode)
	curSlot    []byte // Cached slot blob at the current position (storage mode)

	iterators weightedIterators // Live per-layer iterators, kept sorted by hash then priority
	initiated bool              // Whether the implicit first Next from init was consumed
	account   bool              // Whether accounts (true) or storage slots (false) are iterated
	fail      error             // Any failures encountered
}
||||
|
||||
// newFastIterator creates a new hierarchical account or storage iterator with one
|
||||
// element per diff layer. The returned combo iterator can be used to walk over
|
||||
// the entire snapshot diff stack simultaneously.
|
||||
func newFastIterator(tree *Tree, root common.Hash, account common.Hash, seek common.Hash, accountIterator bool) (*fastIterator, error) { |
||||
snap := tree.Snapshot(root) |
||||
if snap == nil { |
||||
return nil, fmt.Errorf("unknown snapshot: %x", root) |
||||
} |
||||
fi := &fastIterator{ |
||||
tree: tree, |
||||
root: root, |
||||
account: accountIterator, |
||||
} |
||||
current := snap.(snapshot) |
||||
for depth := 0; current != nil; depth++ { |
||||
if accountIterator { |
||||
fi.iterators = append(fi.iterators, &weightedIterator{ |
||||
it: current.AccountIterator(seek), |
||||
priority: depth, |
||||
}) |
||||
} else { |
||||
// If the whole storage is destructed in this layer, don't
|
||||
// bother deeper layer anymore. But we should still keep
|
||||
// the iterator for this layer, since the iterator can contain
|
||||
// some valid slots which belongs to the re-created account.
|
||||
it, destructed := current.StorageIterator(account, seek) |
||||
fi.iterators = append(fi.iterators, &weightedIterator{ |
||||
it: it, |
||||
priority: depth, |
||||
}) |
||||
if destructed { |
||||
break |
||||
} |
||||
} |
||||
current = current.Parent() |
||||
} |
||||
fi.init() |
||||
return fi, nil |
||||
} |
||||
|
||||
// init walks over all the iterators and resolves any clashes between them, after
// which it prepares the stack for step-by-step iteration.
func (fi *fastIterator) init() {
	// Track which account hashes are iterators positioned on
	var positioned = make(map[common.Hash]int)

	// Position all iterators and track how many remain live
	for i := 0; i < len(fi.iterators); i++ {
		// Retrieve the first element and if it clashes with a previous iterator,
		// advance either the current one or the old one. Repeat until nothing is
		// clashing any more.
		it := fi.iterators[i]
		for {
			// If the iterator is exhausted, drop it off the end
			if !it.it.Next() {
				it.it.Release()
				last := len(fi.iterators) - 1

				// Swap-remove: move the last live iterator into slot i and
				// shrink the list, then revisit slot i on the next pass.
				fi.iterators[i] = fi.iterators[last]
				fi.iterators[last] = nil
				fi.iterators = fi.iterators[:last]

				i--
				break
			}
			// The iterator is still alive, check for collisions with previous ones
			hash := it.it.Hash()
			if other, exist := positioned[hash]; !exist {
				positioned[hash] = i
				break
			} else {
				// Iterators collide, one needs to be progressed, use priority to
				// determine which.
				//
				// This whole else-block can be avoided, if we instead
				// do an initial priority-sort of the iterators. If we do that,
				// then we'll only wind up here if a lower-priority (preferred) iterator
				// has the same value, and then we will always just continue.
				// However, it costs an extra sort, so it's probably not better
				if fi.iterators[other].priority < it.priority {
					// The 'it' should be progressed
					continue
				} else {
					// The 'other' should be progressed, swap them
					it = fi.iterators[other]
					fi.iterators[other], fi.iterators[i] = fi.iterators[i], fi.iterators[other]
					continue
				}
			}
		}
	}
	// Re-sort the entire list by (hash, priority) and mark that the implicit
	// first Next has not been consumed by the caller yet.
	sort.Sort(fi.iterators)
	fi.initiated = false
}
||||
|
||||
// Next steps the iterator forward one element, returning false if exhausted.
func (fi *fastIterator) Next() bool {
	if len(fi.iterators) == 0 {
		return false
	}
	if !fi.initiated {
		// Don't forward first time -- we had to 'Next' once in order to
		// do the sorting already
		fi.initiated = true
		if fi.account {
			fi.curAccount = fi.iterators[0].it.(AccountIterator).Account()
		} else {
			fi.curSlot = fi.iterators[0].it.(StorageIterator).Slot()
		}
		if innerErr := fi.iterators[0].it.Error(); innerErr != nil {
			fi.fail = innerErr
			return false
		}
		if fi.curAccount != nil || fi.curSlot != nil {
			return true
		}
		// Implicit else: we've hit a nil-account or nil-slot, and need to
		// fall through to the loop below to land on something non-nil
	}
	// If an account or a slot is deleted in one of the layers, the key will
	// still be there, but the actual value will be nil. However, the iterator
	// should not export nil-values (but instead simply omit the key), so we
	// need to loop here until we either
	//  - get a non-nil value,
	//  - hit an error,
	//  - or exhaust the iterator
	for {
		if !fi.next(0) {
			return false // exhausted
		}
		if fi.account {
			fi.curAccount = fi.iterators[0].it.(AccountIterator).Account()
		} else {
			fi.curSlot = fi.iterators[0].it.(StorageIterator).Slot()
		}
		if innerErr := fi.iterators[0].it.Error(); innerErr != nil {
			fi.fail = innerErr
			return false // error
		}
		if fi.curAccount != nil || fi.curSlot != nil {
			break // non-nil value found
		}
	}
	return true
}
||||
|
||||
// next handles the next operation internally and should be invoked when we know
// that two elements in the list may have the same value.
//
// For example, if the iterated hashes become [2,3,5,5,8,9,10], then we should
// invoke next(3), which will call Next on elem 3 (the second '5') and will
// cascade along the list, applying the same operation if needed.
//
// Returns false only when the whole iterator set got exhausted.
func (fi *fastIterator) next(idx int) bool {
	// If this particular iterator got exhausted, remove it and return true (the
	// next one is surely not exhausted yet, otherwise it would have been removed
	// already).
	if it := fi.iterators[idx].it; !it.Next() {
		it.Release()

		fi.iterators = append(fi.iterators[:idx], fi.iterators[idx+1:]...)
		return len(fi.iterators) > 0
	}
	// If there's no one left to cascade into, return
	if idx == len(fi.iterators)-1 {
		return true
	}
	// We next-ed the iterator at 'idx', now we may have to re-sort that element
	var (
		cur, next         = fi.iterators[idx], fi.iterators[idx+1]
		curHash, nextHash = cur.it.Hash(), next.it.Hash()
	)
	if diff := bytes.Compare(curHash[:], nextHash[:]); diff < 0 {
		// It is still in correct place
		return true
	} else if diff == 0 && cur.priority < next.priority {
		// So still in correct place, but we need to iterate on the next
		fi.next(idx + 1)
		return true
	}
	// At this point, the iterator is in the wrong location, but the remaining
	// list is sorted. Find out where to move the item.
	clash := -1
	index := sort.Search(len(fi.iterators), func(n int) bool {
		// The iterator always advances forward, so anything before the old slot
		// is known to be behind us, so just skip them altogether. This actually
		// is an important clause since the sort order got invalidated.
		if n < idx {
			return false
		}
		if n == len(fi.iterators)-1 {
			// Can always place an elem last
			return true
		}
		nextHash := fi.iterators[n+1].it.Hash()
		if diff := bytes.Compare(curHash[:], nextHash[:]); diff < 0 {
			return true
		} else if diff > 0 {
			return false
		}
		// The elem we're placing it next to has the same value,
		// so whichever winds up on n+1 will need further iteration
		clash = n + 1

		return cur.priority < fi.iterators[n+1].priority
	})
	fi.move(idx, index)
	if clash != -1 {
		// Resolve the duplicate landed next to the moved element.
		fi.next(clash)
	}
	return true
}
||||
|
||||
// move advances an iterator to another position in the list.
// It shifts the elements between index and newpos one slot towards the front
// and drops the moved element into the freed slot (assumes index <= newpos).
func (fi *fastIterator) move(index, newpos int) {
	elem := fi.iterators[index]
	copy(fi.iterators[index:], fi.iterators[index+1:newpos+1])
	fi.iterators[newpos] = elem
}
||||
|
||||
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
func (fi *fastIterator) Error() error {
	return fi.fail
}

// Hash returns the current key, i.e. the hash the front-most (sorted-first)
// sub-iterator is positioned on.
func (fi *fastIterator) Hash() common.Hash {
	return fi.iterators[0].it.Hash()
}
||||
|
||||
// Account returns the current account blob, cached by the last Next call.
// Note the returned account is not a copy, please don't modify it.
func (fi *fastIterator) Account() []byte {
	return fi.curAccount
}

// Slot returns the current storage slot, cached by the last Next call.
// Note the returned slot is not a copy, please don't modify it.
func (fi *fastIterator) Slot() []byte {
	return fi.curSlot
}
||||
|
||||
// Release iterates over all the remaining live layer iterators and releases each
|
||||
// of them individually.
|
||||
func (fi *fastIterator) Release() { |
||||
for _, it := range fi.iterators { |
||||
it.it.Release() |
||||
} |
||||
fi.iterators = nil |
||||
} |
||||
|
||||
// Debug is a convenience helper during testing: it prints the priority and
// the first byte of the current hash for every live sub-iterator.
func (fi *fastIterator) Debug() {
	for _, it := range fi.iterators {
		fmt.Printf("[p=%v v=%v] ", it.priority, it.it.Hash()[0])
	}
	fmt.Println()
}
||||
|
||||
// newFastAccountIterator creates a new hierarchical account iterator with one
// element per diff layer. The returned combo iterator can be used to walk over
// the entire snapshot diff stack simultaneously.
func newFastAccountIterator(tree *Tree, root common.Hash, seek common.Hash) (AccountIterator, error) {
	return newFastIterator(tree, root, common.Hash{}, seek, true)
}

// newFastStorageIterator creates a new hierarchical storage iterator with one
// element per diff layer. The returned combo iterator can be used to walk over
// the entire snapshot diff stack simultaneously.
func newFastStorageIterator(tree *Tree, root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
	return newFastIterator(tree, root, account, seek, false)
}
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,374 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/binary" |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"time" |
||||
|
||||
"github.com/VictoriaMetrics/fastcache" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/ethereum/go-ethereum/trie" |
||||
"github.com/harmony-one/harmony/core/rawdb" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
) |
||||
|
||||
// journalVersion is the version tag written as the first element of the
// snapshot journal; journals with a different version are discarded.
const journalVersion uint64 = 0
||||
|
||||
// journalGenerator is a disk layer entry containing the generator progress marker.
//
// NOTE: this struct is RLP-encoded/decoded, so the field order must not change.
type journalGenerator struct {
	// Indicator that whether the database was in progress of being wiped.
	// It's deprecated but keep it here for background compatibility.
	Wiping bool

	Done     bool   // Whether the generator finished creating the snapshot
	Marker   []byte // Progress marker; interpreted by ParseGeneratorStatus as account (32B) or account+slot (64B) hashes
	Accounts uint64 // Running account count reported in the status string
	Slots    uint64 // Running storage-slot count reported in the status string
	Storage  uint64 // Running storage size reported in the status string
}
||||
|
||||
// journalDestruct is an account deletion entry in a diffLayer's disk journal.
type journalDestruct struct {
	Hash common.Hash // Hash of the destructed account
}
||||
|
||||
// journalAccount is an account entry in a diffLayer's disk journal.
type journalAccount struct {
	Hash common.Hash // Hash of the account
	Blob []byte      // Account blob; empty means a nil (deleted) entry, see iterateJournal
}
||||
|
||||
// journalStorage is an account's storage map in a diffLayer's disk journal.
type journalStorage struct {
	Hash common.Hash   // Hash of the owning account
	Keys []common.Hash // Storage slot keys
	Vals [][]byte      // Storage slot values, index-aligned with Keys
}
||||
|
||||
func ParseGeneratorStatus(generatorBlob []byte) string { |
||||
if len(generatorBlob) == 0 { |
||||
return "" |
||||
} |
||||
var generator journalGenerator |
||||
if err := rlp.DecodeBytes(generatorBlob, &generator); err != nil { |
||||
utils.Logger().Warn().Err(err).Msg("failed to decode snapshot generator") |
||||
return "" |
||||
} |
||||
// Figure out whether we're after or within an account
|
||||
var m string |
||||
switch marker := generator.Marker; len(marker) { |
||||
case common.HashLength: |
||||
m = fmt.Sprintf("at %#x", marker) |
||||
case 2 * common.HashLength: |
||||
m = fmt.Sprintf("in %#x at %#x", marker[:common.HashLength], marker[common.HashLength:]) |
||||
default: |
||||
m = fmt.Sprintf("%#x", marker) |
||||
} |
||||
return fmt.Sprintf(`Done: %v, Accounts: %d, Slots: %d, Storage: %d, Marker: %s`, |
||||
generator.Done, generator.Accounts, generator.Slots, generator.Storage, m) |
||||
} |
||||
|
||||
// loadAndParseJournal tries to parse the snapshot journal in latest format.
|
||||
func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) { |
||||
// Retrieve the disk layer generator. It must exist, no matter the
|
||||
// snapshot is fully generated or not. Otherwise the entire disk
|
||||
// layer is invalid.
|
||||
generatorBlob := rawdb.ReadSnapshotGenerator(db) |
||||
if len(generatorBlob) == 0 { |
||||
return nil, journalGenerator{}, errors.New("missing snapshot generator") |
||||
} |
||||
var generator journalGenerator |
||||
if err := rlp.DecodeBytes(generatorBlob, &generator); err != nil { |
||||
return nil, journalGenerator{}, fmt.Errorf("failed to decode snapshot generator: %v", err) |
||||
} |
||||
// Retrieve the diff layer journal. It's possible that the journal is
|
||||
// not existent, e.g. the disk layer is generating while that the Geth
|
||||
// crashes without persisting the diff journal.
|
||||
// So if there is no journal, or the journal is invalid(e.g. the journal
|
||||
// is not matched with disk layer; or the it's the legacy-format journal,
|
||||
// etc.), we just discard all diffs and try to recover them later.
|
||||
var current snapshot = base |
||||
err := iterateJournal(db, func(parent common.Hash, root common.Hash, destructSet map[common.Hash]struct{}, accountData map[common.Hash][]byte, storageData map[common.Hash]map[common.Hash][]byte) error { |
||||
current = newDiffLayer(current, root, destructSet, accountData, storageData) |
||||
return nil |
||||
}) |
||||
if err != nil { |
||||
return base, generator, nil |
||||
} |
||||
return current, generator, nil |
||||
} |
||||
|
||||
// loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
|
||||
func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, root common.Hash, cache int, recovery bool, noBuild bool) (snapshot, bool, error) { |
||||
// If snapshotting is disabled (initial sync in progress), don't do anything,
|
||||
// wait for the chain to permit us to do something meaningful
|
||||
if rawdb.ReadSnapshotDisabled(diskdb) { |
||||
return nil, true, nil |
||||
} |
||||
// Retrieve the block number and hash of the snapshot, failing if no snapshot
|
||||
// is present in the database (or crashed mid-update).
|
||||
baseRoot := rawdb.ReadSnapshotRoot(diskdb) |
||||
if baseRoot == (common.Hash{}) { |
||||
return nil, false, errors.New("missing or corrupted snapshot") |
||||
} |
||||
base := &diskLayer{ |
||||
diskdb: diskdb, |
||||
triedb: triedb, |
||||
cache: fastcache.New(cache * 1024 * 1024), |
||||
root: baseRoot, |
||||
} |
||||
snapshot, generator, err := loadAndParseJournal(diskdb, base) |
||||
if err != nil { |
||||
utils.Logger().Warn().Err(err).Msg("Failed to load journal") |
||||
return nil, false, err |
||||
} |
||||
// Entire snapshot journal loaded, sanity check the head. If the loaded
|
||||
// snapshot is not matched with current state root, print a warning log
|
||||
// or discard the entire snapshot it's legacy snapshot.
|
||||
//
|
||||
// Possible scenario: Geth was crashed without persisting journal and then
|
||||
// restart, the head is rewound to the point with available state(trie)
|
||||
// which is below the snapshot. In this case the snapshot can be recovered
|
||||
// by re-executing blocks but right now it's unavailable.
|
||||
if head := snapshot.Root(); head != root { |
||||
// If it's legacy snapshot, or it's new-format snapshot but
|
||||
// it's not in recovery mode, returns the error here for
|
||||
// rebuilding the entire snapshot forcibly.
|
||||
if !recovery { |
||||
return nil, false, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root) |
||||
} |
||||
// It's in snapshot recovery, the assumption is held that
|
||||
// the disk layer is always higher than chain head. It can
|
||||
// be eventually recovered when the chain head beyonds the
|
||||
// disk layer.
|
||||
utils.Logger().Warn().Err(err). |
||||
Interface("snaproot", head). |
||||
Interface("chainroot", root). |
||||
Msg("Snapshot is not continuous with chain") |
||||
} |
||||
// Load the disk layer status from the generator if it's not complete
|
||||
if !generator.Done { |
||||
base.genMarker = generator.Marker |
||||
if base.genMarker == nil { |
||||
base.genMarker = []byte{} |
||||
} |
||||
} |
||||
// Everything loaded correctly, resume any suspended operations
|
||||
// if the background generation is allowed
|
||||
if !generator.Done && !noBuild { |
||||
base.genPending = make(chan struct{}) |
||||
base.genAbort = make(chan chan *generatorStats) |
||||
|
||||
var origin uint64 |
||||
if len(generator.Marker) >= 8 { |
||||
origin = binary.BigEndian.Uint64(generator.Marker) |
||||
} |
||||
go base.generate(&generatorStats{ |
||||
origin: origin, |
||||
start: time.Now(), |
||||
accounts: generator.Accounts, |
||||
slots: generator.Slots, |
||||
storage: common.StorageSize(generator.Storage), |
||||
}) |
||||
} |
||||
return snapshot, false, nil |
||||
} |
||||
|
||||
// Journal terminates any in-progress snapshot generation, also implicitly pushing
|
||||
// the progress into the database.
|
||||
func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) { |
||||
// If the snapshot is currently being generated, abort it
|
||||
var stats *generatorStats |
||||
if dl.genAbort != nil { |
||||
abort := make(chan *generatorStats) |
||||
dl.genAbort <- abort |
||||
|
||||
if stats = <-abort; stats != nil { |
||||
stats.Log("Journalling in-progress snapshot", dl.root, dl.genMarker) |
||||
} |
||||
} |
||||
// Ensure the layer didn't get stale
|
||||
dl.lock.RLock() |
||||
defer dl.lock.RUnlock() |
||||
|
||||
if dl.stale { |
||||
return common.Hash{}, ErrSnapshotStale |
||||
} |
||||
// Ensure the generator stats is written even if none was ran this cycle
|
||||
journalProgress(dl.diskdb, dl.genMarker, stats) |
||||
|
||||
utils.Logger().Debug().Interface("root", dl.root).Msg("Journalled disk layer") |
||||
return dl.root, nil |
||||
} |
||||
|
||||
// Journal writes the memory layer contents into a buffer to be stored in the
|
||||
// database as the snapshot journal.
|
||||
func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) { |
||||
// Journal the parent first
|
||||
base, err := dl.parent.Journal(buffer) |
||||
if err != nil { |
||||
return common.Hash{}, err |
||||
} |
||||
// Ensure the layer didn't get stale
|
||||
dl.lock.RLock() |
||||
defer dl.lock.RUnlock() |
||||
|
||||
if dl.Stale() { |
||||
return common.Hash{}, ErrSnapshotStale |
||||
} |
||||
// Everything below was journalled, persist this layer too
|
||||
if err := rlp.Encode(buffer, dl.root); err != nil { |
||||
return common.Hash{}, err |
||||
} |
||||
destructs := make([]journalDestruct, 0, len(dl.destructSet)) |
||||
for hash := range dl.destructSet { |
||||
destructs = append(destructs, journalDestruct{Hash: hash}) |
||||
} |
||||
if err := rlp.Encode(buffer, destructs); err != nil { |
||||
return common.Hash{}, err |
||||
} |
||||
accounts := make([]journalAccount, 0, len(dl.accountData)) |
||||
for hash, blob := range dl.accountData { |
||||
accounts = append(accounts, journalAccount{Hash: hash, Blob: blob}) |
||||
} |
||||
if err := rlp.Encode(buffer, accounts); err != nil { |
||||
return common.Hash{}, err |
||||
} |
||||
storage := make([]journalStorage, 0, len(dl.storageData)) |
||||
for hash, slots := range dl.storageData { |
||||
keys := make([]common.Hash, 0, len(slots)) |
||||
vals := make([][]byte, 0, len(slots)) |
||||
for key, val := range slots { |
||||
keys = append(keys, key) |
||||
vals = append(vals, val) |
||||
} |
||||
storage = append(storage, journalStorage{Hash: hash, Keys: keys, Vals: vals}) |
||||
} |
||||
if err := rlp.Encode(buffer, storage); err != nil { |
||||
return common.Hash{}, err |
||||
} |
||||
utils.Logger().Debug().Err(err).Interface("root", dl.root).Interface("parent", dl.parent.Root()).Msg("Journalled diff layer") |
||||
return base, nil |
||||
} |
||||
|
||||
// journalCallback is a function which is invoked by iterateJournal, every
// time a difflayer is loaded from disk. It receives the parent layer root,
// this layer's root, and the layer's destruct set, account data and storage
// data as decoded from the journal.
type journalCallback = func(parent common.Hash, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error
|
||||
// iterateJournal iterates through the journalled difflayers, loading them from
|
||||
// the database, and invoking the callback for each loaded layer.
|
||||
// The order is incremental; starting with the bottom-most difflayer, going towards
|
||||
// the most recent layer.
|
||||
// This method returns error either if there was some error reading from disk,
|
||||
// OR if the callback returns an error when invoked.
|
||||
func iterateJournal(db ethdb.KeyValueReader, callback journalCallback) error { |
||||
journal := rawdb.ReadSnapshotJournal(db) |
||||
if len(journal) == 0 { |
||||
utils.Logger().Warn().Str("diffs", "missing").Msg("Loaded snapshot journal") |
||||
return nil |
||||
} |
||||
r := rlp.NewStream(bytes.NewReader(journal), 0) |
||||
// Firstly, resolve the first element as the journal version
|
||||
version, err := r.Uint64() |
||||
if err != nil { |
||||
utils.Logger().Warn().Err(err).Msg("Failed to resolve the journal version") |
||||
return errors.New("failed to resolve journal version") |
||||
} |
||||
if version != journalVersion { |
||||
utils.Logger().Warn().Err(err). |
||||
Uint64("required", journalVersion). |
||||
Uint64("got", version). |
||||
Msg("Discarded the snapshot journal with wrong version") |
||||
|
||||
return errors.New("wrong journal version") |
||||
} |
||||
// Secondly, resolve the disk layer root, ensure it's continuous
|
||||
// with disk layer. Note now we can ensure it's the snapshot journal
|
||||
// correct version, so we expect everything can be resolved properly.
|
||||
var parent common.Hash |
||||
if err := r.Decode(&parent); err != nil { |
||||
return errors.New("missing disk layer root") |
||||
} |
||||
if baseRoot := rawdb.ReadSnapshotRoot(db); baseRoot != parent { |
||||
utils.Logger().Warn().Err(err). |
||||
Interface("disk_root", baseRoot). |
||||
Str("diffs", "unmatched"). |
||||
Msg("Loaded snapshot journal") |
||||
|
||||
return fmt.Errorf("mismatched disk and diff layers") |
||||
} |
||||
for { |
||||
var ( |
||||
root common.Hash |
||||
destructs []journalDestruct |
||||
accounts []journalAccount |
||||
storage []journalStorage |
||||
destructSet = make(map[common.Hash]struct{}) |
||||
accountData = make(map[common.Hash][]byte) |
||||
storageData = make(map[common.Hash]map[common.Hash][]byte) |
||||
) |
||||
// Read the next diff journal entry
|
||||
if err := r.Decode(&root); err != nil { |
||||
// The first read may fail with EOF, marking the end of the journal
|
||||
if errors.Is(err, io.EOF) { |
||||
return nil |
||||
} |
||||
return fmt.Errorf("load diff root: %v", err) |
||||
} |
||||
if err := r.Decode(&destructs); err != nil { |
||||
return fmt.Errorf("load diff destructs: %v", err) |
||||
} |
||||
if err := r.Decode(&accounts); err != nil { |
||||
return fmt.Errorf("load diff accounts: %v", err) |
||||
} |
||||
if err := r.Decode(&storage); err != nil { |
||||
return fmt.Errorf("load diff storage: %v", err) |
||||
} |
||||
for _, entry := range destructs { |
||||
destructSet[entry.Hash] = struct{}{} |
||||
} |
||||
for _, entry := range accounts { |
||||
if len(entry.Blob) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that
|
||||
accountData[entry.Hash] = entry.Blob |
||||
} else { |
||||
accountData[entry.Hash] = nil |
||||
} |
||||
} |
||||
for _, entry := range storage { |
||||
slots := make(map[common.Hash][]byte) |
||||
for i, key := range entry.Keys { |
||||
if len(entry.Vals[i]) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that
|
||||
slots[key] = entry.Vals[i] |
||||
} else { |
||||
slots[key] = nil |
||||
} |
||||
} |
||||
storageData[entry.Hash] = slots |
||||
} |
||||
if err := callback(parent, root, destructSet, accountData, storageData); err != nil { |
||||
return err |
||||
} |
||||
parent = root |
||||
} |
||||
} |
@ -0,0 +1,53 @@ |
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import "github.com/ethereum/go-ethereum/metrics" |
||||
|
||||
// Metrics in generation
var (
	// Meters counting accounts/slots processed while generating the snapshot,
	// registered under state/snapshot/generation/{account,storage}/*.
	snapGeneratedAccountMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/account/generated", nil)
	snapRecoveredAccountMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/account/recovered", nil)
	snapWipedAccountMeter         = metrics.NewRegisteredMeter("state/snapshot/generation/account/wiped", nil)
	snapMissallAccountMeter       = metrics.NewRegisteredMeter("state/snapshot/generation/account/missall", nil)
	snapGeneratedStorageMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/storage/generated", nil)
	snapRecoveredStorageMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/storage/recovered", nil)
	snapWipedStorageMeter         = metrics.NewRegisteredMeter("state/snapshot/generation/storage/wiped", nil)
	snapMissallStorageMeter       = metrics.NewRegisteredMeter("state/snapshot/generation/storage/missall", nil)
	snapDanglingStorageMeter      = metrics.NewRegisteredMeter("state/snapshot/generation/storage/dangling", nil)
	snapSuccessfulRangeProofMeter = metrics.NewRegisteredMeter("state/snapshot/generation/proof/success", nil)
	snapFailedRangeProofMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/proof/failure", nil)

	// snapAccountProveCounter measures time spent on the account proving
	snapAccountProveCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/prove", nil)
	// snapAccountTrieReadCounter measures time spent on the account trie iteration
	snapAccountTrieReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/trieread", nil)
	// snapAccountSnapReadCounter measures time spent on the snapshot account iteration
	snapAccountSnapReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/snapread", nil)
	// snapAccountWriteCounter measures time spent on writing/updating/deleting accounts
	snapAccountWriteCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/write", nil)
	// snapStorageProveCounter measures time spent on storage proving
	snapStorageProveCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/prove", nil)
	// snapStorageTrieReadCounter measures time spent on the storage trie iteration
	snapStorageTrieReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/trieread", nil)
	// snapStorageSnapReadCounter measures time spent on the snapshot storage iteration
	snapStorageSnapReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/snapread", nil)
	// snapStorageWriteCounter measures time spent on writing/updating storages
	snapStorageWriteCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/write", nil)
	// snapStorageCleanCounter measures time spent on deleting storages
	snapStorageCleanCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/clean", nil)
)
@ -0,0 +1,854 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package snapshot implements a journalled, dynamic state dump.
|
||||
package snapshot |
||||
|
||||
import ( |
||||
"bytes" |
||||
"errors" |
||||
"fmt" |
||||
"sync" |
||||
"sync/atomic" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/metrics" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/ethereum/go-ethereum/trie" |
||||
"github.com/harmony-one/harmony/core/rawdb" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
) |
||||
|
||||
var (
	// Clean-cache meters, registered under state/snapshot/clean/account/*.
	snapshotCleanAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/account/hit", nil)
	snapshotCleanAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/miss", nil)
	snapshotCleanAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/inex", nil)
	snapshotCleanAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/read", nil)
	snapshotCleanAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/write", nil)

	// Clean-cache meters, registered under state/snapshot/clean/storage/*.
	snapshotCleanStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/storage/hit", nil)
	snapshotCleanStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/miss", nil)
	snapshotCleanStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/inex", nil)
	snapshotCleanStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/read", nil)
	snapshotCleanStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/write", nil)

	// Dirty (diff-layer) meters, registered under state/snapshot/dirty/account/*.
	snapshotDirtyAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/account/hit", nil)
	snapshotDirtyAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/miss", nil)
	snapshotDirtyAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/inex", nil)
	snapshotDirtyAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/read", nil)
	snapshotDirtyAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/write", nil)

	// Dirty (diff-layer) meters, registered under state/snapshot/dirty/storage/*.
	snapshotDirtyStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/hit", nil)
	snapshotDirtyStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/miss", nil)
	snapshotDirtyStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/inex", nil)
	snapshotDirtyStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/read", nil)
	snapshotDirtyStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/write", nil)

	// Histograms of the diff-layer depth at which dirty hits occurred.
	snapshotDirtyAccountHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/account/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
	snapshotDirtyStorageHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/storage/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))

	// Flush meters, registered under state/snapshot/flush/*.
	snapshotFlushAccountItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/item", nil)
	snapshotFlushAccountSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/size", nil)
	snapshotFlushStorageItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/item", nil)
	snapshotFlushStorageSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/size", nil)

	// Bloom-filter instrumentation, registered under state/snapshot/bloom/*.
	snapshotBloomIndexTimer = metrics.NewRegisteredResettingTimer("state/snapshot/bloom/index", nil)
	snapshotBloomErrorGauge = metrics.NewRegisteredGaugeFloat64("state/snapshot/bloom/error", nil)

	snapshotBloomAccountTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/account/truehit", nil)
	snapshotBloomAccountFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/account/falsehit", nil)
	snapshotBloomAccountMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/account/miss", nil)

	snapshotBloomStorageTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/truehit", nil)
	snapshotBloomStorageFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/falsehit", nil)
	snapshotBloomStorageMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/miss", nil)

	// ErrSnapshotStale is returned from data accessors if the underlying snapshot
	// layer had been invalidated due to the chain progressing forward far enough
	// to not maintain the layer's original state.
	ErrSnapshotStale = errors.New("snapshot stale")

	// ErrNotCoveredYet is returned from data accessors if the underlying snapshot
	// is being generated currently and the requested data item is not yet in the
	// range of accounts covered.
	ErrNotCoveredYet = errors.New("not covered yet")

	// ErrNotConstructed is returned if the callers want to iterate the snapshot
	// while the generation is not finished yet.
	ErrNotConstructed = errors.New("snapshot is not constructed")

	// errSnapshotCycle is returned if a snapshot is attempted to be inserted
	// that forms a cycle in the snapshot tree.
	errSnapshotCycle = errors.New("snapshot cycle")
)
||||
|
||||
// Snapshot represents the functionality supported by a snapshot storage layer.
type Snapshot interface {
	// Root returns the root hash for which this snapshot was made.
	Root() common.Hash

	// Account directly retrieves the account associated with a particular hash in
	// the snapshot slim data format.
	Account(hash common.Hash) (*Account, error)

	// AccountRLP directly retrieves the account RLP associated with a particular
	// hash in the snapshot slim data format.
	AccountRLP(hash common.Hash) ([]byte, error)

	// Storage directly retrieves the storage data associated with a particular hash,
	// within a particular account.
	Storage(accountHash, storageHash common.Hash) ([]byte, error)
}
||||
|
||||
// snapshot is the internal version of the snapshot data layer that supports some
// additional methods compared to the public API.
type snapshot interface {
	Snapshot

	// Parent returns the subsequent layer of a snapshot, or nil if the base was
	// reached.
	//
	// Note, the method is an internal helper to avoid type switching between the
	// disk and diff layers. There is no locking involved.
	Parent() snapshot

	// Update creates a new layer on top of the existing snapshot diff tree with
	// the specified data items.
	//
	// Note, the maps are retained by the method to avoid copying everything.
	Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer

	// Journal commits an entire diff hierarchy to disk into a single journal entry.
	// This is meant to be used during shutdown to persist the snapshot without
	// flattening everything down (bad for reorgs).
	Journal(buffer *bytes.Buffer) (common.Hash, error)

	// Stale return whether this layer has become stale (was flattened across) or
	// if it's still live.
	Stale() bool

	// AccountIterator creates an account iterator over an arbitrary layer.
	AccountIterator(seek common.Hash) AccountIterator

	// StorageIterator creates a storage iterator over an arbitrary layer.
	StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
}
||||
|
||||
// Config includes the configurations for snapshots.
type Config struct {
	CacheSize  int  // Megabytes permitted to use for read caches
	Recovery   bool // Indicator that the snapshots is in the recovery mode
	NoBuild    bool // Indicator that the snapshots generation is disallowed
	AsyncBuild bool // The snapshot generation is allowed to be constructed asynchronously
}
||||
|
||||
// Tree is an Ethereum state snapshot tree. It consists of one persistent base
// layer backed by a key-value store, on top of which arbitrarily many in-memory
// diff layers are topped. The memory diffs can form a tree with branching, but
// the disk layer is singleton and common to all. If a reorg goes deeper than the
// disk layer, everything needs to be deleted.
//
// The goal of a state snapshot is twofold: to allow direct access to account and
// storage data to avoid expensive multi-level trie lookups; and to allow sorted,
// cheap iteration of the account/storage tries for sync aid.
type Tree struct {
	config Config                   // Snapshots configurations
	diskdb ethdb.KeyValueStore      // Persistent database to store the snapshot
	triedb *trie.Database           // In-memory cache to access the trie through
	layers map[common.Hash]snapshot // Collection of all known layers
	lock   sync.RWMutex             // Protects the layers map

	// Test hooks
	onFlatten func() // Hook invoked when the bottom most diff layers are flattened
}
||||
|
||||
// New attempts to load an already existing snapshot from a persistent key-value
// store (with a number of memory layers from a journal), ensuring that the head
// of the snapshot matches the expected one.
//
// If the snapshot is missing or the disk layer is broken, the snapshot will be
// reconstructed using both the existing data and the state trie.
// The repair happens on a background thread.
//
// If the memory layers in the journal do not match the disk layer (e.g. there is
// a gap) or the journal is missing, there are two repair cases:
//
// - if the 'recovery' parameter is true, memory diff-layers and the disk-layer
//   will all be kept. This case happens when the snapshot is 'ahead' of the
//   state trie.
// - otherwise, the entire snapshot is considered invalid and will be recreated on
//   a background thread.
func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, root common.Hash) (*Tree, error) {
	// Create a new, empty snapshot tree
	snap := &Tree{
		config: config,
		diskdb: diskdb,
		triedb: triedb,
		layers: make(map[common.Hash]snapshot),
	}
	// Attempt to load a previously persisted snapshot and rebuild one if failed
	head, disabled, err := loadSnapshot(diskdb, triedb, root, config.CacheSize, config.Recovery, config.NoBuild)
	if disabled {
		// Snapshots were explicitly disabled on disk (e.g. mid state-sync);
		// return an empty tree without attempting a rebuild.
		utils.Logger().Warn().Err(err).Msg("Snapshot maintenance disabled (syncing)")
		return snap, nil
	}
	// Create the building waiter iff the background generation is allowed
	if !config.NoBuild && !config.AsyncBuild {
		defer snap.waitBuild()
	}
	if err != nil {
		utils.Logger().Warn().Err(err).Msg("Failed to load snapshot")
		if !config.NoBuild {
			// Loading failed but rebuilding is permitted: wipe and regenerate
			// the snapshot from the state trie on a background thread.
			snap.Rebuild(root)
			return snap, nil
		}
		return nil, err // Bail out the error, don't rebuild automatically.
	}
	// Existing snapshot loaded, seed all the layers
	for head != nil {
		snap.layers[head.Root()] = head
		head = head.Parent()
	}
	return snap, nil
}
||||
|
||||
// waitBuild blocks until the snapshot finishes rebuilding. This method is meant
|
||||
// to be used by tests to ensure we're testing what we believe we are.
|
||||
func (t *Tree) waitBuild() { |
||||
// Find the rebuild termination channel
|
||||
var done chan struct{} |
||||
|
||||
t.lock.RLock() |
||||
for _, layer := range t.layers { |
||||
if layer, ok := layer.(*diskLayer); ok { |
||||
done = layer.genPending |
||||
break |
||||
} |
||||
} |
||||
t.lock.RUnlock() |
||||
|
||||
// Wait until the snapshot is generated
|
||||
if done != nil { |
||||
<-done |
||||
} |
||||
} |
||||
|
||||
// Disable interrupts any pending snapshot generator, deletes all the snapshot
// layers in memory and marks snapshots disabled globally. In order to resume
// the snapshot functionality, the caller must invoke Rebuild.
func (t *Tree) Disable() {
	// Interrupt any live snapshot layers
	t.lock.Lock()
	defer t.lock.Unlock()

	for _, layer := range t.layers {
		switch layer := layer.(type) {
		case *diskLayer:
			// If the base layer is generating, abort it
			if layer.genAbort != nil {
				abort := make(chan *generatorStats)
				layer.genAbort <- abort
				// Wait for the generator to acknowledge the abort before
				// touching the layer further.
				<-abort
			}
			// Layer should be inactive now, mark it as stale
			layer.lock.Lock()
			layer.stale = true
			layer.lock.Unlock()

		case *diffLayer:
			// If the layer is a simple diff, simply mark as stale
			layer.lock.Lock()
			atomic.StoreUint32(&layer.stale, 1)
			layer.lock.Unlock()

		default:
			panic(fmt.Sprintf("unknown layer type: %T", layer))
		}
	}
	t.layers = map[common.Hash]snapshot{}

	// Delete all snapshot liveness information from the database
	batch := t.diskdb.NewBatch()

	rawdb.WriteSnapshotDisabled(batch)
	rawdb.DeleteSnapshotRoot(batch)
	rawdb.DeleteSnapshotJournal(batch)
	rawdb.DeleteSnapshotGenerator(batch)
	rawdb.DeleteSnapshotRecoveryNumber(batch)
	// Note, we don't delete the sync progress

	if err := batch.Write(); err != nil {
		utils.Logger().Fatal().Err(err).Msg("Failed to disable snapshots")
	}
}
||||
|
||||
// Snapshot retrieves a snapshot belonging to the given block root, or nil if no
|
||||
// snapshot is maintained for that block.
|
||||
func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot { |
||||
t.lock.RLock() |
||||
defer t.lock.RUnlock() |
||||
|
||||
return t.layers[blockRoot] |
||||
} |
||||
|
||||
// Snapshots returns all visited layers from the topmost layer with specific
|
||||
// root and traverses downward. The layer amount is limited by the given number.
|
||||
// If nodisk is set, then disk layer is excluded.
|
||||
func (t *Tree) Snapshots(root common.Hash, limits int, nodisk bool) []Snapshot { |
||||
t.lock.RLock() |
||||
defer t.lock.RUnlock() |
||||
|
||||
if limits == 0 { |
||||
return nil |
||||
} |
||||
layer := t.layers[root] |
||||
if layer == nil { |
||||
return nil |
||||
} |
||||
var ret []Snapshot |
||||
for { |
||||
if _, isdisk := layer.(*diskLayer); isdisk && nodisk { |
||||
break |
||||
} |
||||
ret = append(ret, layer) |
||||
limits -= 1 |
||||
if limits == 0 { |
||||
break |
||||
} |
||||
parent := layer.Parent() |
||||
if parent == nil { |
||||
break |
||||
} |
||||
layer = parent |
||||
} |
||||
return ret |
||||
} |
||||
|
||||
// Update adds a new snapshot into the tree, if that can be linked to an existing
|
||||
// old parent. It is disallowed to insert a disk layer (the origin of all).
|
||||
func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error { |
||||
// Reject noop updates to avoid self-loops in the snapshot tree. This is a
|
||||
// special case that can only happen for Clique networks where empty blocks
|
||||
// don't modify the state (0 block subsidy).
|
||||
//
|
||||
// Although we could silently ignore this internally, it should be the caller's
|
||||
// responsibility to avoid even attempting to insert such a snapshot.
|
||||
if blockRoot == parentRoot { |
||||
return errSnapshotCycle |
||||
} |
||||
// Generate a new snapshot on top of the parent
|
||||
parent := t.Snapshot(parentRoot) |
||||
if parent == nil { |
||||
return fmt.Errorf("parent [%#x] snapshot missing", parentRoot) |
||||
} |
||||
snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage) |
||||
|
||||
// Save the new snapshot for later
|
||||
t.lock.Lock() |
||||
defer t.lock.Unlock() |
||||
|
||||
t.layers[snap.root] = snap |
||||
return nil |
||||
} |
||||
|
||||
// Cap traverses downwards the snapshot tree from a head block hash until the
// number of allowed layers are crossed. All layers beyond the permitted number
// are flattened downwards.
//
// Note, the final diff layer count in general will be one more than the amount
// requested. This happens because the bottom-most diff layer is the accumulator
// which may or may not overflow and cascade to disk. Since this last layer's
// survival is only known *after* capping, we need to omit it from the count if
// we want to ensure that *at least* the requested number of diff layers remain.
func (t *Tree) Cap(root common.Hash, layers int) error {
	// Retrieve the head snapshot to cap from
	snap := t.Snapshot(root)
	if snap == nil {
		return fmt.Errorf("snapshot [%#x] missing", root)
	}
	diff, ok := snap.(*diffLayer)
	if !ok {
		return fmt.Errorf("snapshot [%#x] is disk layer", root)
	}
	// If the generator is still running, use a more aggressive cap
	diff.origin.lock.RLock()
	if diff.origin.genMarker != nil && layers > 8 {
		layers = 8
	}
	diff.origin.lock.RUnlock()

	// Run the internal capping and discard all stale layers
	t.lock.Lock()
	defer t.lock.Unlock()

	// Flattening the bottom-most diff layer requires special casing since there's
	// no child to rewire to the grandparent. In that case we can fake a temporary
	// child for the capping and then remove it.
	if layers == 0 {
		// If full commit was requested, flatten the diffs and merge onto disk
		diff.lock.RLock()
		base := diffToDisk(diff.flatten().(*diffLayer))
		diff.lock.RUnlock()

		// Replace the entire snapshot tree with the flat base
		t.layers = map[common.Hash]snapshot{base.root: base}
		return nil
	}
	persisted := t.cap(diff, layers)

	// Remove any layer that is stale or links into a stale layer.
	// First build a child index so stale subtrees can be dropped recursively.
	children := make(map[common.Hash][]common.Hash)
	for root, snap := range t.layers {
		if diff, ok := snap.(*diffLayer); ok {
			parent := diff.parent.Root()
			children[parent] = append(children[parent], root)
		}
	}
	var remove func(root common.Hash)
	remove = func(root common.Hash) {
		delete(t.layers, root)
		for _, child := range children[root] {
			remove(child)
		}
		delete(children, root)
	}
	for root, snap := range t.layers {
		if snap.Stale() {
			remove(root)
		}
	}
	// If the disk layer was modified, regenerate all the cumulative blooms
	// of the surviving diff layers that now sit on top of the new base.
	if persisted != nil {
		var rebloom func(root common.Hash)
		rebloom = func(root common.Hash) {
			if diff, ok := t.layers[root].(*diffLayer); ok {
				diff.rebloom(persisted)
			}
			for _, child := range children[root] {
				rebloom(child)
			}
		}
		rebloom(persisted.root)
	}
	return nil
}
||||
|
||||
// cap traverses downwards the diff tree until the number of allowed layers are
// crossed. All diffs beyond the permitted number are flattened downwards. If the
// layer limit is reached, memory cap is also enforced (but not before).
//
// The method returns the new disk layer if diffs were persisted into it.
//
// Note, the final diff layer count in general will be one more than the amount
// requested. This happens because the bottom-most diff layer is the accumulator
// which may or may not overflow and cascade to disk. Since this last layer's
// survival is only known *after* capping, we need to omit it from the count if
// we want to ensure that *at least* the requested number of diff layers remain.
func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
	// Dive until we run out of layers or reach the persistent database
	for i := 0; i < layers-1; i++ {
		// If we still have diff layers below, continue down
		if parent, ok := diff.parent.(*diffLayer); ok {
			diff = parent
		} else {
			// Diff stack too shallow, return without modifications
			return nil
		}
	}
	// We're out of layers, flatten anything below, stopping if it's the disk or if
	// the memory limit is not yet exceeded.
	switch parent := diff.parent.(type) {
	case *diskLayer:
		return nil

	case *diffLayer:
		// Hold the write lock until the flattened parent is linked correctly.
		// Otherwise, the stale layer may be accessed by external reads in the
		// meantime.
		diff.lock.Lock()
		defer diff.lock.Unlock()

		// Flatten the parent into the grandparent. The flattening internally obtains a
		// write lock on grandparent.
		flattened := parent.flatten().(*diffLayer)
		t.layers[flattened.root] = flattened

		// Invoke the hook if it's registered. Ugly hack.
		if t.onFlatten != nil {
			t.onFlatten()
		}
		diff.parent = flattened
		if flattened.memory < aggregatorMemoryLimit {
			// Accumulator layer is smaller than the limit, so we can abort, unless
			// there's a snapshot being generated currently. In that case, the trie
			// will move from underneath the generator so we **must** merge all the
			// partial data down into the snapshot and restart the generation.
			if flattened.parent.(*diskLayer).genAbort == nil {
				return nil
			}
		}
	default:
		panic(fmt.Sprintf("unknown data layer: %T", parent))
	}
	// If the bottom-most layer is larger than our memory cap, persist to disk
	bottom := diff.parent.(*diffLayer)

	bottom.lock.RLock()
	base := diffToDisk(bottom)
	bottom.lock.RUnlock()

	// Rewire the tree so the accumulator now sits directly on the new base.
	t.layers[base.root] = base
	diff.parent = base
	return base
}
||||
|
||||
// diffToDisk merges a bottom-most diff into the persistent disk layer underneath
|
||||
// it. The method will panic if called onto a non-bottom-most diff layer.
|
||||
//
|
||||
// The disk layer persistence should be operated in an atomic way. All updates should
|
||||
// be discarded if the whole transition if not finished.
|
||||
func diffToDisk(bottom *diffLayer) *diskLayer { |
||||
var ( |
||||
base = bottom.parent.(*diskLayer) |
||||
batch = base.diskdb.NewBatch() |
||||
stats *generatorStats |
||||
) |
||||
// If the disk layer is running a snapshot generator, abort it
|
||||
if base.genAbort != nil { |
||||
abort := make(chan *generatorStats) |
||||
base.genAbort <- abort |
||||
stats = <-abort |
||||
} |
||||
// Put the deletion in the batch writer, flush all updates in the final step.
|
||||
rawdb.DeleteSnapshotRoot(batch) |
||||
|
||||
// Mark the original base as stale as we're going to create a new wrapper
|
||||
base.lock.Lock() |
||||
if base.stale { |
||||
panic("parent disk layer is stale") // we've committed into the same base from two children, boo
|
||||
} |
||||
base.stale = true |
||||
base.lock.Unlock() |
||||
|
||||
// Destroy all the destructed accounts from the database
|
||||
for hash := range bottom.destructSet { |
||||
// Skip any account not covered yet by the snapshot
|
||||
if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 { |
||||
continue |
||||
} |
||||
// Remove all storage slots
|
||||
rawdb.DeleteAccountSnapshot(batch, hash) |
||||
base.cache.Set(hash[:], nil) |
||||
|
||||
it := rawdb.IterateStorageSnapshots(base.diskdb, hash) |
||||
for it.Next() { |
||||
key := it.Key() |
||||
batch.Delete(key) |
||||
base.cache.Del(key[1:]) |
||||
snapshotFlushStorageItemMeter.Mark(1) |
||||
|
||||
// Ensure we don't delete too much data blindly (contract can be
|
||||
// huge). It's ok to flush, the root will go missing in case of a
|
||||
// crash and we'll detect and regenerate the snapshot.
|
||||
if batch.ValueSize() > ethdb.IdealBatchSize { |
||||
if err := batch.Write(); err != nil { |
||||
utils.Logger().Fatal().Err(err).Msg("Failed to write storage deletions") |
||||
} |
||||
batch.Reset() |
||||
} |
||||
} |
||||
it.Release() |
||||
} |
||||
// Push all updated accounts into the database
|
||||
for hash, data := range bottom.accountData { |
||||
// Skip any account not covered yet by the snapshot
|
||||
if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 { |
||||
continue |
||||
} |
||||
// Push the account to disk
|
||||
rawdb.WriteAccountSnapshot(batch, hash, data) |
||||
base.cache.Set(hash[:], data) |
||||
snapshotCleanAccountWriteMeter.Mark(int64(len(data))) |
||||
|
||||
snapshotFlushAccountItemMeter.Mark(1) |
||||
snapshotFlushAccountSizeMeter.Mark(int64(len(data))) |
||||
|
||||
// Ensure we don't write too much data blindly. It's ok to flush, the
|
||||
// root will go missing in case of a crash and we'll detect and regen
|
||||
// the snapshot.
|
||||
if batch.ValueSize() > ethdb.IdealBatchSize { |
||||
if err := batch.Write(); err != nil { |
||||
utils.Logger().Fatal().Err(err).Msg("Failed to write storage deletions") |
||||
} |
||||
batch.Reset() |
||||
} |
||||
} |
||||
// Push all the storage slots into the database
|
||||
for accountHash, storage := range bottom.storageData { |
||||
// Skip any account not covered yet by the snapshot
|
||||
if base.genMarker != nil && bytes.Compare(accountHash[:], base.genMarker) > 0 { |
||||
continue |
||||
} |
||||
// Generation might be mid-account, track that case too
|
||||
midAccount := base.genMarker != nil && bytes.Equal(accountHash[:], base.genMarker[:common.HashLength]) |
||||
|
||||
for storageHash, data := range storage { |
||||
// Skip any slot not covered yet by the snapshot
|
||||
if midAccount && bytes.Compare(storageHash[:], base.genMarker[common.HashLength:]) > 0 { |
||||
continue |
||||
} |
||||
if len(data) > 0 { |
||||
rawdb.WriteStorageSnapshot(batch, accountHash, storageHash, data) |
||||
base.cache.Set(append(accountHash[:], storageHash[:]...), data) |
||||
snapshotCleanStorageWriteMeter.Mark(int64(len(data))) |
||||
} else { |
||||
rawdb.DeleteStorageSnapshot(batch, accountHash, storageHash) |
||||
base.cache.Set(append(accountHash[:], storageHash[:]...), nil) |
||||
} |
||||
snapshotFlushStorageItemMeter.Mark(1) |
||||
snapshotFlushStorageSizeMeter.Mark(int64(len(data))) |
||||
} |
||||
} |
||||
// Update the snapshot block marker and write any remainder data
|
||||
rawdb.WriteSnapshotRoot(batch, bottom.root) |
||||
|
||||
// Write out the generator progress marker and report
|
||||
journalProgress(batch, base.genMarker, stats) |
||||
|
||||
// Flush all the updates in the single db operation. Ensure the
|
||||
// disk layer transition is atomic.
|
||||
if err := batch.Write(); err != nil { |
||||
utils.Logger().Fatal().Err(err).Msg("Failed to write leftover snapshot") |
||||
} |
||||
utils.Logger().Debug().Interface("root", bottom.root).Bool("complete", base.genMarker == nil).Msg("Journalled disk layer") |
||||
res := &diskLayer{ |
||||
root: bottom.root, |
||||
cache: base.cache, |
||||
diskdb: base.diskdb, |
||||
triedb: base.triedb, |
||||
genMarker: base.genMarker, |
||||
genPending: base.genPending, |
||||
} |
||||
// If snapshot generation hasn't finished yet, port over all the starts and
|
||||
// continue where the previous round left off.
|
||||
//
|
||||
// Note, the `base.genAbort` comparison is not used normally, it's checked
|
||||
// to allow the tests to play with the marker without triggering this path.
|
||||
if base.genMarker != nil && base.genAbort != nil { |
||||
res.genMarker = base.genMarker |
||||
res.genAbort = make(chan chan *generatorStats) |
||||
go res.generate(stats) |
||||
} |
||||
return res |
||||
} |
||||
|
||||
// Journal commits an entire diff hierarchy to disk into a single journal entry.
// This is meant to be used during shutdown to persist the snapshot without
// flattening everything down (bad for reorgs).
//
// The method returns the root hash of the base layer that needs to be persisted
// to disk as a trie too to allow continuing any pending generation op.
func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
	// Retrieve the head snapshot to journal from
	snap := t.Snapshot(root)
	if snap == nil {
		return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
	}
	// Run the journaling
	t.lock.Lock()
	defer t.lock.Unlock()

	// Firstly write out the metadata of journal
	journal := new(bytes.Buffer)
	if err := rlp.Encode(journal, journalVersion); err != nil {
		return common.Hash{}, err
	}
	diskroot := t.diskRoot()
	if diskroot == (common.Hash{}) {
		return common.Hash{}, errors.New("invalid disk root")
	}
	// Secondly write out the disk layer root, ensure the
	// diff journal is continuous with disk.
	if err := rlp.Encode(journal, diskroot); err != nil {
		return common.Hash{}, err
	}
	// Finally write out the journal of each layer in reverse order.
	base, err := snap.(snapshot).Journal(journal)
	if err != nil {
		return common.Hash{}, err
	}
	// Store the journal into the database and return
	rawdb.WriteSnapshotJournal(t.diskdb, journal.Bytes())
	return base, nil
}
||||
|
||||
// Rebuild wipes all available snapshot data from the persistent database and
// discard all caches and diff layers. Afterwards, it starts a new snapshot
// generator with the given root hash.
func (t *Tree) Rebuild(root common.Hash) {
	t.lock.Lock()
	defer t.lock.Unlock()

	// Firstly delete any recovery flag in the database. Because now we are
	// building a brand new snapshot. Also reenable the snapshot feature.
	rawdb.DeleteSnapshotRecoveryNumber(t.diskdb)
	rawdb.DeleteSnapshotDisabled(t.diskdb)

	// Iterate over and mark all layers stale
	for _, layer := range t.layers {
		switch layer := layer.(type) {
		case *diskLayer:
			// If the base layer is generating, abort it and save
			if layer.genAbort != nil {
				abort := make(chan *generatorStats)
				layer.genAbort <- abort
				// Wait for the generator to acknowledge before marking stale.
				<-abort
			}
			// Layer should be inactive now, mark it as stale
			layer.lock.Lock()
			layer.stale = true
			layer.lock.Unlock()

		case *diffLayer:
			// If the layer is a simple diff, simply mark as stale
			layer.lock.Lock()
			atomic.StoreUint32(&layer.stale, 1)
			layer.lock.Unlock()

		default:
			panic(fmt.Sprintf("unknown layer type: %T", layer))
		}
	}
	// Start generating a new snapshot from scratch on a background thread. The
	// generator will run a wiper first if there's not one running right now.
	utils.Logger().Info().Msg("Rebuilding state snapshot")
	t.layers = map[common.Hash]snapshot{
		root: generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, root),
	}
}
||||
|
||||
// AccountIterator creates a new account iterator for the specified root hash and
|
||||
// seeks to a starting account hash.
|
||||
func (t *Tree) AccountIterator(root common.Hash, seek common.Hash) (AccountIterator, error) { |
||||
ok, err := t.generating() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
if ok { |
||||
return nil, ErrNotConstructed |
||||
} |
||||
return newFastAccountIterator(t, root, seek) |
||||
} |
||||
|
||||
// StorageIterator creates a new storage iterator for the specified root hash and
|
||||
// account. The iterator will be move to the specific start position.
|
||||
func (t *Tree) StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) { |
||||
ok, err := t.generating() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
if ok { |
||||
return nil, ErrNotConstructed |
||||
} |
||||
return newFastStorageIterator(t, root, account, seek) |
||||
} |
||||
|
||||
// Verify iterates the whole state(all the accounts as well as the corresponding storages)
// with the specific root and compares the re-computed hash with the original one.
func (t *Tree) Verify(root common.Hash) error {
	acctIt, err := t.AccountIterator(root, common.Hash{})
	if err != nil {
		return err
	}
	defer acctIt.Release()

	// Re-hash every account; the per-account callback recursively re-hashes
	// that account's storage trie from its snapshot slots.
	got, err := generateTrieRoot(nil, "", acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
		storageIt, err := t.StorageIterator(root, accountHash, common.Hash{})
		if err != nil {
			return common.Hash{}, err
		}
		defer storageIt.Release()

		hash, err := generateTrieRoot(nil, "", storageIt, accountHash, stackTrieGenerate, nil, stat, false)
		if err != nil {
			return common.Hash{}, err
		}
		return hash, nil
	}, newGenerateStats(), true)

	if err != nil {
		return err
	}
	if got != root {
		return fmt.Errorf("state root hash mismatch: got %x, want %x", got, root)
	}
	return nil
}
||||
|
||||
// disklayer is an internal helper function to return the disk layer.
|
||||
// The lock of snapTree is assumed to be held already.
|
||||
func (t *Tree) disklayer() *diskLayer { |
||||
var snap snapshot |
||||
for _, s := range t.layers { |
||||
snap = s |
||||
break |
||||
} |
||||
if snap == nil { |
||||
return nil |
||||
} |
||||
switch layer := snap.(type) { |
||||
case *diskLayer: |
||||
return layer |
||||
case *diffLayer: |
||||
return layer.origin |
||||
default: |
||||
panic(fmt.Sprintf("%T: undefined layer", snap)) |
||||
} |
||||
} |
||||
|
||||
// diskRoot is a internal helper function to return the disk layer root.
|
||||
// The lock of snapTree is assumed to be held already.
|
||||
func (t *Tree) diskRoot() common.Hash { |
||||
disklayer := t.disklayer() |
||||
if disklayer == nil { |
||||
return common.Hash{} |
||||
} |
||||
return disklayer.Root() |
||||
} |
||||
|
||||
// generating is an internal helper function which reports whether the snapshot
|
||||
// is still under the construction.
|
||||
func (t *Tree) generating() (bool, error) { |
||||
t.lock.Lock() |
||||
defer t.lock.Unlock() |
||||
|
||||
layer := t.disklayer() |
||||
if layer == nil { |
||||
return false, errors.New("disk layer is missing") |
||||
} |
||||
layer.lock.RLock() |
||||
defer layer.lock.RUnlock() |
||||
return layer.genMarker != nil, nil |
||||
} |
||||
|
||||
// DiskRoot is a external helper function to return the disk layer root.
|
||||
func (t *Tree) DiskRoot() common.Hash { |
||||
t.lock.Lock() |
||||
defer t.lock.Unlock() |
||||
|
||||
return t.diskRoot() |
||||
} |
@ -0,0 +1,488 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import ( |
||||
crand "crypto/rand" |
||||
"encoding/binary" |
||||
"fmt" |
||||
"math/big" |
||||
"math/rand" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/VictoriaMetrics/fastcache" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/harmony-one/harmony/core/rawdb" |
||||
) |
||||
|
||||
// randomHash generates a random blob of data and returns it as a hash.
|
||||
func randomHash() common.Hash { |
||||
var hash common.Hash |
||||
if n, err := crand.Read(hash[:]); n != common.HashLength || err != nil { |
||||
panic(err) |
||||
} |
||||
return hash |
||||
} |
||||
|
||||
// randomAccount generates a random account and returns it RLP encoded.
|
||||
func randomAccount() []byte { |
||||
root := randomHash() |
||||
a := Account{ |
||||
Balance: big.NewInt(rand.Int63()), |
||||
Nonce: rand.Uint64(), |
||||
Root: root[:], |
||||
CodeHash: types.EmptyCodeHash[:], |
||||
} |
||||
data, _ := rlp.EncodeToBytes(a) |
||||
return data |
||||
} |
||||
|
||||
// randomAccountSet generates a set of random accounts with the given strings as
|
||||
// the account address hashes.
|
||||
func randomAccountSet(hashes ...string) map[common.Hash][]byte { |
||||
accounts := make(map[common.Hash][]byte) |
||||
for _, hash := range hashes { |
||||
accounts[common.HexToHash(hash)] = randomAccount() |
||||
} |
||||
return accounts |
||||
} |
||||
|
||||
// randomStorageSet generates a set of random slots with the given strings as
|
||||
// the slot addresses.
|
||||
func randomStorageSet(accounts []string, hashes [][]string, nilStorage [][]string) map[common.Hash]map[common.Hash][]byte { |
||||
storages := make(map[common.Hash]map[common.Hash][]byte) |
||||
for index, account := range accounts { |
||||
storages[common.HexToHash(account)] = make(map[common.Hash][]byte) |
||||
|
||||
if index < len(hashes) { |
||||
hashes := hashes[index] |
||||
for _, hash := range hashes { |
||||
storages[common.HexToHash(account)][common.HexToHash(hash)] = randomHash().Bytes() |
||||
} |
||||
} |
||||
if index < len(nilStorage) { |
||||
nils := nilStorage[index] |
||||
for _, hash := range nils { |
||||
storages[common.HexToHash(account)][common.HexToHash(hash)] = nil |
||||
} |
||||
} |
||||
} |
||||
return storages |
||||
} |
||||
|
||||
// Tests that if a disk layer becomes stale, no active external references will
// be returned with junk data. This version of the test flattens every diff layer
// to check internal corner case around the bottom-most memory accumulator.
func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
	// Create an empty base layer and a snapshot tree out of it
	base := &diskLayer{
		diskdb: rawdb.NewMemoryDatabase(),
		root:   common.HexToHash("0x01"),
		cache:  fastcache.New(1024 * 500),
	}
	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			base.root: base,
		},
	}
	// Retrieve a reference to the base and commit a diff on top
	ref := snaps.Snapshot(base.root)

	accounts := map[common.Hash][]byte{
		common.HexToHash("0xa1"): randomAccount(),
	}
	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
	if n := len(snaps.layers); n != 2 {
		t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 2)
	}
	// Commit the diff layer onto the disk and ensure it's persisted
	// (Cap with layers==0 flattens everything into a fresh disk layer).
	if err := snaps.Cap(common.HexToHash("0x02"), 0); err != nil {
		t.Fatalf("failed to merge diff layer onto disk: %v", err)
	}
	// Since the base layer was modified, ensure that data retrieval on the external reference fail
	if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
		t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
	}
	if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale {
		t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err)
	}
	if n := len(snaps.layers); n != 1 {
		t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 1)
		fmt.Println(snaps.layers)
	}
}
||||
|
||||
// Tests that if a disk layer becomes stale, no active external references will
// be returned with junk data. This version of the test retains the bottom diff
// layer to check the usual mode of operation where the accumulator is retained.
func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
	// Create an empty base layer and a snapshot tree out of it
	base := &diskLayer{
		diskdb: rawdb.NewMemoryDatabase(),
		root:   common.HexToHash("0x01"),
		cache:  fastcache.New(1024 * 500),
	}
	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			base.root: base,
		},
	}
	// Retrieve a reference to the base and commit two diffs on top
	ref := snaps.Snapshot(base.root)

	accounts := map[common.Hash][]byte{
		common.HexToHash("0xa1"): randomAccount(),
	}
	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
	if n := len(snaps.layers); n != 3 {
		t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 3)
	}
	// Commit the diff layer onto the disk and ensure it's persisted.
	// Zeroing the aggregator memory limit forces the accumulator to cascade
	// to disk; restore the original limit when the test finishes.
	defer func(memcap uint64) { aggregatorMemoryLimit = memcap }(aggregatorMemoryLimit)
	aggregatorMemoryLimit = 0

	if err := snaps.Cap(common.HexToHash("0x03"), 1); err != nil {
		t.Fatalf("failed to merge accumulator onto disk: %v", err)
	}
	// Since the base layer was modified, ensure that data retrievals on the external reference fail
	if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
		t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
	}
	if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale {
		t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err)
	}
	if n := len(snaps.layers); n != 2 {
		t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 2)
		fmt.Println(snaps.layers)
	}
}
||||
|
||||
// Tests that if a diff layer becomes stale, no active external references will
|
||||
// be returned with junk data. This version of the test retains the bottom diff
|
||||
// layer to check the usual mode of operation where the accumulator is retained.
|
||||
func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) { |
||||
// Create an empty base layer and a snapshot tree out of it
|
||||
base := &diskLayer{ |
||||
diskdb: rawdb.NewMemoryDatabase(), |
||||
root: common.HexToHash("0x01"), |
||||
cache: fastcache.New(1024 * 500), |
||||
} |
||||
snaps := &Tree{ |
||||
layers: map[common.Hash]snapshot{ |
||||
base.root: base, |
||||
}, |
||||
} |
||||
// Commit three diffs on top and retrieve a reference to the bottommost
|
||||
accounts := map[common.Hash][]byte{ |
||||
common.HexToHash("0xa1"): randomAccount(), |
||||
} |
||||
if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil { |
||||
t.Fatalf("failed to create a diff layer: %v", err) |
||||
} |
||||
if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil { |
||||
t.Fatalf("failed to create a diff layer: %v", err) |
||||
} |
||||
if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil); err != nil { |
||||
t.Fatalf("failed to create a diff layer: %v", err) |
||||
} |
||||
if n := len(snaps.layers); n != 4 { |
||||
t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 4) |
||||
} |
||||
ref := snaps.Snapshot(common.HexToHash("0x02")) |
||||
|
||||
// Doing a Cap operation with many allowed layers should be a no-op
|
||||
exp := len(snaps.layers) |
||||
if err := snaps.Cap(common.HexToHash("0x04"), 2000); err != nil { |
||||
t.Fatalf("failed to flatten diff layer into accumulator: %v", err) |
||||
} |
||||
if got := len(snaps.layers); got != exp { |
||||
t.Errorf("layers modified, got %d exp %d", got, exp) |
||||
} |
||||
// Flatten the diff layer into the bottom accumulator
|
||||
if err := snaps.Cap(common.HexToHash("0x04"), 1); err != nil { |
||||
t.Fatalf("failed to flatten diff layer into accumulator: %v", err) |
||||
} |
||||
// Since the accumulator diff layer was modified, ensure that data retrievals on the external reference fail
|
||||
if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale { |
||||
t.Errorf("stale reference returned account: %#x (err: %v)", acc, err) |
||||
} |
||||
if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale { |
||||
t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err) |
||||
} |
||||
if n := len(snaps.layers); n != 3 { |
||||
t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 3) |
||||
fmt.Println(snaps.layers) |
||||
} |
||||
} |
||||
|
||||
// TestPostCapBasicDataAccess tests some functionality regarding capping/flattening.
|
||||
func TestPostCapBasicDataAccess(t *testing.T) { |
||||
// setAccount is a helper to construct a random account entry and assign it to
|
||||
// an account slot in a snapshot
|
||||
setAccount := func(accKey string) map[common.Hash][]byte { |
||||
return map[common.Hash][]byte{ |
||||
common.HexToHash(accKey): randomAccount(), |
||||
} |
||||
} |
||||
// Create a starting base layer and a snapshot tree out of it
|
||||
base := &diskLayer{ |
||||
diskdb: rawdb.NewMemoryDatabase(), |
||||
root: common.HexToHash("0x01"), |
||||
cache: fastcache.New(1024 * 500), |
||||
} |
||||
snaps := &Tree{ |
||||
layers: map[common.Hash]snapshot{ |
||||
base.root: base, |
||||
}, |
||||
} |
||||
// The lowest difflayer
|
||||
snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil) |
||||
snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil) |
||||
snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil) |
||||
|
||||
snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil) |
||||
snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil) |
||||
|
||||
// checkExist verifies if an account exists in a snapshot
|
||||
checkExist := func(layer *diffLayer, key string) error { |
||||
if data, _ := layer.Account(common.HexToHash(key)); data == nil { |
||||
return fmt.Errorf("expected %x to exist, got nil", common.HexToHash(key)) |
||||
} |
||||
return nil |
||||
} |
||||
// shouldErr checks that an account access errors as expected
|
||||
shouldErr := func(layer *diffLayer, key string) error { |
||||
if data, err := layer.Account(common.HexToHash(key)); err == nil { |
||||
return fmt.Errorf("expected error, got data %x", data) |
||||
} |
||||
return nil |
||||
} |
||||
// check basics
|
||||
snap := snaps.Snapshot(common.HexToHash("0xb3")).(*diffLayer) |
||||
|
||||
if err := checkExist(snap, "0xa1"); err != nil { |
||||
t.Error(err) |
||||
} |
||||
if err := checkExist(snap, "0xb2"); err != nil { |
||||
t.Error(err) |
||||
} |
||||
if err := checkExist(snap, "0xb3"); err != nil { |
||||
t.Error(err) |
||||
} |
||||
// Cap to a bad root should fail
|
||||
if err := snaps.Cap(common.HexToHash("0x1337"), 0); err == nil { |
||||
t.Errorf("expected error, got none") |
||||
} |
||||
// Now, merge the a-chain
|
||||
snaps.Cap(common.HexToHash("0xa3"), 0) |
||||
|
||||
// At this point, a2 got merged into a1. Thus, a1 is now modified, and as a1 is
|
||||
// the parent of b2, b2 should no longer be able to iterate into parent.
|
||||
|
||||
// These should still be accessible
|
||||
if err := checkExist(snap, "0xb2"); err != nil { |
||||
t.Error(err) |
||||
} |
||||
if err := checkExist(snap, "0xb3"); err != nil { |
||||
t.Error(err) |
||||
} |
||||
// But these would need iteration into the modified parent
|
||||
if err := shouldErr(snap, "0xa1"); err != nil { |
||||
t.Error(err) |
||||
} |
||||
if err := shouldErr(snap, "0xa2"); err != nil { |
||||
t.Error(err) |
||||
} |
||||
if err := shouldErr(snap, "0xa3"); err != nil { |
||||
t.Error(err) |
||||
} |
||||
// Now, merge it again, just for fun. It should now error, since a3
|
||||
// is a disk layer
|
||||
if err := snaps.Cap(common.HexToHash("0xa3"), 0); err == nil { |
||||
t.Error("expected error capping the disk layer, got none") |
||||
} |
||||
} |
||||
|
||||
// TestSnaphots tests the functionality for retrieving the snapshot
|
||||
// with given head root and the desired depth.
|
||||
func TestSnaphots(t *testing.T) { |
||||
// setAccount is a helper to construct a random account entry and assign it to
|
||||
// an account slot in a snapshot
|
||||
setAccount := func(accKey string) map[common.Hash][]byte { |
||||
return map[common.Hash][]byte{ |
||||
common.HexToHash(accKey): randomAccount(), |
||||
} |
||||
} |
||||
makeRoot := func(height uint64) common.Hash { |
||||
var buffer [8]byte |
||||
binary.BigEndian.PutUint64(buffer[:], height) |
||||
return common.BytesToHash(buffer[:]) |
||||
} |
||||
// Create a starting base layer and a snapshot tree out of it
|
||||
base := &diskLayer{ |
||||
diskdb: rawdb.NewMemoryDatabase(), |
||||
root: makeRoot(1), |
||||
cache: fastcache.New(1024 * 500), |
||||
} |
||||
snaps := &Tree{ |
||||
layers: map[common.Hash]snapshot{ |
||||
base.root: base, |
||||
}, |
||||
} |
||||
// Construct the snapshots with 129 layers, flattening whatever's above that
|
||||
var ( |
||||
last = common.HexToHash("0x01") |
||||
head common.Hash |
||||
) |
||||
for i := 0; i < 129; i++ { |
||||
head = makeRoot(uint64(i + 2)) |
||||
snaps.Update(head, last, nil, setAccount(fmt.Sprintf("%d", i+2)), nil) |
||||
last = head |
||||
snaps.Cap(head, 128) // 130 layers (128 diffs + 1 accumulator + 1 disk)
|
||||
} |
||||
var cases = []struct { |
||||
headRoot common.Hash |
||||
limit int |
||||
nodisk bool |
||||
expected int |
||||
expectBottom common.Hash |
||||
}{ |
||||
{head, 0, false, 0, common.Hash{}}, |
||||
{head, 64, false, 64, makeRoot(129 + 2 - 64)}, |
||||
{head, 128, false, 128, makeRoot(3)}, // Normal diff layers, no accumulator
|
||||
{head, 129, true, 129, makeRoot(2)}, // All diff layers, including accumulator
|
||||
{head, 130, false, 130, makeRoot(1)}, // All diff layers + disk layer
|
||||
} |
||||
for i, c := range cases { |
||||
layers := snaps.Snapshots(c.headRoot, c.limit, c.nodisk) |
||||
if len(layers) != c.expected { |
||||
t.Errorf("non-overflow test %d: returned snapshot layers are mismatched, want %v, got %v", i, c.expected, len(layers)) |
||||
} |
||||
if len(layers) == 0 { |
||||
continue |
||||
} |
||||
bottommost := layers[len(layers)-1] |
||||
if bottommost.Root() != c.expectBottom { |
||||
t.Errorf("non-overflow test %d: snapshot mismatch, want %v, get %v", i, c.expectBottom, bottommost.Root()) |
||||
} |
||||
} |
||||
// Above we've tested the normal capping, which leaves the accumulator live.
|
||||
// Test that if the bottommost accumulator diff layer overflows the allowed
|
||||
// memory limit, the snapshot tree gets capped to one less layer.
|
||||
// Commit the diff layer onto the disk and ensure it's persisted
|
||||
defer func(memcap uint64) { aggregatorMemoryLimit = memcap }(aggregatorMemoryLimit) |
||||
aggregatorMemoryLimit = 0 |
||||
|
||||
snaps.Cap(head, 128) // 129 (128 diffs + 1 overflown accumulator + 1 disk)
|
||||
|
||||
cases = []struct { |
||||
headRoot common.Hash |
||||
limit int |
||||
nodisk bool |
||||
expected int |
||||
expectBottom common.Hash |
||||
}{ |
||||
{head, 0, false, 0, common.Hash{}}, |
||||
{head, 64, false, 64, makeRoot(129 + 2 - 64)}, |
||||
{head, 128, false, 128, makeRoot(3)}, // All diff layers, accumulator was flattened
|
||||
{head, 129, true, 128, makeRoot(3)}, // All diff layers, accumulator was flattened
|
||||
{head, 130, false, 129, makeRoot(2)}, // All diff layers + disk layer
|
||||
} |
||||
for i, c := range cases { |
||||
layers := snaps.Snapshots(c.headRoot, c.limit, c.nodisk) |
||||
if len(layers) != c.expected { |
||||
t.Errorf("overflow test %d: returned snapshot layers are mismatched, want %v, got %v", i, c.expected, len(layers)) |
||||
} |
||||
if len(layers) == 0 { |
||||
continue |
||||
} |
||||
bottommost := layers[len(layers)-1] |
||||
if bottommost.Root() != c.expectBottom { |
||||
t.Errorf("overflow test %d: snapshot mismatch, want %v, get %v", i, c.expectBottom, bottommost.Root()) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// TestReadStateDuringFlattening tests the scenario that, during the
|
||||
// bottom diff layers are merging which tags these as stale, the read
|
||||
// happens via a pre-created top snapshot layer which tries to access
|
||||
// the state in these stale layers. Ensure this read can retrieve the
|
||||
// right state back(block until the flattening is finished) instead of
|
||||
// an unexpected error(snapshot layer is stale).
|
||||
func TestReadStateDuringFlattening(t *testing.T) { |
||||
// setAccount is a helper to construct a random account entry and assign it to
|
||||
// an account slot in a snapshot
|
||||
setAccount := func(accKey string) map[common.Hash][]byte { |
||||
return map[common.Hash][]byte{ |
||||
common.HexToHash(accKey): randomAccount(), |
||||
} |
||||
} |
||||
// Create a starting base layer and a snapshot tree out of it
|
||||
base := &diskLayer{ |
||||
diskdb: rawdb.NewMemoryDatabase(), |
||||
root: common.HexToHash("0x01"), |
||||
cache: fastcache.New(1024 * 500), |
||||
} |
||||
snaps := &Tree{ |
||||
layers: map[common.Hash]snapshot{ |
||||
base.root: base, |
||||
}, |
||||
} |
||||
// 4 layers in total, 3 diff layers and 1 disk layers
|
||||
snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil) |
||||
snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil) |
||||
snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil) |
||||
|
||||
// Obtain the topmost snapshot handler for state accessing
|
||||
snap := snaps.Snapshot(common.HexToHash("0xa3")) |
||||
|
||||
// Register the testing hook to access the state after flattening
|
||||
var result = make(chan *Account) |
||||
snaps.onFlatten = func() { |
||||
// Spin up a thread to read the account from the pre-created
|
||||
// snapshot handler. It's expected to be blocked.
|
||||
go func() { |
||||
account, _ := snap.Account(common.HexToHash("0xa1")) |
||||
result <- account |
||||
}() |
||||
select { |
||||
case res := <-result: |
||||
t.Fatalf("Unexpected return %v", res) |
||||
case <-time.NewTimer(time.Millisecond * 300).C: |
||||
} |
||||
} |
||||
// Cap the snap tree, which will mark the bottom-most layer as stale.
|
||||
snaps.Cap(common.HexToHash("0xa3"), 1) |
||||
select { |
||||
case account := <-result: |
||||
if account == nil { |
||||
t.Fatal("Failed to retrieve account") |
||||
} |
||||
case <-time.NewTimer(time.Millisecond * 300).C: |
||||
t.Fatal("Unexpected blocker") |
||||
} |
||||
} |
@ -0,0 +1,36 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import ( |
||||
"bytes" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
) |
||||
|
||||
// hashes is a helper to implement sort.Interface.
|
||||
type hashes []common.Hash |
||||
|
||||
// Len is the number of elements in the collection.
|
||||
func (hs hashes) Len() int { return len(hs) } |
||||
|
||||
// Less reports whether the element with index i should sort before the element
|
||||
// with index j.
|
||||
func (hs hashes) Less(i, j int) bool { return bytes.Compare(hs[i][:], hs[j][:]) < 0 } |
||||
|
||||
// Swap swaps the elements with indexes i and j.
|
||||
func (hs hashes) Swap(i, j int) { hs[i], hs[j] = hs[j], hs[i] } |
@ -0,0 +1,165 @@ |
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package snapshot |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/harmony-one/harmony/core/rawdb" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
) |
||||
|
||||
// CheckDanglingStorage iterates the snap storage data, and verifies that all
|
||||
// storage also has corresponding account data.
|
||||
func CheckDanglingStorage(chaindb ethdb.KeyValueStore) error { |
||||
if err := checkDanglingDiskStorage(chaindb); err != nil { |
||||
utils.Logger().Error().Err(err).Msg("Database check error") |
||||
} |
||||
return checkDanglingMemStorage(chaindb) |
||||
} |
||||
|
||||
// checkDanglingDiskStorage checks if there is any 'dangling' storage data in the
|
||||
// disk-backed snapshot layer.
|
||||
func checkDanglingDiskStorage(chaindb ethdb.KeyValueStore) error { |
||||
var ( |
||||
lastReport = time.Now() |
||||
start = time.Now() |
||||
lastKey []byte |
||||
it = rawdb.NewKeyLengthIterator(chaindb.NewIterator(rawdb.SnapshotStoragePrefix, nil), 1+2*common.HashLength) |
||||
) |
||||
utils.Logger().Info().Msg("Checking dangling snapshot disk storage") |
||||
|
||||
defer it.Release() |
||||
for it.Next() { |
||||
k := it.Key() |
||||
accKey := k[1:33] |
||||
if bytes.Equal(accKey, lastKey) { |
||||
// No need to look up for every slot
|
||||
continue |
||||
} |
||||
lastKey = common.CopyBytes(accKey) |
||||
if time.Since(lastReport) > time.Second*8 { |
||||
utils.Logger().Info(). |
||||
Str("at", fmt.Sprintf("%#x", accKey)). |
||||
Interface("elapsed", common.PrettyDuration(time.Since(start))). |
||||
Msg("Iterating snap storage") |
||||
lastReport = time.Now() |
||||
} |
||||
if data := rawdb.ReadAccountSnapshot(chaindb, common.BytesToHash(accKey)); len(data) == 0 { |
||||
utils.Logger().Warn(). |
||||
Str("account", fmt.Sprintf("%#x", accKey)). |
||||
Str("storagekey", fmt.Sprintf("%#x", k)). |
||||
Msg("Dangling storage - missing account") |
||||
|
||||
return fmt.Errorf("dangling snapshot storage account %#x", accKey) |
||||
} |
||||
} |
||||
utils.Logger().Info().Err(it.Error()). |
||||
Interface("time", common.PrettyDuration(time.Since(start))). |
||||
Msg("Verified the snapshot disk storage") |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// checkDanglingMemStorage checks if there is any 'dangling' storage in the journalled
|
||||
// snapshot difflayers.
|
||||
func checkDanglingMemStorage(db ethdb.KeyValueStore) error { |
||||
start := time.Now() |
||||
utils.Logger().Info().Msg("Checking dangling journalled storage") |
||||
err := iterateJournal(db, func(pRoot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error { |
||||
for accHash := range storage { |
||||
if _, ok := accounts[accHash]; !ok { |
||||
utils.Logger().Error(). |
||||
Str("account", fmt.Sprintf("%#x", accHash)). |
||||
Interface("root", root). |
||||
Msg("Dangling storage - missing account") |
||||
} |
||||
} |
||||
return nil |
||||
}) |
||||
if err != nil { |
||||
utils.Logger().Info().Err(err).Msg("Failed to resolve snapshot journal") |
||||
return err |
||||
} |
||||
utils.Logger().Info().Interface("time", common.PrettyDuration(time.Since(start))).Msg("Verified the snapshot journalled storage") |
||||
return nil |
||||
} |
||||
|
||||
// CheckJournalAccount shows information about an account, from the disk layer and
|
||||
// up through the diff layers.
|
||||
func CheckJournalAccount(db ethdb.KeyValueStore, hash common.Hash) error { |
||||
// Look up the disk layer first
|
||||
baseRoot := rawdb.ReadSnapshotRoot(db) |
||||
fmt.Printf("Disklayer: Root: %x\n", baseRoot) |
||||
if data := rawdb.ReadAccountSnapshot(db, hash); data != nil { |
||||
account := new(Account) |
||||
if err := rlp.DecodeBytes(data, account); err != nil { |
||||
panic(err) |
||||
} |
||||
fmt.Printf("\taccount.nonce: %d\n", account.Nonce) |
||||
fmt.Printf("\taccount.balance: %x\n", account.Balance) |
||||
fmt.Printf("\taccount.root: %x\n", account.Root) |
||||
fmt.Printf("\taccount.codehash: %x\n", account.CodeHash) |
||||
} |
||||
// Check storage
|
||||
{ |
||||
it := rawdb.NewKeyLengthIterator(db.NewIterator(append(rawdb.SnapshotStoragePrefix, hash.Bytes()...), nil), 1+2*common.HashLength) |
||||
fmt.Printf("\tStorage:\n") |
||||
for it.Next() { |
||||
slot := it.Key()[33:] |
||||
fmt.Printf("\t\t%x: %x\n", slot, it.Value()) |
||||
} |
||||
it.Release() |
||||
} |
||||
var depth = 0 |
||||
|
||||
return iterateJournal(db, func(pRoot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error { |
||||
_, a := accounts[hash] |
||||
_, b := destructs[hash] |
||||
_, c := storage[hash] |
||||
depth++ |
||||
if !a && !b && !c { |
||||
return nil |
||||
} |
||||
fmt.Printf("Disklayer+%d: Root: %x, parent %x\n", depth, root, pRoot) |
||||
if data, ok := accounts[hash]; ok { |
||||
account := new(Account) |
||||
if err := rlp.DecodeBytes(data, account); err != nil { |
||||
panic(err) |
||||
} |
||||
fmt.Printf("\taccount.nonce: %d\n", account.Nonce) |
||||
fmt.Printf("\taccount.balance: %x\n", account.Balance) |
||||
fmt.Printf("\taccount.root: %x\n", account.Root) |
||||
fmt.Printf("\taccount.codehash: %x\n", account.CodeHash) |
||||
} |
||||
if _, ok := destructs[hash]; ok { |
||||
fmt.Printf("\t Destructed!") |
||||
} |
||||
if data, ok := storage[hash]; ok { |
||||
fmt.Printf("\tStorage\n") |
||||
for k, v := range data { |
||||
fmt.Printf("\t\t%x: %x\n", k, v) |
||||
} |
||||
} |
||||
return nil |
||||
}) |
||||
} |
@ -0,0 +1,46 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package state |
||||
|
||||
import ( |
||||
"bytes" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
) |
||||
|
||||
func BenchmarkCutOriginal(b *testing.B) { |
||||
value := common.HexToHash("0x01") |
||||
for i := 0; i < b.N; i++ { |
||||
bytes.TrimLeft(value[:], "\x00") |
||||
} |
||||
} |
||||
|
||||
func BenchmarkCutsetterFn(b *testing.B) { |
||||
value := common.HexToHash("0x01") |
||||
cutSetFn := func(r rune) bool { return r == 0 } |
||||
for i := 0; i < b.N; i++ { |
||||
bytes.TrimLeftFunc(value[:], cutSetFn) |
||||
} |
||||
} |
||||
|
||||
func BenchmarkCutCustomTrim(b *testing.B) { |
||||
value := common.HexToHash("0x01") |
||||
for i := 0; i < b.N; i++ { |
||||
common.TrimLeftZeroes(value[:]) |
||||
} |
||||
} |
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,56 @@ |
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package state |
||||
|
||||
import ( |
||||
"bytes" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/ethereum/go-ethereum/trie" |
||||
) |
||||
|
||||
// NewStateSync create a new state trie download scheduler.
|
||||
func NewStateSync(root common.Hash, database ethdb.KeyValueReader, onLeaf func(keys [][]byte, leaf []byte) error, scheme string) *trie.Sync { |
||||
// Register the storage slot callback if the external callback is specified.
|
||||
var onSlot func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error |
||||
if onLeaf != nil { |
||||
onSlot = func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error { |
||||
return onLeaf(keys, leaf) |
||||
} |
||||
} |
||||
// Register the account callback to connect the state trie and the storage
|
||||
// trie belongs to the contract.
|
||||
var syncer *trie.Sync |
||||
onAccount := func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error { |
||||
if onLeaf != nil { |
||||
if err := onLeaf(keys, leaf); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
var obj Account |
||||
if err := rlp.Decode(bytes.NewReader(leaf), &obj); err != nil { |
||||
return err |
||||
} |
||||
syncer.AddSubTrie(obj.Root, path, parent, parentPath, onSlot) |
||||
syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), path, parent, parentPath) |
||||
return nil |
||||
} |
||||
syncer = trie.NewSync(root, database, onAccount, scheme) |
||||
return syncer |
||||
} |
@ -0,0 +1,55 @@ |
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package state |
||||
|
||||
import ( |
||||
"github.com/ethereum/go-ethereum/common" |
||||
) |
||||
|
||||
// transientStorage is a representation of EIP-1153 "Transient Storage".
|
||||
type transientStorage map[common.Address]Storage |
||||
|
||||
// newTransientStorage creates a new instance of a transientStorage.
|
||||
func newTransientStorage() transientStorage { |
||||
return make(transientStorage) |
||||
} |
||||
|
||||
// Set sets the transient-storage `value` for `key` at the given `addr`.
|
||||
func (t transientStorage) Set(addr common.Address, key, value common.Hash) { |
||||
if _, ok := t[addr]; !ok { |
||||
t[addr] = make(Storage) |
||||
} |
||||
t[addr][key] = value |
||||
} |
||||
|
||||
// Get gets the transient storage for `key` at the given `addr`.
|
||||
func (t transientStorage) Get(addr common.Address, key common.Hash) common.Hash { |
||||
val, ok := t[addr] |
||||
if !ok { |
||||
return common.Hash{} |
||||
} |
||||
return val[key] |
||||
} |
||||
|
||||
// Copy does a deep copy of the transientStorage
|
||||
func (t transientStorage) Copy() transientStorage { |
||||
storage := make(transientStorage) |
||||
for key, value := range t { |
||||
storage[key] = value.Copy() |
||||
} |
||||
return storage |
||||
} |
@ -0,0 +1,354 @@ |
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package state |
||||
|
||||
import ( |
||||
"sync" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/metrics" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
) |
||||
|
||||
var ( |
||||
// triePrefetchMetricsPrefix is the prefix under which to publish the metrics.
|
||||
triePrefetchMetricsPrefix = "trie/prefetch/" |
||||
) |
||||
|
||||
// triePrefetcher is an active prefetcher, which receives accounts or storage
|
||||
// items and does trie-loading of them. The goal is to get as much useful content
|
||||
// into the caches as possible.
|
||||
//
|
||||
// Note, the prefetcher's API is not thread safe.
|
||||
type triePrefetcher struct { |
||||
db Database // Database to fetch trie nodes through
|
||||
root common.Hash // Root hash of the account trie for metrics
|
||||
fetches map[string]Trie // Partially or fully fetcher tries
|
||||
fetchers map[string]*subfetcher // Subfetchers for each trie
|
||||
|
||||
deliveryMissMeter metrics.Meter |
||||
accountLoadMeter metrics.Meter |
||||
accountDupMeter metrics.Meter |
||||
accountSkipMeter metrics.Meter |
||||
accountWasteMeter metrics.Meter |
||||
storageLoadMeter metrics.Meter |
||||
storageDupMeter metrics.Meter |
||||
storageSkipMeter metrics.Meter |
||||
storageWasteMeter metrics.Meter |
||||
} |
||||
|
||||
func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePrefetcher { |
||||
prefix := triePrefetchMetricsPrefix + namespace |
||||
p := &triePrefetcher{ |
||||
db: db, |
||||
root: root, |
||||
fetchers: make(map[string]*subfetcher), // Active prefetchers use the fetchers map
|
||||
|
||||
deliveryMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss", nil), |
||||
accountLoadMeter: metrics.GetOrRegisterMeter(prefix+"/account/load", nil), |
||||
accountDupMeter: metrics.GetOrRegisterMeter(prefix+"/account/dup", nil), |
||||
accountSkipMeter: metrics.GetOrRegisterMeter(prefix+"/account/skip", nil), |
||||
accountWasteMeter: metrics.GetOrRegisterMeter(prefix+"/account/waste", nil), |
||||
storageLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/load", nil), |
||||
storageDupMeter: metrics.GetOrRegisterMeter(prefix+"/storage/dup", nil), |
||||
storageSkipMeter: metrics.GetOrRegisterMeter(prefix+"/storage/skip", nil), |
||||
storageWasteMeter: metrics.GetOrRegisterMeter(prefix+"/storage/waste", nil), |
||||
} |
||||
return p |
||||
} |
||||
|
||||
// close iterates over all the subfetchers, aborts any that were left spinning
|
||||
// and reports the stats to the metrics subsystem.
|
||||
func (p *triePrefetcher) close() { |
||||
for _, fetcher := range p.fetchers { |
||||
fetcher.abort() // safe to do multiple times
|
||||
|
||||
if metrics.Enabled { |
||||
if fetcher.root == p.root { |
||||
p.accountLoadMeter.Mark(int64(len(fetcher.seen))) |
||||
p.accountDupMeter.Mark(int64(fetcher.dups)) |
||||
p.accountSkipMeter.Mark(int64(len(fetcher.tasks))) |
||||
|
||||
for _, key := range fetcher.used { |
||||
delete(fetcher.seen, string(key)) |
||||
} |
||||
p.accountWasteMeter.Mark(int64(len(fetcher.seen))) |
||||
} else { |
||||
p.storageLoadMeter.Mark(int64(len(fetcher.seen))) |
||||
p.storageDupMeter.Mark(int64(fetcher.dups)) |
||||
p.storageSkipMeter.Mark(int64(len(fetcher.tasks))) |
||||
|
||||
for _, key := range fetcher.used { |
||||
delete(fetcher.seen, string(key)) |
||||
} |
||||
p.storageWasteMeter.Mark(int64(len(fetcher.seen))) |
||||
} |
||||
} |
||||
} |
||||
// Clear out all fetchers (will crash on a second call, deliberate)
|
||||
p.fetchers = nil |
||||
} |
||||
|
||||
// copy creates a deep-but-inactive copy of the trie prefetcher. Any trie data
|
||||
// already loaded will be copied over, but no goroutines will be started. This
|
||||
// is mostly used in the miner which creates a copy of it's actively mutated
|
||||
// state to be sealed while it may further mutate the state.
|
||||
func (p *triePrefetcher) copy() *triePrefetcher { |
||||
copy := &triePrefetcher{ |
||||
db: p.db, |
||||
root: p.root, |
||||
fetches: make(map[string]Trie), // Active prefetchers use the fetches map
|
||||
|
||||
deliveryMissMeter: p.deliveryMissMeter, |
||||
accountLoadMeter: p.accountLoadMeter, |
||||
accountDupMeter: p.accountDupMeter, |
||||
accountSkipMeter: p.accountSkipMeter, |
||||
accountWasteMeter: p.accountWasteMeter, |
||||
storageLoadMeter: p.storageLoadMeter, |
||||
storageDupMeter: p.storageDupMeter, |
||||
storageSkipMeter: p.storageSkipMeter, |
||||
storageWasteMeter: p.storageWasteMeter, |
||||
} |
||||
// If the prefetcher is already a copy, duplicate the data
|
||||
if p.fetches != nil { |
||||
for root, fetch := range p.fetches { |
||||
if fetch == nil { |
||||
continue |
||||
} |
||||
copy.fetches[root] = p.db.CopyTrie(fetch) |
||||
} |
||||
return copy |
||||
} |
||||
// Otherwise we're copying an active fetcher, retrieve the current states
|
||||
for id, fetcher := range p.fetchers { |
||||
copy.fetches[id] = fetcher.peek() |
||||
} |
||||
return copy |
||||
} |
||||
|
||||
// prefetch schedules a batch of trie items to prefetch.
|
||||
func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, keys [][]byte) { |
||||
// If the prefetcher is an inactive one, bail out
|
||||
if p.fetches != nil { |
||||
return |
||||
} |
||||
// Active fetcher, schedule the retrievals
|
||||
id := p.trieID(owner, root) |
||||
fetcher := p.fetchers[id] |
||||
if fetcher == nil { |
||||
fetcher = newSubfetcher(p.db, p.root, owner, root) |
||||
p.fetchers[id] = fetcher |
||||
} |
||||
fetcher.schedule(keys) |
||||
} |
||||
|
||||
// trie returns the trie matching the root hash, or nil if the prefetcher doesn't
|
||||
// have it.
|
||||
func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie { |
||||
// If the prefetcher is inactive, return from existing deep copies
|
||||
id := p.trieID(owner, root) |
||||
if p.fetches != nil { |
||||
trie := p.fetches[id] |
||||
if trie == nil { |
||||
p.deliveryMissMeter.Mark(1) |
||||
return nil |
||||
} |
||||
return p.db.CopyTrie(trie) |
||||
} |
||||
// Otherwise the prefetcher is active, bail if no trie was prefetched for this root
|
||||
fetcher := p.fetchers[id] |
||||
if fetcher == nil { |
||||
p.deliveryMissMeter.Mark(1) |
||||
return nil |
||||
} |
||||
// Interrupt the prefetcher if it's by any chance still running and return
|
||||
// a copy of any pre-loaded trie.
|
||||
fetcher.abort() // safe to do multiple times
|
||||
|
||||
trie := fetcher.peek() |
||||
if trie == nil { |
||||
p.deliveryMissMeter.Mark(1) |
||||
return nil |
||||
} |
||||
return trie |
||||
} |
||||
|
||||
// used marks a batch of state items used to allow creating statistics as to
|
||||
// how useful or wasteful the prefetcher is.
|
||||
func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte) { |
||||
if fetcher := p.fetchers[p.trieID(owner, root)]; fetcher != nil { |
||||
fetcher.used = used |
||||
} |
||||
} |
||||
|
||||
// trieID returns an unique trie identifier consists the trie owner and root hash.
|
||||
func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string { |
||||
return string(append(owner.Bytes(), root.Bytes()...)) |
||||
} |
||||
|
||||
// subfetcher is a trie fetcher goroutine responsible for pulling entries for a
// single trie. It is spawned when a new root is encountered and lives until the
// main prefetcher is paused and either all requested items are processed or if
// the trie being worked on is retrieved from the prefetcher.
type subfetcher struct {
	db    Database    // Database to load trie nodes through
	state common.Hash // Root hash of the state to prefetch
	owner common.Hash // Owner of the trie, usually account hash
	root  common.Hash // Root hash of the trie to prefetch
	trie  Trie        // Trie being populated with nodes

	tasks [][]byte   // Items queued up for retrieval
	lock  sync.Mutex // Lock protecting the task queue

	wake chan struct{}  // Wake channel if a new task is scheduled (buffered, size 1)
	stop chan struct{}  // Channel to interrupt processing (closed by abort)
	term chan struct{}  // Channel to signal interruption (closed when loop exits)
	copy chan chan Trie // Channel to request a copy of the current trie

	seen map[string]struct{} // Tracks the entries already loaded
	dups int                 // Number of duplicate preload tasks
	used [][]byte            // Tracks the entries used in the end
}
||||
|
||||
// newSubfetcher creates a goroutine to prefetch state items belonging to a
|
||||
// particular root hash.
|
||||
func newSubfetcher(db Database, state common.Hash, owner common.Hash, root common.Hash) *subfetcher { |
||||
sf := &subfetcher{ |
||||
db: db, |
||||
state: state, |
||||
owner: owner, |
||||
root: root, |
||||
wake: make(chan struct{}, 1), |
||||
stop: make(chan struct{}), |
||||
term: make(chan struct{}), |
||||
copy: make(chan chan Trie), |
||||
seen: make(map[string]struct{}), |
||||
} |
||||
go sf.loop() |
||||
return sf |
||||
} |
||||
|
||||
// schedule adds a batch of trie keys to the queue to prefetch.
|
||||
func (sf *subfetcher) schedule(keys [][]byte) { |
||||
// Append the tasks to the current queue
|
||||
sf.lock.Lock() |
||||
sf.tasks = append(sf.tasks, keys...) |
||||
sf.lock.Unlock() |
||||
|
||||
// Notify the prefetcher, it's fine if it's already terminated
|
||||
select { |
||||
case sf.wake <- struct{}{}: |
||||
default: |
||||
} |
||||
} |
||||
|
||||
// peek tries to retrieve a deep copy of the fetcher's trie in whatever form it
|
||||
// is currently.
|
||||
func (sf *subfetcher) peek() Trie { |
||||
ch := make(chan Trie) |
||||
select { |
||||
case sf.copy <- ch: |
||||
// Subfetcher still alive, return copy from it
|
||||
return <-ch |
||||
|
||||
case <-sf.term: |
||||
// Subfetcher already terminated, return a copy directly
|
||||
if sf.trie == nil { |
||||
return nil |
||||
} |
||||
return sf.db.CopyTrie(sf.trie) |
||||
} |
||||
} |
||||
|
||||
// abort interrupts the subfetcher immediately. It is safe to call abort multiple
// times but it is not thread safe.
func (sf *subfetcher) abort() {
	select {
	case <-sf.stop:
		// Already stopped by a previous abort; closing again would panic.
	default:
		close(sf.stop)
	}
	// Block until loop() has observed the stop signal and exited.
	<-sf.term
}
||||
|
||||
// loop waits for new tasks to be scheduled and keeps loading them until it runs
// out of tasks or its underlying trie is retrieved for committing.
func (sf *subfetcher) loop() {
	// No matter how the loop stops, signal anyone waiting that it's terminated
	defer close(sf.term)

	// Start by opening the trie and stop processing if it fails
	if sf.owner == (common.Hash{}) {
		// Empty owner: this subfetcher works the main account trie.
		trie, err := sf.db.OpenTrie(sf.root)
		if err != nil {
			utils.Logger().Warn().Err(err).Interface("root", sf.root).Msg("Trie prefetcher failed opening trie")
			return
		}
		sf.trie = trie
	} else {
		// Non-empty owner: open the storage trie belonging to that account.
		trie, err := sf.db.OpenStorageTrie(sf.state, sf.owner, sf.root)
		if err != nil {
			utils.Logger().Warn().Err(err).Interface("root", sf.root).Msg("Trie prefetcher failed opening trie")
			return
		}
		sf.trie = trie
	}
	// Trie opened successfully, keep prefetching items
	for {
		select {
		case <-sf.wake:
			// Subfetcher was woken up, retrieve any tasks to avoid spinning the lock
			sf.lock.Lock()
			tasks := sf.tasks
			sf.tasks = nil
			sf.lock.Unlock()

			// Prefetch any tasks until the loop is interrupted
			for i, task := range tasks {
				select {
				case <-sf.stop:
					// If termination is requested, add any leftover back and return
					sf.lock.Lock()
					sf.tasks = append(sf.tasks, tasks[i:]...)
					sf.lock.Unlock()
					return

				case ch := <-sf.copy:
					// Somebody wants a copy of the current trie, grant them.
					// NOTE(review): serving a copy consumes this iteration, so
					// the current `task` is silently skipped and never preloaded.
					// Harmless for a best-effort prefetcher, but confirm it is
					// intentional.
					ch <- sf.db.CopyTrie(sf.trie)

				default:
					// No termination request yet, prefetch the next entry
					if _, ok := sf.seen[string(task)]; ok {
						sf.dups++
					} else {
						sf.trie.TryGet(task)
						sf.seen[string(task)] = struct{}{}
					}
				}
			}

		case ch := <-sf.copy:
			// Somebody wants a copy of the current trie, grant them
			ch <- sf.db.CopyTrie(sf.trie)

		case <-sf.stop:
			// Termination is requested, abort and leave remaining tasks
			return
		}
	}
}
@ -0,0 +1,110 @@ |
||||
// Copyright 2021 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package state |
||||
|
||||
import ( |
||||
"math/big" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/harmony-one/harmony/core/rawdb" |
||||
) |
||||
|
||||
func filledStateDB() *DB { |
||||
state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) |
||||
|
||||
// Create an account and check if the retrieved balance is correct
|
||||
addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe") |
||||
skey := common.HexToHash("aaa") |
||||
sval := common.HexToHash("bbb") |
||||
|
||||
state.SetBalance(addr, big.NewInt(42)) // Change the account trie
|
||||
state.SetCode(addr, []byte("hello"), false) // Change an external metadata
|
||||
state.SetState(addr, skey, sval) // Change the storage trie
|
||||
for i := 0; i < 100; i++ { |
||||
sk := common.BigToHash(big.NewInt(int64(i))) |
||||
state.SetState(addr, sk, sk) // Change the storage trie
|
||||
} |
||||
return state |
||||
} |
||||
|
||||
func TestCopyAndClose(t *testing.T) { |
||||
db := filledStateDB() |
||||
prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") |
||||
skey := common.HexToHash("aaa") |
||||
prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) |
||||
prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) |
||||
time.Sleep(1 * time.Second) |
||||
a := prefetcher.trie(common.Hash{}, db.originalRoot) |
||||
prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) |
||||
b := prefetcher.trie(common.Hash{}, db.originalRoot) |
||||
cpy := prefetcher.copy() |
||||
cpy.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) |
||||
cpy.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) |
||||
c := cpy.trie(common.Hash{}, db.originalRoot) |
||||
prefetcher.close() |
||||
cpy2 := cpy.copy() |
||||
cpy2.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) |
||||
d := cpy2.trie(common.Hash{}, db.originalRoot) |
||||
cpy.close() |
||||
cpy2.close() |
||||
if a.Hash() != b.Hash() || a.Hash() != c.Hash() || a.Hash() != d.Hash() { |
||||
t.Fatalf("Invalid trie, hashes should be equal: %v %v %v %v", a.Hash(), b.Hash(), c.Hash(), d.Hash()) |
||||
} |
||||
} |
||||
|
||||
func TestUseAfterClose(t *testing.T) { |
||||
db := filledStateDB() |
||||
prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") |
||||
skey := common.HexToHash("aaa") |
||||
prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) |
||||
a := prefetcher.trie(common.Hash{}, db.originalRoot) |
||||
prefetcher.close() |
||||
b := prefetcher.trie(common.Hash{}, db.originalRoot) |
||||
if a == nil { |
||||
t.Fatal("Prefetching before close should not return nil") |
||||
} |
||||
if b != nil { |
||||
t.Fatal("Trie after close should return nil") |
||||
} |
||||
} |
||||
|
||||
func TestCopyClose(t *testing.T) { |
||||
db := filledStateDB() |
||||
prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") |
||||
skey := common.HexToHash("aaa") |
||||
prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) |
||||
cpy := prefetcher.copy() |
||||
a := prefetcher.trie(common.Hash{}, db.originalRoot) |
||||
b := cpy.trie(common.Hash{}, db.originalRoot) |
||||
prefetcher.close() |
||||
c := prefetcher.trie(common.Hash{}, db.originalRoot) |
||||
d := cpy.trie(common.Hash{}, db.originalRoot) |
||||
if a == nil { |
||||
t.Fatal("Prefetching before close should not return nil") |
||||
} |
||||
if b == nil { |
||||
t.Fatal("Copy trie should return nil") |
||||
} |
||||
if c != nil { |
||||
t.Fatal("Trie after close should return nil") |
||||
} |
||||
if d == nil { |
||||
t.Fatal("Copy trie should not return nil") |
||||
} |
||||
} |
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue