Upgrade rawdb and statedb codes to add the latest functionalities of ethdb (#4374)
* added bloom filter * upgrade rawdb and statedb * change var name and remove extra comments * return back fake storage in case if we need it for test later * add the previous change back * remove some extra entries from go.mod * fix WritePreimages to use batch * mark unused functions which are ported over from eth --------- Co-authored-by: Casey Gardiner <117784577+ONECasey@users.noreply.github.com>pull/4395/head
parent
a1775465d7
commit
d21dc7f200
@ -0,0 +1,210 @@ |
|||||||
|
// Copyright 2019 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package rawdb |
||||||
|
|
||||||
|
import ( |
||||||
|
"encoding/binary" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/ethdb" |
||||||
|
"github.com/harmony-one/harmony/internal/utils" |
||||||
|
) |
||||||
|
|
||||||
|
// ReadSnapshotDisabled retrieves if the snapshot maintenance is disabled.
|
||||||
|
func ReadSnapshotDisabled(db ethdb.KeyValueReader) bool { |
||||||
|
disabled, _ := db.Has(snapshotDisabledKey) |
||||||
|
return disabled |
||||||
|
} |
||||||
|
|
||||||
|
// WriteSnapshotDisabled stores the snapshot pause flag.
|
||||||
|
func WriteSnapshotDisabled(db ethdb.KeyValueWriter) { |
||||||
|
if err := db.Put(snapshotDisabledKey, []byte("42")); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to store snapshot disabled flag") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// DeleteSnapshotDisabled deletes the flag keeping the snapshot maintenance disabled.
|
||||||
|
func DeleteSnapshotDisabled(db ethdb.KeyValueWriter) { |
||||||
|
if err := db.Delete(snapshotDisabledKey); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to remove snapshot disabled flag") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// ReadSnapshotRoot retrieves the root of the block whose state is contained in
|
||||||
|
// the persisted snapshot.
|
||||||
|
func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash { |
||||||
|
data, _ := db.Get(SnapshotRootKey) |
||||||
|
if len(data) != common.HashLength { |
||||||
|
return common.Hash{} |
||||||
|
} |
||||||
|
return common.BytesToHash(data) |
||||||
|
} |
||||||
|
|
||||||
|
// WriteSnapshotRoot stores the root of the block whose state is contained in
|
||||||
|
// the persisted snapshot.
|
||||||
|
func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) { |
||||||
|
if err := db.Put(SnapshotRootKey, root[:]); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to store snapshot root") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// DeleteSnapshotRoot deletes the hash of the block whose state is contained in
|
||||||
|
// the persisted snapshot. Since snapshots are not immutable, this method can
|
||||||
|
// be used during updates, so a crash or failure will mark the entire snapshot
|
||||||
|
// invalid.
|
||||||
|
func DeleteSnapshotRoot(db ethdb.KeyValueWriter) { |
||||||
|
if err := db.Delete(SnapshotRootKey); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to remove snapshot root") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// ReadAccountSnapshot retrieves the snapshot entry of an account trie leaf.
|
||||||
|
func ReadAccountSnapshot(db ethdb.KeyValueReader, hash common.Hash) []byte { |
||||||
|
data, _ := db.Get(accountSnapshotKey(hash)) |
||||||
|
return data |
||||||
|
} |
||||||
|
|
||||||
|
// WriteAccountSnapshot stores the snapshot entry of an account trie leaf.
|
||||||
|
func WriteAccountSnapshot(db ethdb.KeyValueWriter, hash common.Hash, entry []byte) { |
||||||
|
if err := db.Put(accountSnapshotKey(hash), entry); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to store account snapshot") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// DeleteAccountSnapshot removes the snapshot entry of an account trie leaf.
|
||||||
|
func DeleteAccountSnapshot(db ethdb.KeyValueWriter, hash common.Hash) { |
||||||
|
if err := db.Delete(accountSnapshotKey(hash)); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to delete account snapshot") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// ReadStorageSnapshot retrieves the snapshot entry of an storage trie leaf.
|
||||||
|
func ReadStorageSnapshot(db ethdb.KeyValueReader, accountHash, storageHash common.Hash) []byte { |
||||||
|
data, _ := db.Get(storageSnapshotKey(accountHash, storageHash)) |
||||||
|
return data |
||||||
|
} |
||||||
|
|
||||||
|
// WriteStorageSnapshot stores the snapshot entry of an storage trie leaf.
|
||||||
|
func WriteStorageSnapshot(db ethdb.KeyValueWriter, accountHash, storageHash common.Hash, entry []byte) { |
||||||
|
if err := db.Put(storageSnapshotKey(accountHash, storageHash), entry); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to store storage snapshot") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// DeleteStorageSnapshot removes the snapshot entry of an storage trie leaf.
|
||||||
|
func DeleteStorageSnapshot(db ethdb.KeyValueWriter, accountHash, storageHash common.Hash) { |
||||||
|
if err := db.Delete(storageSnapshotKey(accountHash, storageHash)); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to delete storage snapshot") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// IterateStorageSnapshots returns an iterator for walking the entire storage
|
||||||
|
// space of a specific account.
|
||||||
|
func IterateStorageSnapshots(db ethdb.Iteratee, accountHash common.Hash) ethdb.Iterator { |
||||||
|
return NewKeyLengthIterator(db.NewIterator(storageSnapshotsKey(accountHash), nil), len(SnapshotStoragePrefix)+2*common.HashLength) |
||||||
|
} |
||||||
|
|
||||||
|
// ReadSnapshotJournal retrieves the serialized in-memory diff layers saved at
|
||||||
|
// the last shutdown. The blob is expected to be max a few 10s of megabytes.
|
||||||
|
func ReadSnapshotJournal(db ethdb.KeyValueReader) []byte { |
||||||
|
data, _ := db.Get(snapshotJournalKey) |
||||||
|
return data |
||||||
|
} |
||||||
|
|
||||||
|
// WriteSnapshotJournal stores the serialized in-memory diff layers to save at
|
||||||
|
// shutdown. The blob is expected to be max a few 10s of megabytes.
|
||||||
|
func WriteSnapshotJournal(db ethdb.KeyValueWriter, journal []byte) { |
||||||
|
if err := db.Put(snapshotJournalKey, journal); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to store snapshot journal") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// DeleteSnapshotJournal deletes the serialized in-memory diff layers saved at
|
||||||
|
// the last shutdown
|
||||||
|
func DeleteSnapshotJournal(db ethdb.KeyValueWriter) { |
||||||
|
if err := db.Delete(snapshotJournalKey); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to remove snapshot journal") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// ReadSnapshotGenerator retrieves the serialized snapshot generator saved at
|
||||||
|
// the last shutdown.
|
||||||
|
func ReadSnapshotGenerator(db ethdb.KeyValueReader) []byte { |
||||||
|
data, _ := db.Get(snapshotGeneratorKey) |
||||||
|
return data |
||||||
|
} |
||||||
|
|
||||||
|
// WriteSnapshotGenerator stores the serialized snapshot generator to save at
|
||||||
|
// shutdown.
|
||||||
|
func WriteSnapshotGenerator(db ethdb.KeyValueWriter, generator []byte) { |
||||||
|
if err := db.Put(snapshotGeneratorKey, generator); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to store snapshot generator") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// DeleteSnapshotGenerator deletes the serialized snapshot generator saved at
|
||||||
|
// the last shutdown
|
||||||
|
func DeleteSnapshotGenerator(db ethdb.KeyValueWriter) { |
||||||
|
if err := db.Delete(snapshotGeneratorKey); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to remove snapshot generator") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// ReadSnapshotRecoveryNumber retrieves the block number of the last persisted
|
||||||
|
// snapshot layer.
|
||||||
|
func ReadSnapshotRecoveryNumber(db ethdb.KeyValueReader) *uint64 { |
||||||
|
data, _ := db.Get(snapshotRecoveryKey) |
||||||
|
if len(data) == 0 { |
||||||
|
return nil |
||||||
|
} |
||||||
|
if len(data) != 8 { |
||||||
|
return nil |
||||||
|
} |
||||||
|
number := binary.BigEndian.Uint64(data) |
||||||
|
return &number |
||||||
|
} |
||||||
|
|
||||||
|
// WriteSnapshotRecoveryNumber stores the block number of the last persisted
|
||||||
|
// snapshot layer.
|
||||||
|
func WriteSnapshotRecoveryNumber(db ethdb.KeyValueWriter, number uint64) { |
||||||
|
var buf [8]byte |
||||||
|
binary.BigEndian.PutUint64(buf[:], number) |
||||||
|
if err := db.Put(snapshotRecoveryKey, buf[:]); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to store snapshot recovery number") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// DeleteSnapshotRecoveryNumber deletes the block number of the last persisted
|
||||||
|
// snapshot layer.
|
||||||
|
func DeleteSnapshotRecoveryNumber(db ethdb.KeyValueWriter) { |
||||||
|
if err := db.Delete(snapshotRecoveryKey); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to remove snapshot recovery number") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// ReadSnapshotSyncStatus retrieves the serialized sync status saved at shutdown.
|
||||||
|
func ReadSnapshotSyncStatus(db ethdb.KeyValueReader) []byte { |
||||||
|
data, _ := db.Get(snapshotSyncStatusKey) |
||||||
|
return data |
||||||
|
} |
||||||
|
|
||||||
|
// WriteSnapshotSyncStatus stores the serialized sync status to save at shutdown.
|
||||||
|
func WriteSnapshotSyncStatus(db ethdb.KeyValueWriter, status []byte) { |
||||||
|
if err := db.Put(snapshotSyncStatusKey, status); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to store snapshot sync status") |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,95 @@ |
|||||||
|
// Copyright 2020 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package rawdb |
||||||
|
|
||||||
|
import ( |
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/ethdb" |
||||||
|
"github.com/harmony-one/harmony/internal/utils" |
||||||
|
) |
||||||
|
|
||||||
|
// ReadPreimage retrieves a single preimage of the provided hash.
|
||||||
|
func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte { |
||||||
|
data, _ := db.Get(preimageKey(hash)) |
||||||
|
return data |
||||||
|
} |
||||||
|
|
||||||
|
// WritePreimages writes the provided set of preimages to the database.
|
||||||
|
func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) error { |
||||||
|
for hash, preimage := range preimages { |
||||||
|
if err := db.Put(preimageKey(hash), preimage); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to store trie preimage") |
||||||
|
} |
||||||
|
} |
||||||
|
preimageCounter.Inc(int64(len(preimages))) |
||||||
|
preimageHitCounter.Inc(int64(len(preimages))) |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// ReadCode retrieves the contract code of the provided code hash.
|
||||||
|
func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte { |
||||||
|
// Try with the prefixed code scheme first, if not then try with legacy
|
||||||
|
// scheme.
|
||||||
|
data := ReadCodeWithPrefix(db, hash) |
||||||
|
if len(data) != 0 { |
||||||
|
return data |
||||||
|
} |
||||||
|
data, _ = db.Get(hash.Bytes()) |
||||||
|
return data |
||||||
|
} |
||||||
|
|
||||||
|
// ReadCodeWithPrefix retrieves the contract code of the provided code hash.
|
||||||
|
// The main difference between this function and ReadCode is this function
|
||||||
|
// will only check the existence with latest scheme(with prefix).
|
||||||
|
func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte { |
||||||
|
data, _ := db.Get(codeKey(hash)) |
||||||
|
return data |
||||||
|
} |
||||||
|
|
||||||
|
// HasCode checks if the contract code corresponding to the
|
||||||
|
// provided code hash is present in the db.
|
||||||
|
func HasCode(db ethdb.KeyValueReader, hash common.Hash) bool { |
||||||
|
// Try with the prefixed code scheme first, if not then try with legacy
|
||||||
|
// scheme.
|
||||||
|
if ok := HasCodeWithPrefix(db, hash); ok { |
||||||
|
return true |
||||||
|
} |
||||||
|
ok, _ := db.Has(hash.Bytes()) |
||||||
|
return ok |
||||||
|
} |
||||||
|
|
||||||
|
// HasCodeWithPrefix checks if the contract code corresponding to the
|
||||||
|
// provided code hash is present in the db. This function will only check
|
||||||
|
// presence using the prefix-scheme.
|
||||||
|
func HasCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) bool { |
||||||
|
ok, _ := db.Has(codeKey(hash)) |
||||||
|
return ok |
||||||
|
} |
||||||
|
|
||||||
|
// WriteCode writes the provided contract code database.
|
||||||
|
func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) { |
||||||
|
if err := db.Put(codeKey(hash), code); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to store contract code") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// DeleteCode deletes the specified contract code from the database.
|
||||||
|
func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) { |
||||||
|
if err := db.Delete(codeKey(hash)); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to delete contract code") |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,80 @@ |
|||||||
|
// Copyright 2022 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package rawdb |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/core/types" |
||||||
|
"github.com/ethereum/go-ethereum/ethdb" |
||||||
|
"github.com/ethereum/go-ethereum/rlp" |
||||||
|
"github.com/harmony-one/harmony/internal/utils" |
||||||
|
) |
||||||
|
|
||||||
|
// ReadSkeletonSyncStatus retrieves the serialized sync status saved at shutdown.
|
||||||
|
func ReadSkeletonSyncStatus(db ethdb.KeyValueReader) []byte { |
||||||
|
data, _ := db.Get(skeletonSyncStatusKey) |
||||||
|
return data |
||||||
|
} |
||||||
|
|
||||||
|
// WriteSkeletonSyncStatus stores the serialized sync status to save at shutdown.
|
||||||
|
func WriteSkeletonSyncStatus(db ethdb.KeyValueWriter, status []byte) { |
||||||
|
if err := db.Put(skeletonSyncStatusKey, status); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to store skeleton sync status") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// DeleteSkeletonSyncStatus deletes the serialized sync status saved at the last
|
||||||
|
// shutdown
|
||||||
|
func DeleteSkeletonSyncStatus(db ethdb.KeyValueWriter) { |
||||||
|
if err := db.Delete(skeletonSyncStatusKey); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to remove skeleton sync status") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// ReadSkeletonHeader retrieves a block header from the skeleton sync store,
|
||||||
|
func ReadSkeletonHeader(db ethdb.KeyValueReader, number uint64) *types.Header { |
||||||
|
data, _ := db.Get(skeletonHeaderKey(number)) |
||||||
|
if len(data) == 0 { |
||||||
|
return nil |
||||||
|
} |
||||||
|
header := new(types.Header) |
||||||
|
if err := rlp.Decode(bytes.NewReader(data), header); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Uint64("number", number).Msg("Invalid skeleton header RLP") |
||||||
|
return nil |
||||||
|
} |
||||||
|
return header |
||||||
|
} |
||||||
|
|
||||||
|
// WriteSkeletonHeader stores a block header into the skeleton sync store.
|
||||||
|
func WriteSkeletonHeader(db ethdb.KeyValueWriter, header *types.Header) { |
||||||
|
data, err := rlp.EncodeToBytes(header) |
||||||
|
if err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to RLP encode header") |
||||||
|
} |
||||||
|
key := skeletonHeaderKey(header.Number.Uint64()) |
||||||
|
if err := db.Put(key, data); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to store skeleton header") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// DeleteSkeletonHeader removes all block header data associated with a hash.
|
||||||
|
func DeleteSkeletonHeader(db ethdb.KeyValueWriter, number uint64) { |
||||||
|
if err := db.Delete(skeletonHeaderKey(number)); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to delete skeleton header") |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,263 @@ |
|||||||
|
// Copyright 2022 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
|
||||||
|
|
||||||
|
package rawdb |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
"sync" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/crypto" |
||||||
|
"github.com/ethereum/go-ethereum/ethdb" |
||||||
|
"github.com/harmony-one/harmony/internal/utils" |
||||||
|
"golang.org/x/crypto/sha3" |
||||||
|
) |
||||||
|
|
||||||
|
// HashScheme is the legacy hash-based state scheme with which trie nodes are
// stored in the disk with node hash as the database key. The advantage of this
// scheme is that different versions of trie nodes can be stored in disk, which
// is very beneficial for constructing archive nodes. The drawback is it will
// store different trie nodes on the same path to different locations on the disk
// with no data locality, and it's unfriendly for designing state pruning.
//
// Now this scheme is still kept for backward compatibility, and it will be used
// for archive node and some other tries(e.g. light trie).
//
// NOTE: the literal values of these scheme identifiers are persisted semantics;
// do not change them without a migration.
const HashScheme = "hashScheme"

// PathScheme is the new path-based state scheme with which trie nodes are stored
// in the disk with node path as the database key. This scheme will only store one
// version of state data in the disk, which means that the state pruning operation
// is native. At the same time, this scheme will put adjacent trie nodes in the same
// area of the disk with good data locality property. But this scheme needs to rely
// on extra state diffs to survive deep reorg.
const PathScheme = "pathScheme"

// nodeHasher is a reusable wrapper used to derive the Keccak256 hash of a
// trie node's RLP encoding.
type nodeHasher struct{ sha crypto.KeccakState }

// hasherPool recycles nodeHasher instances so that hot read paths avoid
// allocating a fresh Keccak state per call.
var hasherPool = sync.Pool{
	New: func() interface{} { return &nodeHasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
}

// newNodeHasher fetches a hasher from the pool; pair with returnHasherToPool.
func newNodeHasher() *nodeHasher { return hasherPool.Get().(*nodeHasher) }

// returnHasherToPool hands a hasher back for reuse.
func returnHasherToPool(h *nodeHasher) { hasherPool.Put(h) }

// hashData returns the Keccak256 hash of data. The Read error is ignored;
// presumably KeccakState's Read never fails — TODO(review) confirm against
// the go-ethereum crypto package contract.
func (h *nodeHasher) hashData(data []byte) (n common.Hash) {
	h.sha.Reset()
	h.sha.Write(data)
	h.sha.Read(n[:])
	return n
}
||||||
|
|
||||||
|
// ReadAccountTrieNode retrieves the account trie node and the associated node
|
||||||
|
// hash with the specified node path.
|
||||||
|
func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.Hash) { |
||||||
|
data, err := db.Get(accountTrieNodeKey(path)) |
||||||
|
if err != nil { |
||||||
|
return nil, common.Hash{} |
||||||
|
} |
||||||
|
hasher := newNodeHasher() |
||||||
|
defer returnHasherToPool(hasher) |
||||||
|
return data, hasher.hashData(data) |
||||||
|
} |
||||||
|
|
||||||
|
// HasAccountTrieNode checks the account trie node presence with the specified
|
||||||
|
// node path and the associated node hash.
|
||||||
|
func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash) bool { |
||||||
|
data, err := db.Get(accountTrieNodeKey(path)) |
||||||
|
if err != nil { |
||||||
|
return false |
||||||
|
} |
||||||
|
hasher := newNodeHasher() |
||||||
|
defer returnHasherToPool(hasher) |
||||||
|
return hasher.hashData(data) == hash |
||||||
|
} |
||||||
|
|
||||||
|
// WriteAccountTrieNode writes the provided account trie node into database.
|
||||||
|
func WriteAccountTrieNode(db ethdb.KeyValueWriter, path []byte, node []byte) { |
||||||
|
if err := db.Put(accountTrieNodeKey(path), node); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to store account trie node") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// DeleteAccountTrieNode deletes the specified account trie node from the database.
|
||||||
|
func DeleteAccountTrieNode(db ethdb.KeyValueWriter, path []byte) { |
||||||
|
if err := db.Delete(accountTrieNodeKey(path)); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to delete account trie node") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// ReadStorageTrieNode retrieves the storage trie node and the associated node
|
||||||
|
// hash with the specified node path.
|
||||||
|
func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) ([]byte, common.Hash) { |
||||||
|
data, err := db.Get(storageTrieNodeKey(accountHash, path)) |
||||||
|
if err != nil { |
||||||
|
return nil, common.Hash{} |
||||||
|
} |
||||||
|
hasher := newNodeHasher() |
||||||
|
defer returnHasherToPool(hasher) |
||||||
|
return data, hasher.hashData(data) |
||||||
|
} |
||||||
|
|
||||||
|
// HasStorageTrieNode checks the storage trie node presence with the provided
|
||||||
|
// node path and the associated node hash.
|
||||||
|
func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte, hash common.Hash) bool { |
||||||
|
data, err := db.Get(storageTrieNodeKey(accountHash, path)) |
||||||
|
if err != nil { |
||||||
|
return false |
||||||
|
} |
||||||
|
hasher := newNodeHasher() |
||||||
|
defer returnHasherToPool(hasher) |
||||||
|
return hasher.hashData(data) == hash |
||||||
|
} |
||||||
|
|
||||||
|
// WriteStorageTrieNode writes the provided storage trie node into database.
|
||||||
|
func WriteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte, node []byte) { |
||||||
|
if err := db.Put(storageTrieNodeKey(accountHash, path), node); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to store storage trie node") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// DeleteStorageTrieNode deletes the specified storage trie node from the database.
|
||||||
|
func DeleteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte) { |
||||||
|
if err := db.Delete(storageTrieNodeKey(accountHash, path)); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to delete storage trie node") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// ReadLegacyTrieNode retrieves the legacy trie node with the given
|
||||||
|
// associated node hash.
|
||||||
|
func ReadLegacyTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte { |
||||||
|
data, err := db.Get(hash.Bytes()) |
||||||
|
if err != nil { |
||||||
|
return nil |
||||||
|
} |
||||||
|
return data |
||||||
|
} |
||||||
|
|
||||||
|
// HasLegacyTrieNode checks if the trie node with the provided hash is present in db.
|
||||||
|
func HasLegacyTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool { |
||||||
|
ok, _ := db.Has(hash.Bytes()) |
||||||
|
return ok |
||||||
|
} |
||||||
|
|
||||||
|
// WriteLegacyTrieNode writes the provided legacy trie node to database.
|
||||||
|
func WriteLegacyTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) { |
||||||
|
if err := db.Put(hash.Bytes(), node); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to store legacy trie node") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// DeleteLegacyTrieNode deletes the specified legacy trie node from database.
|
||||||
|
func DeleteLegacyTrieNode(db ethdb.KeyValueWriter, hash common.Hash) { |
||||||
|
if err := db.Delete(hash.Bytes()); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to delete legacy trie node") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// HasTrieNode checks the trie node presence with the provided node info and
|
||||||
|
// the associated node hash.
|
||||||
|
func HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) bool { |
||||||
|
switch scheme { |
||||||
|
case HashScheme: |
||||||
|
return HasLegacyTrieNode(db, hash) |
||||||
|
case PathScheme: |
||||||
|
if owner == (common.Hash{}) { |
||||||
|
return HasAccountTrieNode(db, path, hash) |
||||||
|
} |
||||||
|
return HasStorageTrieNode(db, owner, path, hash) |
||||||
|
default: |
||||||
|
panic(fmt.Sprintf("Unknown scheme %v", scheme)) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// ReadTrieNode retrieves the trie node from database with the provided node info
|
||||||
|
// and associated node hash.
|
||||||
|
// hashScheme-based lookup requires the following:
|
||||||
|
// - hash
|
||||||
|
//
|
||||||
|
// pathScheme-based lookup requires the following:
|
||||||
|
// - owner
|
||||||
|
// - path
|
||||||
|
func ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) []byte { |
||||||
|
switch scheme { |
||||||
|
case HashScheme: |
||||||
|
return ReadLegacyTrieNode(db, hash) |
||||||
|
case PathScheme: |
||||||
|
var ( |
||||||
|
blob []byte |
||||||
|
nHash common.Hash |
||||||
|
) |
||||||
|
if owner == (common.Hash{}) { |
||||||
|
blob, nHash = ReadAccountTrieNode(db, path) |
||||||
|
} else { |
||||||
|
blob, nHash = ReadStorageTrieNode(db, owner, path) |
||||||
|
} |
||||||
|
if nHash != hash { |
||||||
|
return nil |
||||||
|
} |
||||||
|
return blob |
||||||
|
default: |
||||||
|
panic(fmt.Sprintf("Unknown scheme %v", scheme)) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// WriteTrieNode writes the trie node into database with the provided node info
|
||||||
|
// and associated node hash.
|
||||||
|
// hashScheme-based lookup requires the following:
|
||||||
|
// - hash
|
||||||
|
//
|
||||||
|
// pathScheme-based lookup requires the following:
|
||||||
|
// - owner
|
||||||
|
// - path
|
||||||
|
func WriteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, node []byte, scheme string) { |
||||||
|
switch scheme { |
||||||
|
case HashScheme: |
||||||
|
WriteLegacyTrieNode(db, hash, node) |
||||||
|
case PathScheme: |
||||||
|
if owner == (common.Hash{}) { |
||||||
|
WriteAccountTrieNode(db, path, node) |
||||||
|
} else { |
||||||
|
WriteStorageTrieNode(db, owner, path, node) |
||||||
|
} |
||||||
|
default: |
||||||
|
panic(fmt.Sprintf("Unknown scheme %v", scheme)) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// DeleteTrieNode deletes the trie node from database with the provided node info
|
||||||
|
// and associated node hash.
|
||||||
|
// hashScheme-based lookup requires the following:
|
||||||
|
// - hash
|
||||||
|
//
|
||||||
|
// pathScheme-based lookup requires the following:
|
||||||
|
// - owner
|
||||||
|
// - path
|
||||||
|
func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, scheme string) { |
||||||
|
switch scheme { |
||||||
|
case HashScheme: |
||||||
|
DeleteLegacyTrieNode(db, hash) |
||||||
|
case PathScheme: |
||||||
|
if owner == (common.Hash{}) { |
||||||
|
DeleteAccountTrieNode(db, path) |
||||||
|
} else { |
||||||
|
DeleteStorageTrieNode(db, owner, path) |
||||||
|
} |
||||||
|
default: |
||||||
|
panic(fmt.Sprintf("Unknown scheme %v", scheme)) |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,55 @@ |
|||||||
|
// Copyright 2022 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package rawdb |
||||||
|
|
||||||
|
// The list of table names of chain freezer.
// These constants are NOT used, just ported over from the Ethereum codebase.
const (
	// ChainFreezerHeaderTable indicates the name of the freezer header table.
	ChainFreezerHeaderTable = "headers"

	// ChainFreezerHashTable indicates the name of the freezer canonical hash table.
	ChainFreezerHashTable = "hashes"

	// ChainFreezerBodiesTable indicates the name of the freezer block body table.
	ChainFreezerBodiesTable = "bodies"

	// ChainFreezerReceiptTable indicates the name of the freezer receipts table.
	ChainFreezerReceiptTable = "receipts"

	// ChainFreezerDifficultyTable indicates the name of the freezer total difficulty table.
	ChainFreezerDifficultyTable = "diffs"
)
||||||
|
|
||||||
|
// chainFreezerNoSnappy configures whether compression is disabled for the ancient-tables.
// Hashes and difficulties don't compress well.
// This variable is NOT used, just ported over from the Ethereum codebase.
var chainFreezerNoSnappy = map[string]bool{
	ChainFreezerHeaderTable:     false,
	ChainFreezerHashTable:       true,
	ChainFreezerBodiesTable:     false,
	ChainFreezerReceiptTable:    false,
	ChainFreezerDifficultyTable: true,
}
||||||
|
|
||||||
|
// The list of identifiers of ancient stores.
var (
	chainFreezerName = "chain" // the folder name of chain segment ancient store.
)

// freezers is the collection of all builtin freezer identifiers.
var freezers = []string{chainFreezerName}
@ -0,0 +1,91 @@ |
|||||||
|
// Copyright 2022 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package rawdb |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/ethdb" |
||||||
|
) |
||||||
|
|
||||||
|
// tableSize pairs a freezer table name with its on-disk storage size.
type tableSize struct {
	name string
	size common.StorageSize
}

// freezerInfo contains the basic information of the freezer.
type freezerInfo struct {
	name  string      // The identifier of freezer
	head  uint64      // The number of last stored item in the freezer
	tail  uint64      // The number of first stored item in the freezer
	sizes []tableSize // The storage size per table
}

// count returns the number of stored items in the freezer.
// NOTE(review): the unsigned arithmetic assumes head >= tail (a non-empty
// freezer); for head < tail the result would wrap around — confirm callers
// guarantee this.
func (info *freezerInfo) count() uint64 {
	return info.head - info.tail + 1
}

// size returns the storage size of the entire freezer, i.e. the sum of all
// its per-table sizes.
func (info *freezerInfo) size() common.StorageSize {
	var total common.StorageSize
	for _, table := range info.sizes {
		total += table.size
	}
	return total
}
||||||
|
|
||||||
|
// inspectFreezers inspects all freezers registered in the system.
|
||||||
|
// This function is NOT used, just ported over from the Ethereum
|
||||||
|
func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) { |
||||||
|
var infos []freezerInfo |
||||||
|
for _, freezer := range freezers { |
||||||
|
switch freezer { |
||||||
|
case chainFreezerName: |
||||||
|
// Chain ancient store is a bit special. It's always opened along
|
||||||
|
// with the key-value store, inspect the chain store directly.
|
||||||
|
info := freezerInfo{name: freezer} |
||||||
|
// Retrieve storage size of every contained table.
|
||||||
|
for table := range chainFreezerNoSnappy { |
||||||
|
size, err := db.AncientSize(table) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
info.sizes = append(info.sizes, tableSize{name: table, size: common.StorageSize(size)}) |
||||||
|
} |
||||||
|
// Retrieve the number of last stored item
|
||||||
|
ancients, err := db.Ancients() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
info.head = ancients - 1 |
||||||
|
|
||||||
|
// Retrieve the number of first stored item
|
||||||
|
tail, err := db.Tail() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
info.tail = tail |
||||||
|
infos = append(infos, info) |
||||||
|
|
||||||
|
default: |
||||||
|
return nil, fmt.Errorf("unknown freezer, supported ones: %v", freezers) |
||||||
|
} |
||||||
|
} |
||||||
|
return infos, nil |
||||||
|
} |
@ -0,0 +1,358 @@ |
|||||||
|
// Copyright 2020 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package rawdb |
||||||
|
|
||||||
|
import ( |
||||||
|
"runtime" |
||||||
|
"sync/atomic" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/common/prque" |
||||||
|
"github.com/ethereum/go-ethereum/core/types" |
||||||
|
"github.com/ethereum/go-ethereum/ethdb" |
||||||
|
"github.com/ethereum/go-ethereum/log" |
||||||
|
"github.com/ethereum/go-ethereum/rlp" |
||||||
|
"github.com/harmony-one/harmony/internal/utils" |
||||||
|
) |
||||||
|
|
||||||
|
// InitDatabaseFromFreezer reinitializes an empty database from a previous batch
|
||||||
|
// of frozen ancient blocks. The method iterates over all the frozen blocks and
|
||||||
|
// injects into the database the block hash->number mappings.
|
||||||
|
// This function is NOT used, just ported over from the Ethereum
|
||||||
|
func InitDatabaseFromFreezer(db ethdb.Database) { |
||||||
|
// If we can't access the freezer or it's empty, abort
|
||||||
|
frozen, err := db.Ancients() |
||||||
|
if err != nil || frozen == 0 { |
||||||
|
return |
||||||
|
} |
||||||
|
var ( |
||||||
|
batch = db.NewBatch() |
||||||
|
start = time.Now() |
||||||
|
logged = start.Add(-7 * time.Second) // Unindex during import is fast, don't double log
|
||||||
|
hash common.Hash |
||||||
|
) |
||||||
|
for i := uint64(0); i < frozen; { |
||||||
|
// We read 100K hashes at a time, for a total of 3.2M
|
||||||
|
count := uint64(100_000) |
||||||
|
if i+count > frozen { |
||||||
|
count = frozen - i |
||||||
|
} |
||||||
|
data, err := db.AncientRange(ChainFreezerHashTable, i, count, 32*count) |
||||||
|
if err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to init database from freezer") |
||||||
|
} |
||||||
|
for j, h := range data { |
||||||
|
number := i + uint64(j) |
||||||
|
hash = common.BytesToHash(h) |
||||||
|
WriteHeaderNumber(batch, hash, number) |
||||||
|
// If enough data was accumulated in memory or we're at the last block, dump to disk
|
||||||
|
if batch.ValueSize() > ethdb.IdealBatchSize { |
||||||
|
if err := batch.Write(); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to write data to db") |
||||||
|
} |
||||||
|
batch.Reset() |
||||||
|
} |
||||||
|
} |
||||||
|
i += uint64(len(data)) |
||||||
|
// If we've spent too much time already, notify the user of what we're doing
|
||||||
|
if time.Since(logged) > 8*time.Second { |
||||||
|
log.Info("Initializing database from freezer", "total", frozen, "number", i, "hash", hash, "elapsed", common.PrettyDuration(time.Since(start))) |
||||||
|
logged = time.Now() |
||||||
|
} |
||||||
|
} |
||||||
|
if err := batch.Write(); err != nil { |
||||||
|
utils.Logger().Error().Err(err).Msg("Failed to write data to db") |
||||||
|
} |
||||||
|
batch.Reset() |
||||||
|
|
||||||
|
WriteHeadHeaderHash(db, hash) |
||||||
|
WriteHeadFastBlockHash(db, hash) |
||||||
|
log.Info("Initialized database from freezer", "blocks", frozen, "elapsed", common.PrettyDuration(time.Since(start))) |
||||||
|
} |
||||||
|
|
||||||
|
// blockTxHashes pairs a block number with the transaction hashes contained in
// that block; it is the unit of work exchanged between the iterator goroutines.
type blockTxHashes struct {
	number uint64
	hashes []common.Hash
}
||||||
|
|
||||||
|
// iterateTransactions iterates over all transactions in the (canon) block
// number(s) given, and yields the hashes on a channel. If there is a signal
// received from interrupt channel, the iteration will be aborted and result
// channel will be closed.
//
// One goroutine reads block bodies sequentially from the database, while up to
// NumCPU goroutines decode them in parallel and emit blockTxHashes results.
func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool, interrupt chan struct{}) chan *blockTxHashes {
	// One thread sequentially reads data from db
	type numberRlp struct {
		number uint64
		rlp    rlp.RawValue
	}
	if to == from {
		return nil
	}
	threads := to - from
	if cpus := runtime.NumCPU(); threads > uint64(cpus) {
		threads = uint64(cpus)
	}
	var (
		rlpCh    = make(chan *numberRlp, threads*2)     // we send raw rlp over this channel
		hashesCh = make(chan *blockTxHashes, threads*2) // send hashes over hashesCh
	)
	// lookup runs in one instance
	lookup := func() {
		n, end := from, to
		if reverse {
			// In reverse mode iterate from to-1 down to from (inclusive).
			// Note: for from == 0, end wraps to MaxUint64, and so does n
			// after block 0 is processed, which is what terminates the loop.
			n, end = to-1, from-1
		}
		defer close(rlpCh)
		for n != end {
			data := ReadCanonicalBodyRLP(db, n)
			// Feed the block to the aggregator, or abort on interrupt
			select {
			case rlpCh <- &numberRlp{n, data}:
			case <-interrupt:
				return
			}
			if reverse {
				n--
			} else {
				n++
			}
		}
	}
	// process runs in parallel
	nThreadsAlive := int32(threads)
	process := func() {
		defer func() {
			// Last processor closes the result channel
			if atomic.AddInt32(&nThreadsAlive, -1) == 0 {
				close(hashesCh)
			}
		}()
		for data := range rlpCh {
			var body types.Body
			if err := rlp.DecodeBytes(data.rlp, &body); err != nil {
				utils.Logger().Warn().Err(err).Uint64("block", data.number).Msg("Failed to decode block body")
				return
			}
			var hashes []common.Hash
			for _, tx := range body.Transactions {
				hashes = append(hashes, tx.Hash())
			}
			result := &blockTxHashes{
				hashes: hashes,
				number: data.number,
			}
			// Feed the block to the aggregator, or abort on interrupt
			select {
			case hashesCh <- result:
			case <-interrupt:
				return
			}
		}
	}
	go lookup() // start the sequential db accessor
	for i := 0; i < int(threads); i++ {
		go process()
	}
	return hashesCh
}
||||||
|
|
||||||
|
// indexTransactions creates txlookup indices of the specified block range.
//
// This function iterates canonical chain in reverse order, it has one main advantage:
// We can write tx index tail flag periodically even without the whole indexing
// procedure is finished. So that we can resume indexing procedure next time quickly.
//
// There is a passed channel, the whole procedure will be interrupted if any
// signal received.
// This function is NOT used, just ported over from the Ethereum
func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
	// short circuit for invalid range
	if from >= to {
		return
	}
	var (
		hashesCh = iterateTransactions(db, from, to, true, interrupt)
		batch    = db.NewBatch()
		start    = time.Now()
		logged   = start.Add(-7 * time.Second)
		// Since we iterate in reverse, we expect the first number to come
		// in to be [to-1]. Therefore, setting lastNum to `to` means that the
		// prqueue gap-evaluation will work correctly
		lastNum = to
		queue   = prque.New[int64, *blockTxHashes](nil)
		// for stats reporting
		blocks, txs = 0, 0
	)
	for chanDelivery := range hashesCh {
		// Push the delivery into the queue and process contiguous ranges.
		// Since we iterate in reverse, so lower numbers have lower prio, and
		// we can use the number directly as prio marker
		queue.Push(chanDelivery, int64(chanDelivery.number))
		for !queue.Empty() {
			// If the next available item is gapped, return
			if _, priority := queue.Peek(); priority != int64(lastNum-1) {
				break
			}
			// For testing
			if hook != nil && !hook(lastNum-1) {
				break
			}
			// Next block available, pop it off and index it
			delivery := queue.PopItem()
			lastNum = delivery.number
			WriteTxLookupEntries(batch, delivery.number, delivery.hashes)
			blocks++
			txs += len(delivery.hashes)
			// If enough data was accumulated in memory or we're at the last block, dump to disk
			if batch.ValueSize() > ethdb.IdealBatchSize {
				WriteTxIndexTail(batch, lastNum) // Also write the tail here
				if err := batch.Write(); err != nil {
					utils.Logger().Error().Err(err).Msg("Failed writing batch to db")
					return
				}
				batch.Reset()
			}
			// If we've spent too much time already, notify the user of what we're doing
			if time.Since(logged) > 8*time.Second {
				log.Info("Indexing transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
				logged = time.Now()
			}
		}
	}
	// Flush the new indexing tail and the last committed data. It can also happen
	// that the last batch is empty because nothing to index, but the tail has to
	// be flushed anyway.
	WriteTxIndexTail(batch, lastNum)
	if err := batch.Write(); err != nil {
		utils.Logger().Error().Err(err).Msg("Failed writing batch to db")
		return
	}
	select {
	case <-interrupt:
		log.Debug("Transaction indexing interrupted", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
	default:
		log.Debug("Indexed transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
	}
}
||||||
|
|
||||||
|
// IndexTransactions creates txlookup indices of the specified block range. The from
// is included while to is excluded.
//
// This function iterates canonical chain in reverse order, it has one main advantage:
// We can write tx index tail flag periodically even without the whole indexing
// procedure is finished. So that we can resume indexing procedure next time quickly.
//
// There is a passed channel, the whole procedure will be interrupted if any
// signal received.
func IndexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}) {
	indexTransactions(db, from, to, interrupt, nil)
}
||||||
|
|
||||||
|
// indexTransactionsForTesting is the internal debug version of IndexTransactions
// with an additional hook, invoked before each block is indexed; returning
// false from the hook stops processing of the current contiguous run.
func indexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
	indexTransactions(db, from, to, interrupt, hook)
}
||||||
|
|
||||||
|
// unindexTransactions removes txlookup indices of the specified block range.
//
// There is a passed channel, the whole procedure will be interrupted if any
// signal received.
// This function is NOT used, just ported over from the Ethereum
func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
	// short circuit for invalid range
	if from >= to {
		return
	}
	var (
		hashesCh = iterateTransactions(db, from, to, false, interrupt)
		batch    = db.NewBatch()
		start    = time.Now()
		logged   = start.Add(-7 * time.Second)
		// we expect the first number to come in to be [from]. Therefore, setting
		// nextNum to from means that the prqueue gap-evaluation will work correctly
		nextNum = from
		queue   = prque.New[int64, *blockTxHashes](nil)
		// for stats reporting
		blocks, txs = 0, 0
	)
	// Otherwise spin up the concurrent iterator and unindexer
	for delivery := range hashesCh {
		// Push the delivery into the queue and process contiguous ranges.
		// Forward iteration: negate the number so lower blocks pop first.
		queue.Push(delivery, -int64(delivery.number))
		for !queue.Empty() {
			// If the next available item is gapped, return
			if _, priority := queue.Peek(); -priority != int64(nextNum) {
				break
			}
			// For testing
			if hook != nil && !hook(nextNum) {
				break
			}
			delivery := queue.PopItem()
			nextNum = delivery.number + 1
			DeleteTxLookupEntries(batch, delivery.hashes)
			txs += len(delivery.hashes)
			blocks++

			// If enough data was accumulated in memory or we're at the last block, dump to disk
			// A batch counts the size of deletion as '1', so we need to flush more
			// often than that.
			if blocks%1000 == 0 {
				WriteTxIndexTail(batch, nextNum)
				if err := batch.Write(); err != nil {
					utils.Logger().Error().Err(err).Msg("Failed writing batch to db")
					return
				}
				batch.Reset()
			}
			// If we've spent too much time already, notify the user of what we're doing
			if time.Since(logged) > 8*time.Second {
				log.Info("Unindexing transactions", "blocks", blocks, "txs", txs, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
				logged = time.Now()
			}
		}
	}
	// Flush the new indexing tail and the last committed data. It can also happen
	// that the last batch is empty because nothing to unindex, but the tail has to
	// be flushed anyway.
	WriteTxIndexTail(batch, nextNum)
	if err := batch.Write(); err != nil {
		utils.Logger().Error().Err(err).Msg("Failed writing batch to db")
		return
	}
	select {
	case <-interrupt:
		log.Debug("Transaction unindexing interrupted", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
	default:
		log.Debug("Unindexed transactions", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
	}
}
||||||
|
|
||||||
|
// UnindexTransactions removes txlookup indices of the specified block range.
// The from is included while to is excluded.
//
// There is a passed channel, the whole procedure will be interrupted if any
// signal received.
func UnindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}) {
	unindexTransactions(db, from, to, interrupt, nil)
}
||||||
|
|
||||||
|
// unindexTransactionsForTesting is the internal debug version of
// UnindexTransactions with an additional hook, invoked before each block is
// unindexed; returning false from the hook stops the current contiguous run.
func unindexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
	unindexTransactions(db, from, to, interrupt, hook)
}
@ -0,0 +1,464 @@ |
|||||||
|
// Copyright 2018 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package rawdb |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
"os" |
||||||
|
"path" |
||||||
|
"path/filepath" |
||||||
|
"strings" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/ethdb" |
||||||
|
"github.com/ethereum/go-ethereum/ethdb/leveldb" |
||||||
|
"github.com/ethereum/go-ethereum/ethdb/memorydb" |
||||||
|
"github.com/ethereum/go-ethereum/log" |
||||||
|
"github.com/harmony-one/harmony/internal/utils" |
||||||
|
"github.com/olekukonko/tablewriter" |
||||||
|
) |
||||||
|
|
||||||
|
// errNotSupported is returned by the ancient-store methods of nofreezedb to
// signal that no freezer backs this database.
var errNotSupported = errors.New("not supported")

// convertLegacyFn takes a raw freezer entry in an older format and
// returns it in the new format.
type convertLegacyFn = func([]byte) ([]byte, error)
||||||
|
|
||||||
|
// freezerdb is a database wrapper that enables freezer data retrievals by
// combining a key-value store with an ancient store.
type freezerdb struct {
	ancientRoot string // root directory of the ancient store
	ethdb.KeyValueStore
	ethdb.AncientStore
}

// AncientDatadir returns the path of root ancient directory.
func (frdb *freezerdb) AncientDatadir() (string, error) {
	return frdb.ancientRoot, nil
}
||||||
|
|
||||||
|
// Close implements io.Closer, closing both the fast key-value store as well as
|
||||||
|
// the slow ancient tables.
|
||||||
|
func (frdb *freezerdb) Close() error { |
||||||
|
var errs []error |
||||||
|
if err := frdb.AncientStore.Close(); err != nil { |
||||||
|
errs = append(errs, err) |
||||||
|
} |
||||||
|
if err := frdb.KeyValueStore.Close(); err != nil { |
||||||
|
errs = append(errs, err) |
||||||
|
} |
||||||
|
if len(errs) != 0 { |
||||||
|
return fmt.Errorf("%v", errs) |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// nofreezedb is a database wrapper that disables freezer data retrievals:
// every ancient-store method (except ReadAncients) fails with errNotSupported.
type nofreezedb struct {
	ethdb.KeyValueStore
}

// HasAncient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) HasAncient(kind string, number uint64) (bool, error) {
	return false, errNotSupported
}

// Ancient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
	return nil, errNotSupported
}

// AncientRange returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
	return nil, errNotSupported
}

// Ancients returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancients() (uint64, error) {
	return 0, errNotSupported
}

// Tail returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Tail() (uint64, error) {
	return 0, errNotSupported
}

// AncientSize returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
	return 0, errNotSupported
}

// ModifyAncients is not supported.
func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) {
	return 0, errNotSupported
}

// TruncateHead returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateHead(items uint64) error {
	return errNotSupported
}

// TruncateTail returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateTail(items uint64) error {
	return errNotSupported
}

// Sync returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Sync() error {
	return errNotSupported
}

// ReadAncients applies fn directly to db itself, since there is no freezer.
func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
	// Unlike other ancient-related methods, this method does not return
	// errNotSupported when invoked.
	// The reason for this is that the caller might want to do several things:
	// 1. Check if something is in freezer,
	// 2. If not, check leveldb.
	//
	// This will work, since the ancient-checks inside 'fn' will return errors,
	// and the leveldb work will continue.
	//
	// If we instead were to return errNotSupported here, then the caller would
	// have to explicitly check for that, having an extra clause to do the
	// non-ancient operations.
	return fn(db)
}

// MigrateTable processes the entries in a given table in sequence
// converting them to a new format if they're of an old format.
// Not supported without a backing freezer.
func (db *nofreezedb) MigrateTable(kind string, convert convertLegacyFn) error {
	return errNotSupported
}

// AncientDatadir returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientDatadir() (string, error) {
	return "", errNotSupported
}
||||||
|
|
||||||
|
// NewDatabase creates a high level database on top of a given key-value data
// store without a freezer moving immutable chain segments into cold storage.
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
	return &nofreezedb{KeyValueStore: db}
}
||||||
|
|
||||||
|
// resolveChainFreezerDir is a helper function which resolves the absolute path
|
||||||
|
// of chain freezer by considering backward compatibility.
|
||||||
|
// This function is NOT used, just ported over from the Ethereum
|
||||||
|
func resolveChainFreezerDir(ancient string) string { |
||||||
|
// Check if the chain freezer is already present in the specified
|
||||||
|
// sub folder, if not then two possibilities:
|
||||||
|
// - chain freezer is not initialized
|
||||||
|
// - chain freezer exists in legacy location (root ancient folder)
|
||||||
|
freezer := path.Join(ancient, chainFreezerName) |
||||||
|
if !common.FileExist(freezer) { |
||||||
|
if !common.FileExist(ancient) { |
||||||
|
// The entire ancient store is not initialized, still use the sub
|
||||||
|
// folder for initialization.
|
||||||
|
} else { |
||||||
|
// Ancient root is already initialized, then we hold the assumption
|
||||||
|
// that chain freezer is also initialized and located in root folder.
|
||||||
|
// In this case fallback to legacy location.
|
||||||
|
freezer = ancient |
||||||
|
log.Info("Found legacy ancient chain path", "location", ancient) |
||||||
|
} |
||||||
|
} |
||||||
|
return freezer |
||||||
|
} |
||||||
|
|
||||||
|
// NewMemoryDatabase creates an ephemeral in-memory key-value database without a
// freezer moving immutable chain segments into cold storage.
func NewMemoryDatabase() ethdb.Database {
	return NewDatabase(memorydb.New())
}
||||||
|
|
||||||
|
// NewMemoryDatabaseWithCap creates an ephemeral in-memory key-value database
// with an initial starting capacity, but without a freezer moving immutable
// chain segments into cold storage.
func NewMemoryDatabaseWithCap(size int) ethdb.Database {
	return NewDatabase(memorydb.NewWithCap(size))
}
||||||
|
|
||||||
|
// NewLevelDBDatabase creates a persistent key-value database without a freezer
|
||||||
|
// moving immutable chain segments into cold storage.
|
||||||
|
func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) { |
||||||
|
db, err := leveldb.New(file, cache, handles, namespace, readonly) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
log.Info("Using LevelDB as the backing database") |
||||||
|
return NewDatabase(db), nil |
||||||
|
} |
||||||
|
|
||||||
|
const (
	dbPebble  = "pebble"
	dbLeveldb = "leveldb"
)

// hasPreexistingDb checks the given data directory whether a database is already
// instantiated at that location, and if so, returns the type of database (or the
// empty string).
func hasPreexistingDb(path string) string {
	// Both engines create a CURRENT file; its absence means no database.
	if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
		return "" // No pre-existing db
	}
	// Pebble additionally writes OPTIONS* files; use those to tell it apart.
	matches, err := filepath.Glob(filepath.Join(path, "OPTIONS*"))
	if err != nil {
		panic(err) // only possible if the pattern is malformed
	}
	if len(matches) > 0 {
		return dbPebble
	}
	return dbLeveldb
}
||||||
|
|
||||||
|
// OpenOptions contains the options to apply when opening a database.
// OBS: If AncientsDirectory is empty, it indicates that no freezer is to be used.
type OpenOptions struct {
	Type              string // "leveldb" | "pebble"
	Directory         string // the datadir
	AncientsDirectory string // the ancients-dir
	Namespace         string // the namespace for database relevant metrics
	Cache             int    // the capacity(in megabytes) of the data caching
	Handles           int    // number of files to be open simultaneously
	ReadOnly          bool   // open the database in read-only mode
}
||||||
|
|
||||||
|
// openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble.
|
||||||
|
//
|
||||||
|
// type == null type != null
|
||||||
|
// +----------------------------------------
|
||||||
|
// db is non-existent | leveldb default | specified type
|
||||||
|
// db is existent | from db | specified type (if compatible)
|
||||||
|
func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) { |
||||||
|
existingDb := hasPreexistingDb(o.Directory) |
||||||
|
if len(existingDb) != 0 && len(o.Type) != 0 && o.Type != existingDb { |
||||||
|
return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb) |
||||||
|
} |
||||||
|
if o.Type == dbPebble || existingDb == dbPebble { |
||||||
|
if PebbleEnabled { |
||||||
|
log.Info("Using pebble as the backing database") |
||||||
|
return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) |
||||||
|
} else { |
||||||
|
return nil, errors.New("db.engine 'pebble' not supported on this platform") |
||||||
|
} |
||||||
|
} |
||||||
|
if len(o.Type) != 0 && o.Type != dbLeveldb { |
||||||
|
return nil, fmt.Errorf("unknown db.engine %v", o.Type) |
||||||
|
} |
||||||
|
log.Info("Using leveldb as the backing database") |
||||||
|
// Use leveldb, either as default (no explicit choice), or pre-existing, or chosen explicitly
|
||||||
|
return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) |
||||||
|
} |
||||||
|
|
||||||
|
// counter is a uint64 tally that renders itself as a decimal string and can
// express another value as an integer percentage of itself.
type counter uint64

// String implements fmt.Stringer, formatting the counter as a decimal number.
func (c counter) String() string {
	return fmt.Sprintf("%d", uint64(c))
}

// Percentage returns current expressed as an integer percentage of the counter.
func (c counter) Percentage(current uint64) string {
	percent := current * 100 / uint64(c)
	return fmt.Sprintf("%d", percent)
}
||||||
|
|
||||||
|
// stat stores sizes and count for a parameter
type stat struct {
	size  common.StorageSize // cumulative byte size of all added entries
	count counter            // number of entries accumulated
}

// Add size to the stat and increase the counter by 1
func (s *stat) Add(size common.StorageSize) {
	s.size += size
	s.count++
}

// Size returns the accumulated size as a human-readable string.
func (s *stat) Size() string {
	return s.size.String()
}

// Count returns the number of accumulated entries as a decimal string.
func (s *stat) Count() string {
	return s.count.String()
}
||||||
|
|
||||||
|
// InspectDatabase traverses the entire database and checks the size
// of all different categories of data. keyPrefix and keyStart are forwarded
// to db.NewIterator to bound the key-value scan; freezer tables are always
// inspected in full. Results are rendered as a table on stdout.
// This function is NOT used, just ported over from the Ethereum
func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
	it := db.NewIterator(keyPrefix, keyStart)
	defer it.Release()

	var (
		count  int64
		start  = time.Now()
		logged = time.Now()

		// Key-value store statistics
		headers         stat
		bodies          stat
		receipts        stat
		tds             stat
		numHashPairings stat
		hashNumPairings stat
		tries           stat
		codes           stat
		txLookups       stat
		accountSnaps    stat
		storageSnaps    stat
		preimages       stat
		bloomBits       stat
		beaconHeaders   stat
		cliqueSnaps     stat

		// Les statistic
		chtTrieNodes   stat
		bloomTrieNodes stat

		// Meta- and unaccounted data
		metadata    stat
		unaccounted stat

		// Totals
		total common.StorageSize
	)
	// Inspect key-value database first.
	for it.Next() {
		var (
			key  = it.Key()
			size = common.StorageSize(len(key) + len(it.Value()))
		)
		total += size
		// Classify the entry into a category by its key prefix and length.
		switch {
		case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength):
			headers.Add(size)
		case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength):
			bodies.Add(size)
		case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
			receipts.Add(size)
		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
			tds.Add(size)
		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
			numHashPairings.Add(size)
		case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
			hashNumPairings.Add(size)
		case len(key) == common.HashLength:
			// Keys that are exactly one hash long are counted as trie nodes.
			tries.Add(size)
		case bytes.HasPrefix(key, CodePrefix) && len(key) == len(CodePrefix)+common.HashLength:
			codes.Add(size)
		case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
			txLookups.Add(size)
		case bytes.HasPrefix(key, SnapshotAccountPrefix) && len(key) == (len(SnapshotAccountPrefix)+common.HashLength):
			accountSnaps.Add(size)
		case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
			storageSnaps.Add(size)
		case bytes.HasPrefix(key, PreimagePrefix) && len(key) == (len(PreimagePrefix)+common.HashLength):
			preimages.Add(size)
		case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
			metadata.Add(size)
		case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
			metadata.Add(size)
		case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
			bloomBits.Add(size)
		case bytes.HasPrefix(key, BloomBitsIndexPrefix):
			bloomBits.Add(size)
		case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
			beaconHeaders.Add(size)
		case bytes.HasPrefix(key, CliqueSnapshotPrefix) && len(key) == 7+common.HashLength:
			cliqueSnaps.Add(size)
		case bytes.HasPrefix(key, ChtTablePrefix) ||
			bytes.HasPrefix(key, ChtIndexTablePrefix) ||
			bytes.HasPrefix(key, ChtPrefix): // Canonical hash trie
			chtTrieNodes.Add(size)
		case bytes.HasPrefix(key, BloomTrieTablePrefix) ||
			bytes.HasPrefix(key, BloomTrieIndexPrefix) ||
			bytes.HasPrefix(key, BloomTriePrefix): // Bloomtrie sub
			bloomTrieNodes.Add(size)
		default:
			// Anything left is either a known singleton metadata key or
			// genuinely unaccounted data.
			var accounted bool
			for _, meta := range [][]byte{
				databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, headFinalizedBlockKey,
				lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
				snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
				uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
			} {
				if bytes.Equal(key, meta) {
					metadata.Add(size)
					accounted = true
					break
				}
			}
			if !accounted {
				unaccounted.Add(size)
			}
		}
		count++
		// Emit progress at most every 8 seconds on long-running scans.
		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
			log.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			logged = time.Now()
		}
	}
	// Display the database statistic of key-value store.
	stats := [][]string{
		{"Key-Value store", "Headers", headers.Size(), headers.Count()},
		{"Key-Value store", "Bodies", bodies.Size(), bodies.Count()},
		{"Key-Value store", "Receipt lists", receipts.Size(), receipts.Count()},
		{"Key-Value store", "Difficulties", tds.Size(), tds.Count()},
		{"Key-Value store", "Block number->hash", numHashPairings.Size(), numHashPairings.Count()},
		{"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()},
		{"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},
		{"Key-Value store", "Bloombit index", bloomBits.Size(), bloomBits.Count()},
		{"Key-Value store", "Contract codes", codes.Size(), codes.Count()},
		{"Key-Value store", "Trie nodes", tries.Size(), tries.Count()},
		{"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
		{"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
		{"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
		{"Key-Value store", "Beacon sync headers", beaconHeaders.Size(), beaconHeaders.Count()},
		{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
		{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
		{"Light client", "CHT trie nodes", chtTrieNodes.Size(), chtTrieNodes.Count()},
		{"Light client", "Bloom trie nodes", bloomTrieNodes.Size(), bloomTrieNodes.Count()},
	}
	// Inspect all registered append-only file store then.
	ancients, err := inspectFreezers(db)
	if err != nil {
		return err
	}
	for _, ancient := range ancients {
		for _, table := range ancient.sizes {
			// NOTE(review): strings.Title is deprecated since Go 1.18; kept
			// here for parity with the upstream implementation.
			stats = append(stats, []string{
				fmt.Sprintf("Ancient store (%s)", strings.Title(ancient.name)),
				strings.Title(table.name),
				table.size.String(),
				fmt.Sprintf("%d", ancient.count()),
			})
		}
		total += ancient.size()
	}
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Database", "Category", "Size", "Items"})
	table.SetFooter([]string{"", "Total", total.String(), " "})
	table.AppendBulk(stats)
	table.Render()

	if unaccounted.size > 0 {
		utils.Logger().Error().
			Interface("size", unaccounted.size).
			Interface("count", unaccounted.count).
			Msg("Database contains unaccounted data")
	}
	return nil
}
@ -0,0 +1,17 @@ |
|||||||
|
// Copyright 2017 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package rawdb |
@ -0,0 +1,37 @@ |
|||||||
|
// Copyright 2023 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
|
||||||
|
|
||||||
|
//go:build arm64 || amd64
|
||||||
|
|
||||||
|
package rawdb |
||||||
|
|
||||||
|
import ( |
||||||
|
"github.com/ethereum/go-ethereum/ethdb" |
||||||
|
"github.com/ethereum/go-ethereum/ethdb/pebble" |
||||||
|
) |
||||||
|
|
||||||
|
// PebbleEnabled reports that pebble is available on this build platform
// (pebble is unsupported on 32-bit architectures; see the build constraint).
const PebbleEnabled = true
||||||
|
|
||||||
|
// NewPebbleDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
//
// file is the on-disk location, cache the data-cache capacity in megabytes,
// handles the number of files the database may keep open simultaneously, and
// namespace the prefix used for database-relevant metrics (see OpenOptions).
func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
	db, err := pebble.New(file, cache, handles, namespace, readonly)
	if err != nil {
		return nil, err
	}
	return NewDatabase(db), nil
}
@ -0,0 +1,34 @@ |
|||||||
|
// Copyright 2023 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
//go:build !(arm64 || amd64)
|
||||||
|
|
||||||
|
package rawdb |
||||||
|
|
||||||
|
import ( |
||||||
|
"errors" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/ethdb" |
||||||
|
) |
||||||
|
|
||||||
|
// PebbleEnabled reports that pebble is unavailable here: pebble is
// unsupported on 32-bit architectures (see the build constraint).
const PebbleEnabled = false
||||||
|
|
||||||
|
// NewPebbleDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
//
// This stub always fails: pebble is not built on this platform.
func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
	return nil, errors.New("pebble is not supported on this platform")
}
@ -0,0 +1,47 @@ |
|||||||
|
// Copyright 2022 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package rawdb |
||||||
|
|
||||||
|
import "github.com/ethereum/go-ethereum/ethdb" |
||||||
|
|
||||||
|
// KeyLengthIterator is a wrapper for a database iterator that ensures only key-value pairs
// with a specific key length will be returned.
type KeyLengthIterator struct {
	requiredKeyLength int // exact key length (in bytes) an entry must have to be yielded
	ethdb.Iterator        // underlying iterator, advanced past non-matching keys by Next
}
||||||
|
|
||||||
|
// NewKeyLengthIterator returns a wrapped version of the iterator that will only return key-value
|
||||||
|
// pairs where keys with a specific key length will be returned.
|
||||||
|
func NewKeyLengthIterator(it ethdb.Iterator, keyLen int) ethdb.Iterator { |
||||||
|
return &KeyLengthIterator{ |
||||||
|
Iterator: it, |
||||||
|
requiredKeyLength: keyLen, |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
func (it *KeyLengthIterator) Next() bool { |
||||||
|
// Return true as soon as a key with the required key length is discovered
|
||||||
|
for it.Iterator.Next() { |
||||||
|
if len(it.Iterator.Key()) == it.requiredKeyLength { |
||||||
|
return true |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Return false when we exhaust the keys in the underlying iterator.
|
||||||
|
return false |
||||||
|
} |
@ -0,0 +1,60 @@ |
|||||||
|
// Copyright 2022 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package rawdb |
||||||
|
|
||||||
|
import ( |
||||||
|
"encoding/binary" |
||||||
|
"testing" |
||||||
|
) |
||||||
|
|
||||||
|
func TestKeyLengthIterator(t *testing.T) { |
||||||
|
db := NewMemoryDatabase() |
||||||
|
|
||||||
|
keyLen := 8 |
||||||
|
expectedKeys := make(map[string]struct{}) |
||||||
|
for i := 0; i < 100; i++ { |
||||||
|
key := make([]byte, keyLen) |
||||||
|
binary.BigEndian.PutUint64(key, uint64(i)) |
||||||
|
if err := db.Put(key, []byte{0x1}); err != nil { |
||||||
|
t.Fatal(err) |
||||||
|
} |
||||||
|
expectedKeys[string(key)] = struct{}{} |
||||||
|
|
||||||
|
longerKey := make([]byte, keyLen*2) |
||||||
|
binary.BigEndian.PutUint64(longerKey, uint64(i)) |
||||||
|
if err := db.Put(longerKey, []byte{0x1}); err != nil { |
||||||
|
t.Fatal(err) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
it := NewKeyLengthIterator(db.NewIterator(nil, nil), keyLen) |
||||||
|
for it.Next() { |
||||||
|
key := it.Key() |
||||||
|
_, exists := expectedKeys[string(key)] |
||||||
|
if !exists { |
||||||
|
t.Fatalf("Found unexpected key %d", binary.BigEndian.Uint64(key)) |
||||||
|
} |
||||||
|
delete(expectedKeys, string(key)) |
||||||
|
if len(key) != keyLen { |
||||||
|
t.Fatalf("Found unexpected key in key length iterator with length %d", len(key)) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
if len(expectedKeys) != 0 { |
||||||
|
t.Fatalf("Expected all keys of length %d to be removed from expected keys during iteration", keyLen) |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,313 @@ |
|||||||
|
// Copyright 2018 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package rawdb |
||||||
|
|
||||||
|
import ( |
||||||
|
"github.com/ethereum/go-ethereum/ethdb" |
||||||
|
) |
||||||
|
|
||||||
|
// table is a wrapper around a database that prefixes each key access with a pre-
// configured string.
type table struct {
	db     ethdb.Database // underlying backing store, shared with other tables
	prefix string         // string prepended to every key before delegation
}

// NewTable returns a database object that prefixes all keys with a given string.
func NewTable(db ethdb.Database, prefix string) ethdb.Database {
	return &table{
		db:     db,
		prefix: prefix,
	}
}

// Close is a noop to implement the Database interface.
// The underlying database is shared, so closing it here would be wrong.
func (t *table) Close() error {
	return nil
}

// Has retrieves if a prefixed version of a key is present in the database.
func (t *table) Has(key []byte) (bool, error) {
	return t.db.Has(append([]byte(t.prefix), key...))
}

// Get retrieves the given prefixed key if it's present in the database.
func (t *table) Get(key []byte) ([]byte, error) {
	return t.db.Get(append([]byte(t.prefix), key...))
}
||||||
|
|
||||||
|
// The ancient-store methods below forward to the underlying database verbatim:
// the table prefix is applied only to key-value accesses, never to freezer data.

// HasAncient is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) HasAncient(kind string, number uint64) (bool, error) {
	return t.db.HasAncient(kind, number)
}

// Ancient is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) Ancient(kind string, number uint64) ([]byte, error) {
	return t.db.Ancient(kind, number)
}

// AncientRange is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
	return t.db.AncientRange(kind, start, count, maxBytes)
}

// Ancients is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) Ancients() (uint64, error) {
	return t.db.Ancients()
}

// Tail is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) Tail() (uint64, error) {
	return t.db.Tail()
}

// AncientSize is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) AncientSize(kind string) (uint64, error) {
	return t.db.AncientSize(kind)
}

// ModifyAncients runs an ancient write operation on the underlying database.
func (t *table) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, error) {
	return t.db.ModifyAncients(fn)
}

// ReadAncients runs the given read operation against the underlying ancient store.
func (t *table) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
	return t.db.ReadAncients(fn)
}

// TruncateHead is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) TruncateHead(items uint64) error {
	return t.db.TruncateHead(items)
}

// TruncateTail is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) TruncateTail(items uint64) error {
	return t.db.TruncateTail(items)
}

// Sync is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) Sync() error {
	return t.db.Sync()
}

// MigrateTable processes the entries in a given table in sequence
// converting them to a new format if they're of an old format.
func (t *table) MigrateTable(kind string, convert convertLegacyFn) error {
	return t.db.MigrateTable(kind, convert)
}

// AncientDatadir returns the ancient datadir of the underlying database.
func (t *table) AncientDatadir() (string, error) {
	return t.db.AncientDatadir()
}
||||||
|
|
||||||
|
// Put inserts the given value into the database at a prefixed version of the
// provided key.
func (t *table) Put(key []byte, value []byte) error {
	return t.db.Put(append([]byte(t.prefix), key...), value)
}

// Delete removes the given prefixed key from the database.
func (t *table) Delete(key []byte) error {
	return t.db.Delete(append([]byte(t.prefix), key...))
}

// NewIterator creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix, starting at a particular
// initial key (or after, if it does not exist).
func (t *table) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
	// Prepend the table prefix to the caller's prefix; tableIterator.Key
	// strips it again so callers see logical (unprefixed) keys.
	innerPrefix := append([]byte(t.prefix), prefix...)
	iter := t.db.NewIterator(innerPrefix, start)
	return &tableIterator{
		iter:   iter,
		prefix: t.prefix,
	}
}

// NewIteratorWithPrefix creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix.
func (t *table) NewIteratorWithPrefix(prefix []byte) ethdb.Iterator {
	return t.NewIterator(prefix, nil)
}

// Stat returns a particular internal stat of the database.
func (t *table) Stat(property string) (string, error) {
	return t.db.Stat(property)
}
||||||
|
|
||||||
|
// Compact flattens the underlying data store for the given key range. In essence,
// deleted and overwritten versions are discarded, and the data is rearranged to
// reduce the cost of operations needed to access them.
//
// A nil start is treated as a key before all keys in the data store; a nil limit
// is treated as a key after all keys in the data store. If both is nil then it
// will compact entire data store.
func (t *table) Compact(start []byte, limit []byte) error {
	// If no start was specified, use the table prefix as the first value
	if start == nil {
		start = []byte(t.prefix)
	} else {
		start = append([]byte(t.prefix), start...)
	}
	// If no limit was specified, use the first element not matching the prefix
	// as the limit. Incrementing the last non-0xff byte of the prefix yields
	// the smallest key strictly greater than every prefixed key.
	if limit == nil {
		limit = []byte(t.prefix)
		for i := len(limit) - 1; i >= 0; i-- {
			// Bump the current character, stopping if it doesn't overflow
			limit[i]++
			if limit[i] > 0 {
				break
			}
			// Character overflown, proceed to the next or nil if the last
			if i == 0 {
				limit = nil
			}
		}
	} else {
		limit = append([]byte(t.prefix), limit...)
	}
	// Range correctly calculated based on table prefix, delegate down
	return t.db.Compact(start, limit)
}
||||||
|
|
||||||
|
// NewBatch creates a write-only database that buffers changes to its host db
// until a final write is called, each operation prefixing all keys with the
// pre-configured string.
func (t *table) NewBatch() ethdb.Batch {
	return &tableBatch{t.db.NewBatch(), t.prefix}
}

// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
func (t *table) NewBatchWithSize(size int) ethdb.Batch {
	return &tableBatch{t.db.NewBatchWithSize(size), t.prefix}
}

// NewSnapshot creates a database snapshot based on the current state.
// The created snapshot will not be affected by all following mutations
// happened on the database.
// NOTE(review): the snapshot is returned unwrapped, so reads through it see
// raw (prefixed) keys rather than table-relative ones — confirm callers
// expect that.
func (t *table) NewSnapshot() (ethdb.Snapshot, error) {
	return t.db.NewSnapshot()
}
||||||
|
|
||||||
|
// tableBatch is a wrapper around a database batch that prefixes each key access
// with a pre-configured string.
type tableBatch struct {
	batch  ethdb.Batch // underlying batch on the backing database
	prefix string      // string prepended to every key on Put/Delete
}

// Put inserts the given value into the batch for later committing.
func (b *tableBatch) Put(key, value []byte) error {
	return b.batch.Put(append([]byte(b.prefix), key...), value)
}

// Delete inserts a key removal into the batch for later committing.
func (b *tableBatch) Delete(key []byte) error {
	return b.batch.Delete(append([]byte(b.prefix), key...))
}

// ValueSize retrieves the amount of data queued up for writing.
func (b *tableBatch) ValueSize() int {
	return b.batch.ValueSize()
}

// Write flushes any accumulated data to disk.
func (b *tableBatch) Write() error {
	return b.batch.Write()
}

// Reset resets the batch for reuse.
func (b *tableBatch) Reset() {
	b.batch.Reset()
}
||||||
|
|
||||||
|
// tableReplayer is a wrapper around a batch replayer which truncates
// the added prefix.
type tableReplayer struct {
	w      ethdb.KeyValueWriter // destination writer receiving unprefixed keys
	prefix string               // prefix to strip from every replayed key
}

// Put implements the interface KeyValueWriter.
func (r *tableReplayer) Put(key []byte, value []byte) error {
	// Drop the table prefix before forwarding to the caller's writer.
	trimmed := key[len(r.prefix):]
	return r.w.Put(trimmed, value)
}

// Delete implements the interface KeyValueWriter.
func (r *tableReplayer) Delete(key []byte) error {
	trimmed := key[len(r.prefix):]
	return r.w.Delete(trimmed)
}

// Replay replays the batch contents, presenting keys to w with the table
// prefix already stripped.
func (b *tableBatch) Replay(w ethdb.KeyValueWriter) error {
	return b.batch.Replay(&tableReplayer{w: w, prefix: b.prefix})
}
||||||
|
|
||||||
|
// tableIterator is a wrapper around a database iterator that prefixes each key access
// with a pre-configured string.
type tableIterator struct {
	iter   ethdb.Iterator // Underlying iterator walking the prefixed keyspace
	prefix string         // Prefix stripped from every key returned by Key
}
||||||
|
|
||||||
|
// Next moves the iterator to the next key/value pair. It returns whether the
// iterator is exhausted.
func (iter *tableIterator) Next() bool {
	return iter.iter.Next()
}
||||||
|
|
||||||
|
// Error returns any accumulated error. Exhausting all the key/value pairs
// is not considered to be an error.
func (iter *tableIterator) Error() error {
	return iter.iter.Error()
}
||||||
|
|
||||||
|
// Key returns the key of the current key/value pair, or nil if done. The caller
|
||||||
|
// should not modify the contents of the returned slice, and its contents may
|
||||||
|
// change on the next call to Next.
|
||||||
|
func (iter *tableIterator) Key() []byte { |
||||||
|
key := iter.iter.Key() |
||||||
|
if key == nil { |
||||||
|
return nil |
||||||
|
} |
||||||
|
return key[len(iter.prefix):] |
||||||
|
} |
||||||
|
|
||||||
|
// Value returns the value of the current key/value pair, or nil if done. The
// caller should not modify the contents of the returned slice, and its contents
// may change on the next call to Next.
func (iter *tableIterator) Value() []byte {
	return iter.iter.Value()
}
||||||
|
|
||||||
|
// Release releases associated resources. Release should always succeed and can
// be called multiple times without causing error.
func (iter *tableIterator) Release() {
	iter.iter.Release()
}
@ -0,0 +1,128 @@ |
|||||||
|
// Copyright 2020 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package rawdb |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
"testing" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/ethdb" |
||||||
|
) |
||||||
|
|
||||||
|
// Tests the table database wrapper with a non-empty and with an empty prefix.
func TestTableDatabase(t *testing.T)            { testTableDatabase(t, "prefix") }
func TestEmptyPrefixTableDatabase(t *testing.T) { testTableDatabase(t, "") }
||||||
|
|
||||||
|
// testReplayer records the keys of all Put and Delete operations replayed
// into it, so tests can assert on the de-prefixed key stream.
type testReplayer struct {
	puts [][]byte // Keys of replayed Put operations, in order
	dels [][]byte // Keys of replayed Delete operations, in order
}
||||||
|
|
||||||
|
// Put records the replayed key; the value is intentionally discarded.
func (r *testReplayer) Put(key []byte, value []byte) error {
	r.puts = append(r.puts, key)
	return nil
}
||||||
|
|
||||||
|
// Delete records the replayed key.
func (r *testReplayer) Delete(key []byte) error {
	r.dels = append(r.dels, key)
	return nil
}
||||||
|
|
||||||
|
func testTableDatabase(t *testing.T, prefix string) { |
||||||
|
db := NewTable(NewMemoryDatabase(), prefix) |
||||||
|
|
||||||
|
var entries = []struct { |
||||||
|
key []byte |
||||||
|
value []byte |
||||||
|
}{ |
||||||
|
{[]byte{0x01, 0x02}, []byte{0x0a, 0x0b}}, |
||||||
|
{[]byte{0x03, 0x04}, []byte{0x0c, 0x0d}}, |
||||||
|
{[]byte{0x05, 0x06}, []byte{0x0e, 0x0f}}, |
||||||
|
|
||||||
|
{[]byte{0xff, 0xff, 0x01}, []byte{0x1a, 0x1b}}, |
||||||
|
{[]byte{0xff, 0xff, 0x02}, []byte{0x1c, 0x1d}}, |
||||||
|
{[]byte{0xff, 0xff, 0x03}, []byte{0x1e, 0x1f}}, |
||||||
|
} |
||||||
|
|
||||||
|
// Test Put/Get operation
|
||||||
|
for _, entry := range entries { |
||||||
|
db.Put(entry.key, entry.value) |
||||||
|
} |
||||||
|
for _, entry := range entries { |
||||||
|
got, err := db.Get(entry.key) |
||||||
|
if err != nil { |
||||||
|
t.Fatalf("Failed to get value: %v", err) |
||||||
|
} |
||||||
|
if !bytes.Equal(got, entry.value) { |
||||||
|
t.Fatalf("Value mismatch: want=%v, got=%v", entry.value, got) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Test batch operation
|
||||||
|
db = NewTable(NewMemoryDatabase(), prefix) |
||||||
|
batch := db.NewBatch() |
||||||
|
for _, entry := range entries { |
||||||
|
batch.Put(entry.key, entry.value) |
||||||
|
} |
||||||
|
batch.Write() |
||||||
|
for _, entry := range entries { |
||||||
|
got, err := db.Get(entry.key) |
||||||
|
if err != nil { |
||||||
|
t.Fatalf("Failed to get value: %v", err) |
||||||
|
} |
||||||
|
if !bytes.Equal(got, entry.value) { |
||||||
|
t.Fatalf("Value mismatch: want=%v, got=%v", entry.value, got) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Test batch replayer
|
||||||
|
r := &testReplayer{} |
||||||
|
batch.Replay(r) |
||||||
|
for index, entry := range entries { |
||||||
|
got := r.puts[index] |
||||||
|
if !bytes.Equal(got, entry.key) { |
||||||
|
t.Fatalf("Key mismatch: want=%v, got=%v", entry.key, got) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
check := func(iter ethdb.Iterator, expCount, index int) { |
||||||
|
count := 0 |
||||||
|
for iter.Next() { |
||||||
|
key, value := iter.Key(), iter.Value() |
||||||
|
if !bytes.Equal(key, entries[index].key) { |
||||||
|
t.Fatalf("Key mismatch: want=%v, got=%v", entries[index].key, key) |
||||||
|
} |
||||||
|
if !bytes.Equal(value, entries[index].value) { |
||||||
|
t.Fatalf("Value mismatch: want=%v, got=%v", entries[index].value, value) |
||||||
|
} |
||||||
|
index += 1 |
||||||
|
count++ |
||||||
|
} |
||||||
|
if count != expCount { |
||||||
|
t.Fatalf("Wrong number of elems, exp %d got %d", expCount, count) |
||||||
|
} |
||||||
|
iter.Release() |
||||||
|
} |
||||||
|
// Test iterators
|
||||||
|
check(db.NewIterator(nil, nil), 6, 0) |
||||||
|
// Test iterators with prefix
|
||||||
|
check(db.NewIterator([]byte{0xff, 0xff}, nil), 3, 3) |
||||||
|
// Test iterators with start point
|
||||||
|
check(db.NewIterator(nil, []byte{0xff, 0xff, 0x02}), 2, 4) |
||||||
|
// Test iterators with prefix and start point
|
||||||
|
check(db.NewIterator([]byte{0xee}, nil), 0, 0) |
||||||
|
check(db.NewIterator(nil, []byte{0x00}), 6, 0) |
||||||
|
} |
Binary file not shown.
@ -0,0 +1,136 @@ |
|||||||
|
// Copyright 2020 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package state |
||||||
|
|
||||||
|
import ( |
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
) |
||||||
|
|
||||||
|
// accessList tracks the addresses and storage slots touched by a transaction
// (EIP-2929/2930 style warm-access bookkeeping).
type accessList struct {
	// addresses maps each listed address to an index into slots, or -1 when
	// the address is present without any listed storage slots (see AddAddress).
	addresses map[common.Address]int
	// slots holds one slot-set per address that has listed slots.
	slots []map[common.Hash]struct{}
}
||||||
|
|
||||||
|
// ContainsAddress returns true if the address is in the access list.
|
||||||
|
func (al *accessList) ContainsAddress(address common.Address) bool { |
||||||
|
_, ok := al.addresses[address] |
||||||
|
return ok |
||||||
|
} |
||||||
|
|
||||||
|
// Contains checks if a slot within an account is present in the access list, returning
// separate flags for the presence of the account and the slot respectively.
func (al *accessList) Contains(address common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) {
	idx, ok := al.addresses[address]
	if !ok {
		// no such address (and hence zero slots)
		return false, false
	}
	if idx == -1 {
		// address yes, but no slots
		return true, false
	}
	// idx is a valid index into the per-address slot sets
	_, slotPresent = al.slots[idx][slot]
	return true, slotPresent
}
||||||
|
|
||||||
|
// newAccessList creates a new accessList with an empty address map; the
// slots slice starts nil and grows as slots are added.
func newAccessList() *accessList {
	return &accessList{
		addresses: make(map[common.Address]int),
	}
}
||||||
|
|
||||||
|
// Copy creates an independent copy of an accessList.
|
||||||
|
func (a *accessList) Copy() *accessList { |
||||||
|
cp := newAccessList() |
||||||
|
for k, v := range a.addresses { |
||||||
|
cp.addresses[k] = v |
||||||
|
} |
||||||
|
cp.slots = make([]map[common.Hash]struct{}, len(a.slots)) |
||||||
|
for i, slotMap := range a.slots { |
||||||
|
newSlotmap := make(map[common.Hash]struct{}, len(slotMap)) |
||||||
|
for k := range slotMap { |
||||||
|
newSlotmap[k] = struct{}{} |
||||||
|
} |
||||||
|
cp.slots[i] = newSlotmap |
||||||
|
} |
||||||
|
return cp |
||||||
|
} |
||||||
|
|
||||||
|
// AddAddress adds an address to the access list, and returns 'true' if the operation
// caused a change (addr was not previously in the list).
func (al *accessList) AddAddress(address common.Address) bool {
	if _, present := al.addresses[address]; present {
		return false
	}
	// -1 marks "address listed, but no storage slots yet" (see Contains).
	al.addresses[address] = -1
	return true
}
||||||
|
|
||||||
|
// AddSlot adds the specified (addr, slot) combo to the access list.
// Return values are:
// - address added
// - slot added
// For any 'true' value returned, a corresponding journal entry must be made.
func (al *accessList) AddSlot(address common.Address, slot common.Hash) (addrChange bool, slotChange bool) {
	idx, addrPresent := al.addresses[address]
	if !addrPresent || idx == -1 {
		// Address not present, or addr present but no slots there
		al.addresses[address] = len(al.slots)
		slotmap := map[common.Hash]struct{}{slot: {}}
		al.slots = append(al.slots, slotmap)
		return !addrPresent, true
	}
	// There is already an (address,slot) mapping
	slotmap := al.slots[idx]
	if _, ok := slotmap[slot]; !ok {
		slotmap[slot] = struct{}{}
		// Journal add slot change
		return false, true
	}
	// No changes required
	return false, false
}
||||||
|
|
||||||
|
// DeleteSlot removes an (address, slot)-tuple from the access list.
// This operation needs to be performed in the same order as the addition happened.
// This method is meant to be used by the journal, which maintains ordering of
// operations.
func (al *accessList) DeleteSlot(address common.Address, slot common.Hash) {
	idx, addrOk := al.addresses[address]
	// There are two ways this can fail
	if !addrOk {
		panic("reverting slot change, address not present in list")
	}
	slotmap := al.slots[idx]
	delete(slotmap, slot)
	// If that was the last (first) slot, remove it
	// Since additions and rollbacks are always performed in order,
	// we can delete the item without worrying about screwing up later indices
	if len(slotmap) == 0 {
		al.slots = al.slots[:idx]
		// Downgrade the address back to "listed without slots".
		al.addresses[address] = -1
	}
}
||||||
|
|
||||||
|
// DeleteAddress removes an address from the access list. This operation
// needs to be performed in the same order as the addition happened.
// This method is meant to be used by the journal, which maintains ordering of
// operations.
func (al *accessList) DeleteAddress(address common.Address) {
	delete(al.addresses, address)
}
@ -0,0 +1,155 @@ |
|||||||
|
// Copyright 2015 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package state |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
"fmt" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/core/types" |
||||||
|
"github.com/ethereum/go-ethereum/rlp" |
||||||
|
"github.com/ethereum/go-ethereum/trie" |
||||||
|
) |
||||||
|
|
||||||
|
// NodeIterator is an iterator to traverse the entire state trie post-order,
// including all of the contract code and contract state tries.
type NodeIterator struct {
	state *DB // State being iterated

	stateIt trie.NodeIterator // Primary iterator for the global state trie
	dataIt  trie.NodeIterator // Secondary iterator for the data trie of a contract

	accountHash common.Hash // Hash of the node containing the account
	codeHash    common.Hash // Hash of the contract source code
	code        []byte      // Source code associated with a contract

	Hash   common.Hash // Hash of the current entry being iterated (nil if not standalone)
	Parent common.Hash // Hash of the first full ancestor node (nil if current is the root)

	Error error // Failure set in case of an internal error in the iterator
}
||||||
|
|
||||||
|
// NewNodeIterator creates an post-order state node iterator.
func NewNodeIterator(state *DB) *NodeIterator {
	return &NodeIterator{
		state: state,
	}
}
||||||
|
|
||||||
|
// Next moves the iterator to the next node, returning whether there are any
// further nodes. In case of an internal error this method returns false and
// sets the Error field to the encountered failure.
func (it *NodeIterator) Next() bool {
	// If the iterator failed previously, don't do anything
	if it.Error != nil {
		return false
	}
	// Otherwise step forward with the iterator and report any errors
	if err := it.step(); err != nil {
		it.Error = err
		return false
	}
	return it.retrieve()
}
||||||
|
|
||||||
|
// step moves the iterator to the next entry of the state trie. The order of
// the checks below encodes the traversal priority: finish an open storage
// trie first, then discard pending contract code, then advance the account
// trie — so changing statement order here changes iteration order.
func (it *NodeIterator) step() error {
	// Abort if we reached the end of the iteration
	if it.state == nil {
		return nil
	}
	// Initialize the iterator if we've just started
	if it.stateIt == nil {
		it.stateIt = it.state.trie.NodeIterator(nil)
	}
	// If we had data nodes previously, we surely have at least state nodes
	if it.dataIt != nil {
		if cont := it.dataIt.Next(true); !cont {
			if it.dataIt.Error() != nil {
				return it.dataIt.Error()
			}
			it.dataIt = nil
		}
		return nil
	}
	// If we had source code previously, discard that
	if it.code != nil {
		it.code = nil
		return nil
	}
	// Step to the next state trie node, terminating if we're out of nodes
	if cont := it.stateIt.Next(true); !cont {
		if it.stateIt.Error() != nil {
			return it.stateIt.Error()
		}
		it.state, it.stateIt = nil, nil
		return nil
	}
	// If the state trie node is an internal entry, leave as is
	if !it.stateIt.Leaf() {
		return nil
	}
	// Otherwise we've reached an account node, initiate data iteration
	var account Account
	if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil {
		return err
	}
	dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, common.BytesToHash(it.stateIt.LeafKey()), account.Root)
	if err != nil {
		return err
	}
	it.dataIt = dataTrie.NodeIterator(nil)
	if !it.dataIt.Next(true) {
		// Empty storage trie: nothing to iterate for this account.
		it.dataIt = nil
	}
	if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) {
		// Contract account: load its code so retrieve can report it.
		it.codeHash = common.BytesToHash(account.CodeHash)
		addrHash := common.BytesToHash(it.stateIt.LeafKey())
		it.code, err = it.state.db.ContractCode(addrHash, common.BytesToHash(account.CodeHash))
		if err != nil {
			return fmt.Errorf("code %x: %v", account.CodeHash, err)
		}
	}
	it.accountHash = it.stateIt.Parent()
	return nil
}
||||||
|
|
||||||
|
// retrieve pulls and caches the current state entry the iterator is traversing.
// The method returns whether there are any more data left for inspection.
func (it *NodeIterator) retrieve() bool {
	// Clear out any previously set values
	it.Hash = common.Hash{}

	// If the iteration's done, return no available data
	if it.state == nil {
		return false
	}
	// Otherwise retrieve the current entry; case order mirrors the traversal
	// priority in step (storage node, then code, then account-trie node).
	switch {
	case it.dataIt != nil:
		it.Hash, it.Parent = it.dataIt.Hash(), it.dataIt.Parent()
		if it.Parent == (common.Hash{}) {
			// Root of the storage trie: its parent is the owning account node.
			it.Parent = it.accountHash
		}
	case it.code != nil:
		it.Hash, it.Parent = it.codeHash, it.accountHash
	case it.stateIt != nil:
		it.Hash, it.Parent = it.stateIt.Hash(), it.stateIt.Parent()
	}
	return true
}
@ -0,0 +1,142 @@ |
|||||||
|
// Copyright 2016 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package state |
||||||
|
|
||||||
|
import ( |
||||||
|
"math/big" |
||||||
|
"testing" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/crypto" |
||||||
|
"github.com/ethereum/go-ethereum/ethdb" |
||||||
|
"github.com/harmony-one/harmony/core/rawdb" |
||||||
|
) |
||||||
|
|
||||||
|
// testAccount is the data associated with an account used by the state tests.
type testAccount struct {
	address common.Address // Account address the fields below were written to
	balance *big.Int       // Expected balance
	nonce   uint64         // Expected nonce
	code    []byte         // Expected contract code (nil for plain accounts)
}
||||||
|
|
||||||
|
// makeTestState create a sample test state to test node-wise reconstruction.
// It returns the backing database, the state database wrapper, the committed
// state root and the list of accounts that were written.
func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) {
	// Create an empty state
	db := rawdb.NewMemoryDatabase()
	sdb := NewDatabase(db)
	state, _ := New(common.Hash{}, sdb, nil)

	// Fill it with some arbitrary data
	var accounts []*testAccount
	for i := byte(0); i < 96; i++ {
		obj := state.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		acc := &testAccount{address: common.BytesToAddress([]byte{i})}

		obj.AddBalance(big.NewInt(int64(11 * i)))
		acc.balance = big.NewInt(int64(11 * i))

		obj.SetNonce(uint64(42 * i))
		acc.nonce = uint64(42 * i)

		// Every third account gets contract code ...
		if i%3 == 0 {
			obj.SetCode(crypto.Keccak256Hash([]byte{i, i, i, i, i}), []byte{i, i, i, i, i})
			acc.code = []byte{i, i, i, i, i}
		}
		// ... and every fifth gets some storage slots.
		if i%5 == 0 {
			for j := byte(0); j < 5; j++ {
				hash := crypto.Keccak256Hash([]byte{i, i, i, i, i, j, j})
				obj.SetState(sdb, hash, hash)
			}
		}
		state.updateStateObject(obj)
		accounts = append(accounts, acc)
	}
	root, _ := state.Commit(false)

	// Return the generated state
	return db, sdb, root, accounts
}
||||||
|
|
||||||
|
// Tests that the node iterator indeed walks over the entire database contents.
func TestNodeIteratorCoverage(t *testing.T) {
	// Create some arbitrary test state to iterate
	db, sdb, root, _ := makeTestState()
	sdb.TrieDB().Commit(root, false)

	state, err := New(root, sdb, nil)
	if err != nil {
		t.Fatalf("failed to create state trie at %x: %v", root, err)
	}
	// Gather all the node hashes found by the iterator
	hashes := make(map[common.Hash]struct{})
	for it := NewNodeIterator(state); it.Next(); {
		if it.Hash != (common.Hash{}) {
			hashes[it.Hash] = struct{}{}
		}
	}
	// Check in-disk nodes
	var (
		seenNodes = make(map[common.Hash]struct{})
		seenCodes = make(map[common.Hash]struct{})
	)
	it := db.NewIterator(nil, nil)
	for it.Next() {
		ok, hash := isTrieNode(sdb.TrieDB().Scheme(), it.Key(), it.Value())
		if !ok {
			continue
		}
		seenNodes[hash] = struct{}{}
	}
	it.Release()

	// Check in-disk codes
	it = db.NewIterator(nil, nil)
	for it.Next() {
		ok, hash := rawdb.IsCodeKey(it.Key())
		if !ok {
			continue
		}
		if _, ok := hashes[common.BytesToHash(hash)]; !ok {
			t.Errorf("state entry not reported %x", it.Key())
		}
		seenCodes[common.BytesToHash(hash)] = struct{}{}
	}
	it.Release()

	// Cross check the iterated hashes and the database/nodepool content
	for hash := range hashes {
		_, ok := seenNodes[hash]
		if !ok {
			_, ok = seenCodes[hash]
		}
		if !ok {
			t.Errorf("failed to retrieve reported node %x", hash)
		}
	}
}
||||||
|
|
||||||
|
// isTrieNode is a helper function which reports if the provided
|
||||||
|
// database entry belongs to a trie node or not.
|
||||||
|
func isTrieNode(scheme string, key, val []byte) (bool, common.Hash) { |
||||||
|
if scheme == rawdb.HashScheme { |
||||||
|
if len(key) == common.HashLength { |
||||||
|
return true, common.BytesToHash(key) |
||||||
|
} |
||||||
|
} |
||||||
|
return false, common.Hash{} |
||||||
|
} |
@ -0,0 +1,30 @@ |
|||||||
|
// Copyright 2021 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package state |
||||||
|
|
||||||
|
import "github.com/ethereum/go-ethereum/metrics" |
||||||
|
|
||||||
|
// Meters tracking account/storage entry and trie-node churn during state
// updates and deletions, registered with the global metrics registry.
// NOTE(review): some of these are ported from go-ethereum and may not be
// driven by every code path in this fork yet — confirm against callers.
var (
	accountUpdatedMeter      = metrics.NewRegisteredMeter("state/update/account", nil)
	storageUpdatedMeter      = metrics.NewRegisteredMeter("state/update/storage", nil)
	accountDeletedMeter      = metrics.NewRegisteredMeter("state/delete/account", nil)
	storageDeletedMeter      = metrics.NewRegisteredMeter("state/delete/storage", nil)
	accountTrieUpdatedMeter  = metrics.NewRegisteredMeter("state/update/accountnodes", nil)
	storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil)
	accountTrieDeletedMeter  = metrics.NewRegisteredMeter("state/delete/accountnodes", nil)
	storageTriesDeletedMeter = metrics.NewRegisteredMeter("state/delete/storagenodes", nil)
)
@ -0,0 +1,46 @@ |
|||||||
|
// Copyright 2019 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package state |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
"testing" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
) |
||||||
|
|
||||||
|
// BenchmarkCutOriginal measures stripping leading zero bytes via
// bytes.TrimLeft with a cutset string.
func BenchmarkCutOriginal(b *testing.B) {
	value := common.HexToHash("0x01")
	for i := 0; i < b.N; i++ {
		bytes.TrimLeft(value[:], "\x00")
	}
}
||||||
|
|
||||||
|
// BenchmarkCutsetterFn measures the same zero-stripping done through
// bytes.TrimLeftFunc with a predicate closure.
func BenchmarkCutsetterFn(b *testing.B) {
	value := common.HexToHash("0x01")
	cutSetFn := func(r rune) bool { return r == 0 }
	for i := 0; i < b.N; i++ {
		bytes.TrimLeftFunc(value[:], cutSetFn)
	}
}
||||||
|
|
||||||
|
// BenchmarkCutCustomTrim measures the hand-rolled common.TrimLeftZeroes
// helper for comparison with the stdlib variants above.
func BenchmarkCutCustomTrim(b *testing.B) {
	value := common.HexToHash("0x01")
	for i := 0; i < b.N; i++ {
		common.TrimLeftZeroes(value[:])
	}
}
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,56 @@ |
|||||||
|
// Copyright 2015 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package state |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/ethdb" |
||||||
|
"github.com/ethereum/go-ethereum/rlp" |
||||||
|
"github.com/ethereum/go-ethereum/trie" |
||||||
|
) |
||||||
|
|
||||||
|
// NewStateSync create a new state trie download scheduler.
// onLeaf, when non-nil, is invoked for every account and storage leaf that
// is synced; scheme selects the trie node storage scheme.
func NewStateSync(root common.Hash, database ethdb.KeyValueReader, onLeaf func(keys [][]byte, leaf []byte) error, scheme string) *trie.Sync {
	// Register the storage slot callback if the external callback is specified.
	var onSlot func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error
	if onLeaf != nil {
		onSlot = func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error {
			return onLeaf(keys, leaf)
		}
	}
	// Register the account callback to connect the state trie and the storage
	// trie belongs to the contract. Note: syncer is captured by the closure
	// before being assigned below; trie.NewSync must not invoke onAccount
	// during construction for this to be safe.
	var syncer *trie.Sync
	onAccount := func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error {
		if onLeaf != nil {
			if err := onLeaf(keys, leaf); err != nil {
				return err
			}
		}
		var obj Account
		if err := rlp.Decode(bytes.NewReader(leaf), &obj); err != nil {
			return err
		}
		syncer.AddSubTrie(obj.Root, path, parent, parentPath, onSlot)
		syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), path, parent, parentPath)
		return nil
	}
	syncer = trie.NewSync(root, database, onAccount, scheme)
	return syncer
}
@ -0,0 +1,55 @@ |
|||||||
|
// Copyright 2022 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package state |
||||||
|
|
||||||
|
import ( |
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
) |
||||||
|
|
||||||
|
// transientStorage is a representation of EIP-1153 "Transient Storage".
|
||||||
|
type transientStorage map[common.Address]Storage |
||||||
|
|
||||||
|
// newTransientStorage creates a new instance of a transientStorage.
|
||||||
|
func newTransientStorage() transientStorage { |
||||||
|
return make(transientStorage) |
||||||
|
} |
||||||
|
|
||||||
|
// Set sets the transient-storage `value` for `key` at the given `addr`.
|
||||||
|
func (t transientStorage) Set(addr common.Address, key, value common.Hash) { |
||||||
|
if _, ok := t[addr]; !ok { |
||||||
|
t[addr] = make(Storage) |
||||||
|
} |
||||||
|
t[addr][key] = value |
||||||
|
} |
||||||
|
|
||||||
|
// Get gets the transient storage for `key` at the given `addr`.
|
||||||
|
func (t transientStorage) Get(addr common.Address, key common.Hash) common.Hash { |
||||||
|
val, ok := t[addr] |
||||||
|
if !ok { |
||||||
|
return common.Hash{} |
||||||
|
} |
||||||
|
return val[key] |
||||||
|
} |
||||||
|
|
||||||
|
// Copy does a deep copy of the transientStorage
|
||||||
|
func (t transientStorage) Copy() transientStorage { |
||||||
|
storage := make(transientStorage) |
||||||
|
for key, value := range t { |
||||||
|
storage[key] = value.Copy() |
||||||
|
} |
||||||
|
return storage |
||||||
|
} |
@ -0,0 +1,354 @@ |
|||||||
|
// Copyright 2020 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package state |
||||||
|
|
||||||
|
import ( |
||||||
|
"sync" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/metrics" |
||||||
|
"github.com/harmony-one/harmony/internal/utils" |
||||||
|
) |
||||||
|
|
||||||
|
// triePrefetchMetricsPrefix is the prefix under which to publish the metrics.
// It is never reassigned anywhere in the package, so declare it as a constant
// rather than a mutable package-level variable.
const triePrefetchMetricsPrefix = "trie/prefetch/"
||||||
|
|
||||||
|
// triePrefetcher is an active prefetcher, which receives accounts or storage
// items and does trie-loading of them. The goal is to get as much useful content
// into the caches as possible.
//
// Note, the prefetcher's API is not thread safe.
//
// Exactly one of fetches/fetchers is non-nil: an active prefetcher (built by
// newTriePrefetcher) uses fetchers; an inactive deep copy (built by copy)
// uses fetches.
type triePrefetcher struct {
	db       Database               // Database to fetch trie nodes through
	root     common.Hash            // Root hash of the account trie for metrics
	fetches  map[string]Trie        // Partially or fully fetcher tries
	fetchers map[string]*subfetcher // Subfetchers for each trie

	// deliveryMissMeter counts trie() requests that could not be served.
	deliveryMissMeter metrics.Meter
	accountLoadMeter  metrics.Meter
	accountDupMeter   metrics.Meter
	accountSkipMeter  metrics.Meter
	accountWasteMeter metrics.Meter
	storageLoadMeter  metrics.Meter
	storageDupMeter   metrics.Meter
	storageSkipMeter  metrics.Meter
	storageWasteMeter metrics.Meter
}
||||||
|
|
||||||
|
func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePrefetcher { |
||||||
|
prefix := triePrefetchMetricsPrefix + namespace |
||||||
|
p := &triePrefetcher{ |
||||||
|
db: db, |
||||||
|
root: root, |
||||||
|
fetchers: make(map[string]*subfetcher), // Active prefetchers use the fetchers map
|
||||||
|
|
||||||
|
deliveryMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss", nil), |
||||||
|
accountLoadMeter: metrics.GetOrRegisterMeter(prefix+"/account/load", nil), |
||||||
|
accountDupMeter: metrics.GetOrRegisterMeter(prefix+"/account/dup", nil), |
||||||
|
accountSkipMeter: metrics.GetOrRegisterMeter(prefix+"/account/skip", nil), |
||||||
|
accountWasteMeter: metrics.GetOrRegisterMeter(prefix+"/account/waste", nil), |
||||||
|
storageLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/load", nil), |
||||||
|
storageDupMeter: metrics.GetOrRegisterMeter(prefix+"/storage/dup", nil), |
||||||
|
storageSkipMeter: metrics.GetOrRegisterMeter(prefix+"/storage/skip", nil), |
||||||
|
storageWasteMeter: metrics.GetOrRegisterMeter(prefix+"/storage/waste", nil), |
||||||
|
} |
||||||
|
return p |
||||||
|
} |
||||||
|
|
||||||
|
// close iterates over all the subfetchers, aborts any that were left spinning
|
||||||
|
// and reports the stats to the metrics subsystem.
|
||||||
|
func (p *triePrefetcher) close() { |
||||||
|
for _, fetcher := range p.fetchers { |
||||||
|
fetcher.abort() // safe to do multiple times
|
||||||
|
|
||||||
|
if metrics.Enabled { |
||||||
|
if fetcher.root == p.root { |
||||||
|
p.accountLoadMeter.Mark(int64(len(fetcher.seen))) |
||||||
|
p.accountDupMeter.Mark(int64(fetcher.dups)) |
||||||
|
p.accountSkipMeter.Mark(int64(len(fetcher.tasks))) |
||||||
|
|
||||||
|
for _, key := range fetcher.used { |
||||||
|
delete(fetcher.seen, string(key)) |
||||||
|
} |
||||||
|
p.accountWasteMeter.Mark(int64(len(fetcher.seen))) |
||||||
|
} else { |
||||||
|
p.storageLoadMeter.Mark(int64(len(fetcher.seen))) |
||||||
|
p.storageDupMeter.Mark(int64(fetcher.dups)) |
||||||
|
p.storageSkipMeter.Mark(int64(len(fetcher.tasks))) |
||||||
|
|
||||||
|
for _, key := range fetcher.used { |
||||||
|
delete(fetcher.seen, string(key)) |
||||||
|
} |
||||||
|
p.storageWasteMeter.Mark(int64(len(fetcher.seen))) |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
// Clear out all fetchers (will crash on a second call, deliberate)
|
||||||
|
p.fetchers = nil |
||||||
|
} |
||||||
|
|
||||||
|
// copy creates a deep-but-inactive copy of the trie prefetcher. Any trie data
|
||||||
|
// already loaded will be copied over, but no goroutines will be started. This
|
||||||
|
// is mostly used in the miner which creates a copy of it's actively mutated
|
||||||
|
// state to be sealed while it may further mutate the state.
|
||||||
|
func (p *triePrefetcher) copy() *triePrefetcher { |
||||||
|
copy := &triePrefetcher{ |
||||||
|
db: p.db, |
||||||
|
root: p.root, |
||||||
|
fetches: make(map[string]Trie), // Active prefetchers use the fetches map
|
||||||
|
|
||||||
|
deliveryMissMeter: p.deliveryMissMeter, |
||||||
|
accountLoadMeter: p.accountLoadMeter, |
||||||
|
accountDupMeter: p.accountDupMeter, |
||||||
|
accountSkipMeter: p.accountSkipMeter, |
||||||
|
accountWasteMeter: p.accountWasteMeter, |
||||||
|
storageLoadMeter: p.storageLoadMeter, |
||||||
|
storageDupMeter: p.storageDupMeter, |
||||||
|
storageSkipMeter: p.storageSkipMeter, |
||||||
|
storageWasteMeter: p.storageWasteMeter, |
||||||
|
} |
||||||
|
// If the prefetcher is already a copy, duplicate the data
|
||||||
|
if p.fetches != nil { |
||||||
|
for root, fetch := range p.fetches { |
||||||
|
if fetch == nil { |
||||||
|
continue |
||||||
|
} |
||||||
|
copy.fetches[root] = p.db.CopyTrie(fetch) |
||||||
|
} |
||||||
|
return copy |
||||||
|
} |
||||||
|
// Otherwise we're copying an active fetcher, retrieve the current states
|
||||||
|
for id, fetcher := range p.fetchers { |
||||||
|
copy.fetches[id] = fetcher.peek() |
||||||
|
} |
||||||
|
return copy |
||||||
|
} |
||||||
|
|
||||||
|
// prefetch schedules a batch of trie items to prefetch.
|
||||||
|
func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, keys [][]byte) { |
||||||
|
// If the prefetcher is an inactive one, bail out
|
||||||
|
if p.fetches != nil { |
||||||
|
return |
||||||
|
} |
||||||
|
// Active fetcher, schedule the retrievals
|
||||||
|
id := p.trieID(owner, root) |
||||||
|
fetcher := p.fetchers[id] |
||||||
|
if fetcher == nil { |
||||||
|
fetcher = newSubfetcher(p.db, p.root, owner, root) |
||||||
|
p.fetchers[id] = fetcher |
||||||
|
} |
||||||
|
fetcher.schedule(keys) |
||||||
|
} |
||||||
|
|
||||||
|
// trie returns the trie matching the root hash, or nil if the prefetcher doesn't
|
||||||
|
// have it.
|
||||||
|
func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie { |
||||||
|
// If the prefetcher is inactive, return from existing deep copies
|
||||||
|
id := p.trieID(owner, root) |
||||||
|
if p.fetches != nil { |
||||||
|
trie := p.fetches[id] |
||||||
|
if trie == nil { |
||||||
|
p.deliveryMissMeter.Mark(1) |
||||||
|
return nil |
||||||
|
} |
||||||
|
return p.db.CopyTrie(trie) |
||||||
|
} |
||||||
|
// Otherwise the prefetcher is active, bail if no trie was prefetched for this root
|
||||||
|
fetcher := p.fetchers[id] |
||||||
|
if fetcher == nil { |
||||||
|
p.deliveryMissMeter.Mark(1) |
||||||
|
return nil |
||||||
|
} |
||||||
|
// Interrupt the prefetcher if it's by any chance still running and return
|
||||||
|
// a copy of any pre-loaded trie.
|
||||||
|
fetcher.abort() // safe to do multiple times
|
||||||
|
|
||||||
|
trie := fetcher.peek() |
||||||
|
if trie == nil { |
||||||
|
p.deliveryMissMeter.Mark(1) |
||||||
|
return nil |
||||||
|
} |
||||||
|
return trie |
||||||
|
} |
||||||
|
|
||||||
|
// used marks a batch of state items used to allow creating statistics as to
|
||||||
|
// how useful or wasteful the prefetcher is.
|
||||||
|
func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte) { |
||||||
|
if fetcher := p.fetchers[p.trieID(owner, root)]; fetcher != nil { |
||||||
|
fetcher.used = used |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// trieID returns an unique trie identifier consists the trie owner and root hash.
|
||||||
|
func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string { |
||||||
|
return string(append(owner.Bytes(), root.Bytes()...)) |
||||||
|
} |
||||||
|
|
||||||
|
// subfetcher is a trie fetcher goroutine responsible for pulling entries for a
// single trie. It is spawned when a new root is encountered and lives until the
// main prefetcher is paused and either all requested items are processed or if
// the trie being worked on is retrieved from the prefetcher.
type subfetcher struct {
	db    Database    // Database to load trie nodes through
	state common.Hash // Root hash of the state to prefetch
	owner common.Hash // Owner of the trie, usually account hash
	root  common.Hash // Root hash of the trie to prefetch
	trie  Trie        // Trie being populated with nodes

	tasks [][]byte   // Items queued up for retrieval
	lock  sync.Mutex // Lock protecting the task queue

	wake chan struct{} // Wake channel if a new task is scheduled (buffered, size 1)
	stop chan struct{} // Channel to interrupt processing (closed by abort)
	term chan struct{} // Channel to signal interruption (closed when loop exits)
	copy chan chan Trie // Channel to request a copy of the current trie

	// Stats consumed by triePrefetcher.close when metrics are enabled.
	seen map[string]struct{} // Tracks the entries already loaded
	dups int                 // Number of duplicate preload tasks
	used [][]byte            // Tracks the entries used in the end
}
||||||
|
|
||||||
|
// newSubfetcher creates a goroutine to prefetch state items belonging to a
|
||||||
|
// particular root hash.
|
||||||
|
func newSubfetcher(db Database, state common.Hash, owner common.Hash, root common.Hash) *subfetcher { |
||||||
|
sf := &subfetcher{ |
||||||
|
db: db, |
||||||
|
state: state, |
||||||
|
owner: owner, |
||||||
|
root: root, |
||||||
|
wake: make(chan struct{}, 1), |
||||||
|
stop: make(chan struct{}), |
||||||
|
term: make(chan struct{}), |
||||||
|
copy: make(chan chan Trie), |
||||||
|
seen: make(map[string]struct{}), |
||||||
|
} |
||||||
|
go sf.loop() |
||||||
|
return sf |
||||||
|
} |
||||||
|
|
||||||
|
// schedule adds a batch of trie keys to the queue to prefetch.
|
||||||
|
func (sf *subfetcher) schedule(keys [][]byte) { |
||||||
|
// Append the tasks to the current queue
|
||||||
|
sf.lock.Lock() |
||||||
|
sf.tasks = append(sf.tasks, keys...) |
||||||
|
sf.lock.Unlock() |
||||||
|
|
||||||
|
// Notify the prefetcher, it's fine if it's already terminated
|
||||||
|
select { |
||||||
|
case sf.wake <- struct{}{}: |
||||||
|
default: |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// peek tries to retrieve a deep copy of the fetcher's trie in whatever form it
|
||||||
|
// is currently.
|
||||||
|
func (sf *subfetcher) peek() Trie { |
||||||
|
ch := make(chan Trie) |
||||||
|
select { |
||||||
|
case sf.copy <- ch: |
||||||
|
// Subfetcher still alive, return copy from it
|
||||||
|
return <-ch |
||||||
|
|
||||||
|
case <-sf.term: |
||||||
|
// Subfetcher already terminated, return a copy directly
|
||||||
|
if sf.trie == nil { |
||||||
|
return nil |
||||||
|
} |
||||||
|
return sf.db.CopyTrie(sf.trie) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// abort interrupts the subfetcher immediately. It is safe to call abort multiple
// times but it is not thread safe.
func (sf *subfetcher) abort() {
	// Close the stop channel exactly once: a repeat call finds it already
	// closed and skips the close, avoiding a double-close panic.
	select {
	case <-sf.stop:
	default:
		close(sf.stop)
	}
	// Block until the loop goroutine has fully terminated.
	<-sf.term
}
||||||
|
|
||||||
|
// loop waits for new tasks to be scheduled and keeps loading them until it runs
// out of tasks or its underlying trie is retrieved for committing.
func (sf *subfetcher) loop() {
	// No matter how the loop stops, signal anyone waiting that it's terminated
	defer close(sf.term)

	// Start by opening the trie and stop processing if it fails.
	// A zero owner hash denotes the account trie itself; anything else is a
	// storage trie belonging to that owner.
	if sf.owner == (common.Hash{}) {
		trie, err := sf.db.OpenTrie(sf.root)
		if err != nil {
			utils.Logger().Warn().Err(err).Interface("root", sf.root).Msg("Trie prefetcher failed opening trie")
			return
		}
		sf.trie = trie
	} else {
		trie, err := sf.db.OpenStorageTrie(sf.state, sf.owner, sf.root)
		if err != nil {
			utils.Logger().Warn().Err(err).Interface("root", sf.root).Msg("Trie prefetcher failed opening trie")
			return
		}
		sf.trie = trie
	}
	// Trie opened successfully, keep prefetching items
	for {
		select {
		case <-sf.wake:
			// Subfetcher was woken up, retrieve any tasks to avoid spinning the lock
			sf.lock.Lock()
			tasks := sf.tasks
			sf.tasks = nil
			sf.lock.Unlock()

			// Prefetch any tasks until the loop is interrupted
			for i, task := range tasks {
				select {
				case <-sf.stop:
					// If termination is requested, add any leftover back and return
					sf.lock.Lock()
					sf.tasks = append(sf.tasks, tasks[i:]...)
					sf.lock.Unlock()
					return

				case ch := <-sf.copy:
					// Somebody wants a copy of the current trie, grant them
					ch <- sf.db.CopyTrie(sf.trie)

				default:
					// No termination request yet, prefetch the next entry
					if _, ok := sf.seen[string(task)]; ok {
						sf.dups++
					} else {
						// Best-effort cache warming: the lookup error is
						// deliberately ignored.
						sf.trie.TryGet(task)
						sf.seen[string(task)] = struct{}{}
					}
				}
			}

		case ch := <-sf.copy:
			// Somebody wants a copy of the current trie, grant them
			ch <- sf.db.CopyTrie(sf.trie)

		case <-sf.stop:
			// Termination is requested, abort and leave remaining tasks
			return
		}
	}
}
@ -0,0 +1,110 @@ |
|||||||
|
// Copyright 2021 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package state |
||||||
|
|
||||||
|
import ( |
||||||
|
"math/big" |
||||||
|
"testing" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/harmony-one/harmony/core/rawdb" |
||||||
|
) |
||||||
|
|
||||||
|
func filledStateDB() *DB { |
||||||
|
state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) |
||||||
|
|
||||||
|
// Create an account and check if the retrieved balance is correct
|
||||||
|
addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe") |
||||||
|
skey := common.HexToHash("aaa") |
||||||
|
sval := common.HexToHash("bbb") |
||||||
|
|
||||||
|
state.SetBalance(addr, big.NewInt(42)) // Change the account trie
|
||||||
|
state.SetCode(addr, []byte("hello")) // Change an external metadata
|
||||||
|
state.SetState(addr, skey, sval) // Change the storage trie
|
||||||
|
for i := 0; i < 100; i++ { |
||||||
|
sk := common.BigToHash(big.NewInt(int64(i))) |
||||||
|
state.SetState(addr, sk, sk) // Change the storage trie
|
||||||
|
} |
||||||
|
return state |
||||||
|
} |
||||||
|
|
||||||
|
func TestCopyAndClose(t *testing.T) { |
||||||
|
db := filledStateDB() |
||||||
|
prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") |
||||||
|
skey := common.HexToHash("aaa") |
||||||
|
prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) |
||||||
|
prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) |
||||||
|
time.Sleep(1 * time.Second) |
||||||
|
a := prefetcher.trie(common.Hash{}, db.originalRoot) |
||||||
|
prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) |
||||||
|
b := prefetcher.trie(common.Hash{}, db.originalRoot) |
||||||
|
cpy := prefetcher.copy() |
||||||
|
cpy.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) |
||||||
|
cpy.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) |
||||||
|
c := cpy.trie(common.Hash{}, db.originalRoot) |
||||||
|
prefetcher.close() |
||||||
|
cpy2 := cpy.copy() |
||||||
|
cpy2.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) |
||||||
|
d := cpy2.trie(common.Hash{}, db.originalRoot) |
||||||
|
cpy.close() |
||||||
|
cpy2.close() |
||||||
|
if a.Hash() != b.Hash() || a.Hash() != c.Hash() || a.Hash() != d.Hash() { |
||||||
|
t.Fatalf("Invalid trie, hashes should be equal: %v %v %v %v", a.Hash(), b.Hash(), c.Hash(), d.Hash()) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
func TestUseAfterClose(t *testing.T) { |
||||||
|
db := filledStateDB() |
||||||
|
prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") |
||||||
|
skey := common.HexToHash("aaa") |
||||||
|
prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) |
||||||
|
a := prefetcher.trie(common.Hash{}, db.originalRoot) |
||||||
|
prefetcher.close() |
||||||
|
b := prefetcher.trie(common.Hash{}, db.originalRoot) |
||||||
|
if a == nil { |
||||||
|
t.Fatal("Prefetching before close should not return nil") |
||||||
|
} |
||||||
|
if b != nil { |
||||||
|
t.Fatal("Trie after close should return nil") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
func TestCopyClose(t *testing.T) { |
||||||
|
db := filledStateDB() |
||||||
|
prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") |
||||||
|
skey := common.HexToHash("aaa") |
||||||
|
prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) |
||||||
|
cpy := prefetcher.copy() |
||||||
|
a := prefetcher.trie(common.Hash{}, db.originalRoot) |
||||||
|
b := cpy.trie(common.Hash{}, db.originalRoot) |
||||||
|
prefetcher.close() |
||||||
|
c := prefetcher.trie(common.Hash{}, db.originalRoot) |
||||||
|
d := cpy.trie(common.Hash{}, db.originalRoot) |
||||||
|
if a == nil { |
||||||
|
t.Fatal("Prefetching before close should not return nil") |
||||||
|
} |
||||||
|
if b == nil { |
||||||
|
t.Fatal("Copy trie should return nil") |
||||||
|
} |
||||||
|
if c != nil { |
||||||
|
t.Fatal("Trie after close should return nil") |
||||||
|
} |
||||||
|
if d == nil { |
||||||
|
t.Fatal("Copy trie should not return nil") |
||||||
|
} |
||||||
|
} |
Loading…
Reference in new issue