// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package state

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math"
	"math/big"
	"math/rand"
	"reflect"
	"strings"
	"sync"
	"testing"
	"testing/quick"

	"github.com/ethereum/go-ethereum/common"
	"github.com/harmony-one/harmony/core/rawdb"
	"github.com/harmony-one/harmony/core/types"
)

// Tests that updating a state trie does not leak any database writes prior to
// actually committing the state.
func TestUpdateLeaks(t *testing.T) {
	// Create an empty state database
	db := rawdb.NewMemoryDatabase()
	state, _ := New(common.Hash{}, NewDatabase(db), nil)

	// Update it with some accounts
	for i := byte(0); i < 255; i++ {
		addr := common.BytesToAddress([]byte{i})
		state.AddBalance(addr, big.NewInt(int64(11*i)))
		state.SetNonce(addr, uint64(42*i))
		if i%2 == 0 {
			state.SetState(addr, common.BytesToHash([]byte{i, i, i}), common.BytesToHash([]byte{i, i, i, i}))
		}
		if i%3 == 0 {
			state.SetCode(addr, []byte{i, i, i, i, i}, false)
		}
	}

	root := state.IntermediateRoot(false)
	if err := state.Database().TrieDB().Commit(root, false); err != nil {
		t.Errorf("can not commit trie %v to persistent database", root.Hex())
	}

	// Ensure that no data was leaked into the database
	it := db.NewIterator(nil, nil)
	for it.Next() {
		t.Errorf("State leaked into database: %x -> %x", it.Key(), it.Value())
	}
	it.Release()
}

// Tests that no intermediate state of an object is stored into the database,
// only the one right before the commit.
func TestIntermediateLeaks(t *testing.T) {
	// Create two state databases, one transitioning to the final state, the other final from the beginning
	transDb := rawdb.NewMemoryDatabase()
	finalDb := rawdb.NewMemoryDatabase()
	transState, _ := New(common.Hash{}, NewDatabase(transDb), nil)
	finalState, _ := New(common.Hash{}, NewDatabase(finalDb), nil)

	modify := func(state *DB, addr common.Address, i, tweak byte) {
		state.SetBalance(addr, big.NewInt(int64(11*i)+int64(tweak)))
		state.SetNonce(addr, uint64(42*i+tweak))
		if i%2 == 0 {
			state.SetState(addr, common.Hash{i, i, i, 0}, common.Hash{})
			state.SetState(addr, common.Hash{i, i, i, tweak}, common.Hash{i, i, i, i, tweak})
		}
		if i%3 == 0 {
			state.SetCode(addr, []byte{i, i, i, i, i, tweak}, false)
		}
	}

	// Modify the transient state.
	for i := byte(0); i < 255; i++ {
		modify(transState, common.Address{i}, i, 0)
	}
	// Write modifications to trie.
	transState.IntermediateRoot(false)

	// Overwrite all the data with new values in the transient database.
	for i := byte(0); i < 255; i++ {
		modify(transState, common.Address{i}, i, 99)
		modify(finalState, common.Address{i}, i, 99)
	}

	// Commit and cross check the databases.
	transRoot, err := transState.Commit(false)
	if err != nil {
		t.Fatalf("failed to commit transition state: %v", err)
	}
	if err = transState.Database().TrieDB().Commit(transRoot, false); err != nil {
		t.Errorf("can not commit trie %v to persistent database", transRoot.Hex())
	}

	finalRoot, err := finalState.Commit(false)
	if err != nil {
		t.Fatalf("failed to commit final state: %v", err)
	}
	if err = finalState.Database().TrieDB().Commit(finalRoot, false); err != nil {
		t.Errorf("can not commit trie %v to persistent database", finalRoot.Hex())
	}

	it := finalDb.NewIterator(nil, nil)
	for it.Next() {
		key, fvalue := it.Key(), it.Value()
		tvalue, err := transDb.Get(key)
		if err != nil {
			t.Errorf("entry missing from the transition database: %x -> %x", key, fvalue)
		}
		if !bytes.Equal(fvalue, tvalue) {
			t.Errorf("value mismatch at key %x: %x in transition database, %x in final database", key, tvalue, fvalue)
		}
	}
	it.Release()

	it = transDb.NewIterator(nil, nil)
	for it.Next() {
		key, tvalue := it.Key(), it.Value()
		fvalue, err := finalDb.Get(key)
		if err != nil {
			t.Errorf("extra entry in the transition database: %x -> %x", key, it.Value())
		}
		if !bytes.Equal(fvalue, tvalue) {
			t.Errorf("value mismatch at key %x: %x in transition database, %x in final database", key, tvalue, fvalue)
		}
	}
	it.Release()
}

// TestCopy tests that copying a DB object indeed makes the original and
// the copy independent of each other. This test is a regression test against
// https://github.com/ethereum/go-ethereum/pull/15549.
func TestCopy(t *testing.T) {
	// Create a random state test to copy and modify "independently"
	orig, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil)

	for i := byte(0); i < 255; i++ {
		obj := orig.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		obj.AddBalance(big.NewInt(int64(i)))
		orig.updateStateObject(obj)
	}
	orig.Finalise(false)

	// Copy the state
	copy := orig.Copy()

	// Copy the copy state
	ccopy := copy.Copy()

	// modify all in memory
	for i := byte(0); i < 255; i++ {
		origObj := orig.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		copyObj := copy.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		ccopyObj := ccopy.GetOrNewStateObject(common.BytesToAddress([]byte{i}))

		origObj.AddBalance(big.NewInt(2 * int64(i)))
		copyObj.AddBalance(big.NewInt(3 * int64(i)))
		ccopyObj.AddBalance(big.NewInt(4 * int64(i)))

		orig.updateStateObject(origObj)
		copy.updateStateObject(copyObj)
		ccopy.updateStateObject(ccopyObj)
	}

	// Finalise the changes on all concurrently
	finalise := func(wg *sync.WaitGroup, db *DB) {
		defer wg.Done()
		db.Finalise(true)
	}

	var wg sync.WaitGroup
	wg.Add(3)
	go finalise(&wg, orig)
	go finalise(&wg, copy)
	go finalise(&wg, ccopy)
	wg.Wait()

	// Verify that the three states have been updated independently
	for i := byte(0); i < 255; i++ {
		origObj := orig.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		copyObj := copy.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		ccopyObj := ccopy.GetOrNewStateObject(common.BytesToAddress([]byte{i}))

		if want := big.NewInt(3 * int64(i)); origObj.Balance().Cmp(want) != 0 {
			t.Errorf("orig obj %d: balance mismatch: have %v, want %v", i, origObj.Balance(), want)
		}
		if want := big.NewInt(4 * int64(i)); copyObj.Balance().Cmp(want) != 0 {
			t.Errorf("copy obj %d: balance mismatch: have %v, want %v", i, copyObj.Balance(), want)
		}
		if want := big.NewInt(5 * int64(i)); ccopyObj.Balance().Cmp(want) != 0 {
			t.Errorf("ccopy obj %d: balance mismatch: have %v, want %v", i, ccopyObj.Balance(), want)
		}
	}
}
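
// TestSnapshotRandom feeds pseudorandom action sequences to snapshotTest via
// testing/quick and reports the failing action sequence if any revert check
// fails. (Descriptive comment added; the behaviour is defined by snapshotTest.)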
func TestSnapshotRandom(t *testing.T) {
	config := &quick.Config{MaxCount: 1000}
	err := quick.Check((*snapshotTest).run, config)
	if cerr, ok := err.(*quick.CheckError); ok {
		test := cerr.In[0].(*snapshotTest)
		t.Errorf("%v:\n%s", test.err, test)
	} else if err != nil {
		t.Error(err)
	}
}

// A snapshotTest checks that reverting DB snapshots properly undoes all changes
// captured by the snapshot. Instances of this test with pseudorandom content are created
// by Generate.
//
// The test works as follows:
//
// A new state is created and all actions are applied to it. Several snapshots are taken
// in between actions. The test then reverts each snapshot. For each snapshot the actions
// leading up to it are replayed on a fresh, empty state. The behaviour of all public
// accessor methods on the reverted state must match the return value of the equivalent
// methods on the replayed state.
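//
// For example, with actions [A0, A1, A2, A3] and snapshots taken at action
// indexes 1 and 3, reverting the second snapshot must leave the state
// observably equal to a fresh state with A0..A2 applied, and reverting the
// first one to a fresh state with only A0 applied.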
type snapshotTest struct {
	addrs     []common.Address // all account addresses
	actions   []testAction     // modifications to the state
	snapshots []int            // actions indexes at which snapshot is taken
	err       error            // failure details are reported through this field
}

type testAction struct {
	name   string
	fn     func(testAction, *DB)
	args   []int64
	noAddr bool
}

// newTestAction creates a random action that changes state.
func newTestAction(addr common.Address, r *rand.Rand) testAction {
	actions := []testAction{
		{
			name: "SetBalance",
			fn: func(a testAction, s *DB) {
				s.SetBalance(addr, big.NewInt(a.args[0]))
			},
			args: make([]int64, 1),
		},
		{
			name: "AddBalance",
			fn: func(a testAction, s *DB) {
				s.AddBalance(addr, big.NewInt(a.args[0]))
			},
			args: make([]int64, 1),
		},
		{
			name: "SetNonce",
			fn: func(a testAction, s *DB) {
				s.SetNonce(addr, uint64(a.args[0]))
			},
			args: make([]int64, 1),
		},
		{
			name: "SetState",
			fn: func(a testAction, s *DB) {
				var key, val common.Hash
				binary.BigEndian.PutUint16(key[:], uint16(a.args[0]))
				binary.BigEndian.PutUint16(val[:], uint16(a.args[1]))
				s.SetState(addr, key, val)
			},
			args: make([]int64, 2),
		},
		{
			name: "SetCode",
			fn: func(a testAction, s *DB) {
				code := make([]byte, 16)
				binary.BigEndian.PutUint64(code, uint64(a.args[0]))
				binary.BigEndian.PutUint64(code[8:], uint64(a.args[1]))
				s.SetCode(addr, code, false)
			},
			args: make([]int64, 2),
		},
		{
			name: "CreateAccount",
			fn: func(a testAction, s *DB) {
				s.CreateAccount(addr)
			},
		},
		{
			name: "Suicide",
			fn: func(a testAction, s *DB) {
				s.Suicide(addr)
			},
		},
		{
			name: "AddRefund",
			fn: func(a testAction, s *DB) {
				s.AddRefund(uint64(a.args[0]))
			},
			args:   make([]int64, 1),
			noAddr: true,
		},
		{
			name: "AddLog",
			fn: func(a testAction, s *DB) {
				data := make([]byte, 2)
				binary.BigEndian.PutUint16(data, uint16(a.args[0]))
				s.AddLog(&types.Log{Address: addr, Data: data})
			},
			args: make([]int64, 1),
		},
		{
			name: "AddPreimage",
			fn: func(a testAction, s *DB) {
				preimage := []byte{1}
				hash := common.BytesToHash(preimage)
				s.AddPreimage(hash, preimage)
			},
			args: make([]int64, 1),
		},
		{
			name: "AddAddressToAccessList",
			fn: func(a testAction, s *DB) {
				s.AddAddressToAccessList(addr)
			},
		},
		{
			name: "AddSlotToAccessList",
			fn: func(a testAction, s *DB) {
				s.AddSlotToAccessList(addr,
					common.Hash{byte(a.args[0])})
			},
			args: make([]int64, 1),
		},
		{
			name: "SetTransientState",
			fn: func(a testAction, s *DB) {
				var key, val common.Hash
				binary.BigEndian.PutUint16(key[:], uint16(a.args[0]))
				binary.BigEndian.PutUint16(val[:], uint16(a.args[1]))
				s.SetTransientState(addr, key, val)
			},
			args: make([]int64, 2),
		},
	}
	action := actions[r.Intn(len(actions))]
	var nameargs []string
	if !action.noAddr {
		nameargs = append(nameargs, addr.Hex())
	}
	// Randomize the arguments from r (not the global source, so all randomness
	// stays derived from r as Generate promises) and record them in the name
	// for failure reports.
	for i := range action.args {
		action.args[i] = r.Int63n(100)
		nameargs = append(nameargs, fmt.Sprint(action.args[i]))
	}
	action.name += strings.Join(nameargs, ", ")
	return action
}

// Generate returns a new snapshot test of the given size. All randomness is
// derived from r.
func (*snapshotTest) Generate(r *rand.Rand, size int) reflect.Value {
	// Generate random actions.
	addrs := make([]common.Address, 50)
	for i := range addrs {
		addrs[i][0] = byte(i)
	}
	actions := make([]testAction, size)
	for i := range actions {
		addr := addrs[r.Intn(len(addrs))]
		actions[i] = newTestAction(addr, r)
	}
	// Generate snapshot indexes.
	nsnapshots := int(math.Sqrt(float64(size)))
	if size > 0 && nsnapshots == 0 {
		nsnapshots = 1
	}
	snapshots := make([]int, nsnapshots)
	snaplen := len(actions) / nsnapshots
	for i := range snapshots {
		// Try to place the snapshots some number of actions apart from each other.
		snapshots[i] = (i * snaplen) + r.Intn(snaplen)
	}
	return reflect.ValueOf(&snapshotTest{addrs, actions, snapshots, nil})
}

func (test *snapshotTest) String() string {
	out := new(bytes.Buffer)
	sindex := 0
	for i, action := range test.actions {
		if len(test.snapshots) > sindex && i == test.snapshots[sindex] {
			fmt.Fprintf(out, "---- snapshot %d ----\n", sindex)
			sindex++
		}
		fmt.Fprintf(out, "%4d: %s\n", i, action.name)
	}
	return out.String()
}

func (test *snapshotTest) run() bool {
	// Run all actions and create snapshots.
	var (
		state, _     = New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil)
		snapshotRevs = make([]int, len(test.snapshots))
		sindex       = 0
	)
	for i, action := range test.actions {
		if len(test.snapshots) > sindex && i == test.snapshots[sindex] {
			snapshotRevs[sindex] = state.Snapshot()
			sindex++
		}
		action.fn(action, state)
	}
	// Revert all snapshots in reverse order. Each revert must yield a state
	// that is equivalent to a fresh state with all actions up to the snapshot applied.
	for sindex--; sindex >= 0; sindex-- {
		checkstate, _ := New(common.Hash{}, state.Database(), nil)
		for _, action := range test.actions[:test.snapshots[sindex]] {
			action.fn(action, checkstate)
		}
		state.RevertToSnapshot(snapshotRevs[sindex])
		if err := test.checkEqual(state, checkstate); err != nil {
			test.err = fmt.Errorf("state mismatch after revert to snapshot %d\n%v", sindex, err)
			return false
		}
	}
	return true
}

// checkEqual checks that methods of state and checkstate return the same values.
func (test *snapshotTest) checkEqual(state, checkstate *DB) error {
	for _, addr := range test.addrs {
		var err error
		checkeq := func(op string, a, b interface{}) bool {
			if err == nil && !reflect.DeepEqual(a, b) {
				err = fmt.Errorf("got %s(%s) == %v, want %v", op, addr.Hex(), a, b)
				return false
			}
			return true
		}
		// Check basic accessor methods.
		checkeq("Exist", state.Exist(addr), checkstate.Exist(addr))
		checkeq("HasSuicided", state.HasSuicided(addr), checkstate.HasSuicided(addr))
		checkeq("GetBalance", state.GetBalance(addr), checkstate.GetBalance(addr))
		checkeq("GetNonce", state.GetNonce(addr), checkstate.GetNonce(addr))
		checkeq("GetCode", state.GetCode(addr, false), checkstate.GetCode(addr, false))
		checkeq("GetCodeHash", state.GetCodeHash(addr), checkstate.GetCodeHash(addr))
		checkeq("GetCodeSize", state.GetCodeSize(addr, false), checkstate.GetCodeSize(addr, false))
		// Check storage in both directions: every slot of state must be in
		// checkstate, and every slot of checkstate must be in state.
		if obj := state.getStateObject(addr); obj != nil {
			state.ForEachStorage(addr, func(key, value common.Hash) bool {
				return checkeq("GetState("+key.Hex()+")", checkstate.GetState(addr, key), value)
			})
			checkstate.ForEachStorage(addr, func(key, value common.Hash) bool {
				return checkeq("GetState("+key.Hex()+")", state.GetState(addr, key), value)
			})
		}
		if err != nil {
			return err
		}
	}

	if state.GetRefund() != checkstate.GetRefund() {
		return fmt.Errorf("got GetRefund() == %d, want GetRefund() == %d",
			state.GetRefund(), checkstate.GetRefund())
	}
	if !reflect.DeepEqual(state.GetLogs(common.Hash{}, 0, common.Hash{}), checkstate.GetLogs(common.Hash{}, 0, common.Hash{})) {
		return fmt.Errorf("got GetLogs(common.Hash{}) == %v, want GetLogs(common.Hash{}) == %v",
			state.GetLogs(common.Hash{}, 0, common.Hash{}), checkstate.GetLogs(common.Hash{}, 0, common.Hash{}))
	}
	return nil
}
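
// TestTouchDelete tests that a no-op balance change on an existing empty
// account still registers one journal entry, and that reverting the snapshot
// leaves no dirty objects behind. (Descriptive comment added.)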
func TestTouchDelete(t *testing.T) {
	s := newStateTest()
	s.state.GetOrNewStateObject(common.Address{})
	root, _ := s.state.Commit(false)
	s.state, _ = New(root, s.state.db, s.state.snaps)

	snapshot := s.state.Snapshot()
	s.state.AddBalance(common.Address{}, new(big.Int))

	if len(s.state.journal.dirties) != 1 {
		t.Fatal("expected one dirty state object")
	}
	s.state.RevertToSnapshot(snapshot)
	if len(s.state.journal.dirties) != 0 {
		t.Fatal("expected no dirty state object")
	}
}

// TestCopyOfCopy tests that modified objects are carried over to the copy, and the copy of the copy.
// See https://github.com/ethereum/go-ethereum/pull/15225#issuecomment-380191512
func TestCopyOfCopy(t *testing.T) {
	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil)
	addr := common.HexToAddress("aaaa")
	state.SetBalance(addr, big.NewInt(42))

	if got := state.Copy().GetBalance(addr).Uint64(); got != 42 {
		t.Fatalf("1st copy fail, expected 42, got %v", got)
	}
	if got := state.Copy().Copy().GetBalance(addr).Uint64(); got != 42 {
		t.Fatalf("2nd copy fail, expected 42, got %v", got)
	}
}

// Tests a regression where committing a copy lost some internal meta information,
// leading to corrupted subsequent copies.
//
// See https://github.com/ethereum/go-ethereum/issues/20106.
func TestCopyCommitCopy(t *testing.T) {
	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil)

	// Create an account and check if the retrieved balance is correct
	addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
	skey := common.HexToHash("aaa")
	sval := common.HexToHash("bbb")

	state.SetBalance(addr, big.NewInt(42))      // Change the account trie
	state.SetCode(addr, []byte("hello"), false) // Change an external metadata
	state.SetState(addr, skey, sval)            // Change the storage trie

	if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
		t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42)
	}
	if code := state.GetCode(addr, false); !bytes.Equal(code, []byte("hello")) {
		t.Fatalf("initial code mismatch: have %x, want %x", code, []byte("hello"))
	}
	if val := state.GetState(addr, skey); val != sval {
		t.Fatalf("initial non-committed storage slot mismatch: have %x, want %x", val, sval)
	}
	if val := state.GetCommittedState(addr, skey); val != (common.Hash{}) {
		t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
	}
	// Copy the non-committed state database and check pre/post commit balance
	copyOne := state.Copy()
	if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
		t.Fatalf("first copy pre-commit balance mismatch: have %v, want %v", balance, 42)
	}
	if code := copyOne.GetCode(addr, false); !bytes.Equal(code, []byte("hello")) {
		t.Fatalf("first copy pre-commit code mismatch: have %x, want %x", code, []byte("hello"))
	}
	if val := copyOne.GetState(addr, skey); val != sval {
		t.Fatalf("first copy pre-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
	}
	if val := copyOne.GetCommittedState(addr, skey); val != (common.Hash{}) {
		t.Fatalf("first copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{})
	}

	copyOne.Commit(false)
	if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
		t.Fatalf("first copy post-commit balance mismatch: have %v, want %v", balance, 42)
	}
	if code := copyOne.GetCode(addr, false); !bytes.Equal(code, []byte("hello")) {
		t.Fatalf("first copy post-commit code mismatch: have %x, want %x", code, []byte("hello"))
	}
	if val := copyOne.GetState(addr, skey); val != sval {
		t.Fatalf("first copy post-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
	}
	if val := copyOne.GetCommittedState(addr, skey); val != sval {
		t.Fatalf("first copy post-commit committed storage slot mismatch: have %x, want %x", val, sval)
	}
	// Copy the copy and check the balance once more
	copyTwo := copyOne.Copy()
	if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
		t.Fatalf("second copy balance mismatch: have %v, want %v", balance, 42)
	}
	if code := copyTwo.GetCode(addr, false); !bytes.Equal(code, []byte("hello")) {
		t.Fatalf("second copy code mismatch: have %x, want %x", code, []byte("hello"))
	}
	if val := copyTwo.GetState(addr, skey); val != sval {
		t.Fatalf("second copy non-committed storage slot mismatch: have %x, want %x", val, sval)
	}
	if val := copyTwo.GetCommittedState(addr, skey); val != sval {
		t.Fatalf("second copy post-commit committed storage slot mismatch: have %x, want %x", val, sval)
	}
}

// Tests a regression where committing a copy lost some internal meta information,
// leading to corrupted subsequent copies.
//
// See https://github.com/ethereum/go-ethereum/issues/20106.
func TestCopyCopyCommitCopy(t *testing.T) {
	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil)

	// Create an account and check if the retrieved balance is correct
	addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
	skey := common.HexToHash("aaa")
	sval := common.HexToHash("bbb")

	state.SetBalance(addr, big.NewInt(42))      // Change the account trie
	state.SetCode(addr, []byte("hello"), false) // Change an external metadata
	state.SetState(addr, skey, sval)            // Change the storage trie

	if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
		t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42)
	}
	if code := state.GetCode(addr, false); !bytes.Equal(code, []byte("hello")) {
		t.Fatalf("initial code mismatch: have %x, want %x", code, []byte("hello"))
	}
	if val := state.GetState(addr, skey); val != sval {
		t.Fatalf("initial non-committed storage slot mismatch: have %x, want %x", val, sval)
	}
	if val := state.GetCommittedState(addr, skey); val != (common.Hash{}) {
		t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
	}
	// Copy the non-committed state database and check pre/post commit balance
	copyOne := state.Copy()
	if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
		t.Fatalf("first copy balance mismatch: have %v, want %v", balance, 42)
	}
	if code := copyOne.GetCode(addr, false); !bytes.Equal(code, []byte("hello")) {
		t.Fatalf("first copy code mismatch: have %x, want %x", code, []byte("hello"))
	}
	if val := copyOne.GetState(addr, skey); val != sval {
		t.Fatalf("first copy non-committed storage slot mismatch: have %x, want %x", val, sval)
	}
	if val := copyOne.GetCommittedState(addr, skey); val != (common.Hash{}) {
		t.Fatalf("first copy committed storage slot mismatch: have %x, want %x", val, common.Hash{})
	}
	// Copy the copy and check the balance once more
	copyTwo := copyOne.Copy()
	if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
		t.Fatalf("second copy pre-commit balance mismatch: have %v, want %v", balance, 42)
	}
	if code := copyTwo.GetCode(addr, false); !bytes.Equal(code, []byte("hello")) {
		t.Fatalf("second copy pre-commit code mismatch: have %x, want %x", code, []byte("hello"))
	}
	if val := copyTwo.GetState(addr, skey); val != sval {
		t.Fatalf("second copy pre-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
	}
	if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) {
		t.Fatalf("second copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{})
	}
	copyTwo.Commit(false)
	if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
		t.Fatalf("second copy post-commit balance mismatch: have %v, want %v", balance, 42)
	}
	if code := copyTwo.GetCode(addr, false); !bytes.Equal(code, []byte("hello")) {
		t.Fatalf("second copy post-commit code mismatch: have %x, want %x", code, []byte("hello"))
	}
	if val := copyTwo.GetState(addr, skey); val != sval {
		t.Fatalf("second copy post-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
	}
	if val := copyTwo.GetCommittedState(addr, skey); val != sval {
		t.Fatalf("second copy post-commit committed storage slot mismatch: have %x, want %x", val, sval)
	}
	// Copy the copy-copy and check the balance once more
	copyThree := copyTwo.Copy()
	if balance := copyThree.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
		t.Fatalf("third copy balance mismatch: have %v, want %v", balance, 42)
	}
	if code := copyThree.GetCode(addr, false); !bytes.Equal(code, []byte("hello")) {
		t.Fatalf("third copy code mismatch: have %x, want %x", code, []byte("hello"))
	}
	if val := copyThree.GetState(addr, skey); val != sval {
		t.Fatalf("third copy non-committed storage slot mismatch: have %x, want %x", val, sval)
	}
	if val := copyThree.GetCommittedState(addr, skey); val != sval {
		t.Fatalf("third copy committed storage slot mismatch: have %x, want %x", val, sval)
	}
}

// TestDeleteCreateRevert tests a weird state transition corner case that we hit
// while changing the internals of DB. The workflow is that a contract is
// self-destructed, then in a follow-up transaction (but same block) it's created
// again and the transaction reverted.
//
// The original DB implementation flushed dirty objects to the tries after
// each transaction, so this works ok. The rework accumulated writes in memory
// first, but the journal wiped the entire state object on create-revert.
func TestDeleteCreateRevert(t *testing.T) {
	// Create an initial state with a single contract
	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil)

	addr := common.BytesToAddress([]byte("so"))
	state.SetBalance(addr, big.NewInt(1))

	root, _ := state.Commit(false)
	state, _ = New(root, state.db, state.snaps)

	// Simulate self-destructing in one transaction, then create-reverting in another
	state.Suicide(addr)
	state.Finalise(true)

	id := state.Snapshot()
	state.SetBalance(addr, big.NewInt(2))
	state.RevertToSnapshot(id)

	// Commit the entire state and make sure we don't crash and have the correct state
	root, _ = state.Commit(true)
	state, _ = New(root, state.db, state.snaps)

	if state.getStateObject(addr) != nil {
		t.Fatalf("self-destructed contract came alive")
	}
}

// TestMissingTrieNodes tests that if the DB fails to load parts of the trie,
// the Commit operation fails with an error.
// If we are missing trie nodes, we should not continue writing to the trie.
func TestMissingTrieNodes(t *testing.T) {
	// Create an initial state with a few accounts
	memDb := rawdb.NewMemoryDatabase()
	db := NewDatabase(memDb)
	var root common.Hash
	state, _ := New(common.Hash{}, db, nil)
	addr := common.BytesToAddress([]byte("so"))
	{
		state.SetBalance(addr, big.NewInt(1))
		state.SetCode(addr, []byte{1, 2, 3}, false)
		a2 := common.BytesToAddress([]byte("another"))
		state.SetBalance(a2, big.NewInt(100))
		state.SetCode(a2, []byte{1, 2, 4}, false)
		root, _ = state.Commit(false)
		t.Logf("root: %x", root)
		// force-flush
		state.Database().TrieDB().Cap(0)
	}
	// Create a new state on the old root
	state, _ = New(root, db, nil)
	// Now we clear out the memdb
	it := memDb.NewIterator(nil, nil)
	for it.Next() {
		k := it.Key()
		// Leave the root intact
		if !bytes.Equal(k, root[:]) {
			t.Logf("key: %x", k)
			memDb.Delete(k)
		}
	}
	balance := state.GetBalance(addr)
	// The removed elem should lead to it returning zero balance
	if exp, got := uint64(0), balance.Uint64(); got != exp {
		t.Errorf("expected %d, got %d", exp, got)
	}
	// Modify the state
	state.SetBalance(addr, big.NewInt(2))
	root, err := state.Commit(false)
	if err == nil {
		t.Fatalf("expected error, got root: %x", root)
	}
}
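
// TestStateDBAccessList checks access-list bookkeeping: addresses and storage
// slots added to the list are tracked in the journal, and reverting journal
// entries removes them again in reverse order. (Descriptive comment added.)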
func TestStateDBAccessList(t *testing.T) {
	// Some helpers
	addr := func(a string) common.Address {
		return common.HexToAddress(a)
	}
	slot := func(a string) common.Hash {
		return common.HexToHash(a)
	}

	memDb := rawdb.NewMemoryDatabase()
	db := NewDatabase(memDb)
	state, _ := New(common.Hash{}, db, nil)
	state.accessList = newAccessList()

	verifyAddrs := func(astrings ...string) {
		t.Helper()
		// convert to common.Address form
		var addresses []common.Address
		var addressMap = make(map[common.Address]struct{})
		for _, astring := range astrings {
			address := addr(astring)
			addresses = append(addresses, address)
			addressMap[address] = struct{}{}
		}
		// Check that the given addresses are in the access list
		for _, address := range addresses {
			if !state.AddressInAccessList(address) {
				t.Fatalf("expected %x to be in access list", address)
			}
		}
		// Check that only the expected addresses are present in the access list
		for address := range state.accessList.addresses {
			if _, exist := addressMap[address]; !exist {
				t.Fatalf("extra address %x in access list", address)
			}
		}
	}
	verifySlots := func(addrString string, slotStrings ...string) {
		if !state.AddressInAccessList(addr(addrString)) {
			t.Fatalf("scope missing address/slots %v", addrString)
		}
		var address = addr(addrString)
		// convert to common.Hash form
		var slots []common.Hash
		var slotMap = make(map[common.Hash]struct{})
		for _, slotString := range slotStrings {
			s := slot(slotString)
			slots = append(slots, s)
			slotMap[s] = struct{}{}
		}
		// Check that the expected items are in the access list
		for i, s := range slots {
			if _, slotPresent := state.SlotInAccessList(address, s); !slotPresent {
				t.Fatalf("input %d: scope missing slot %v (address %v)", i, s, addrString)
			}
		}
		// Check that no extra elements are in the access list
		index := state.accessList.addresses[address]
		if index >= 0 {
			stateSlots := state.accessList.slots[index]
			for s := range stateSlots {
				if _, slotPresent := slotMap[s]; !slotPresent {
					t.Fatalf("scope has extra slot %v (address %v)", s, addrString)
				}
			}
		}
	}

	state.AddAddressToAccessList(addr("aa"))          // 1
	state.AddSlotToAccessList(addr("bb"), slot("01")) // 2,3
	state.AddSlotToAccessList(addr("bb"), slot("02")) // 4
	verifyAddrs("aa", "bb")
	verifySlots("bb", "01", "02")

	// Make a copy
	stateCopy1 := state.Copy()
	if exp, got := 4, state.journal.length(); exp != got {
		t.Fatalf("journal length mismatch: have %d, want %d", got, exp)
	}

	// same again, should cause no journal entries
	state.AddSlotToAccessList(addr("bb"), slot("01"))
	state.AddSlotToAccessList(addr("bb"), slot("02"))
	state.AddAddressToAccessList(addr("aa"))
	if exp, got := 4, state.journal.length(); exp != got {
		t.Fatalf("journal length mismatch: have %d, want %d", got, exp)
	}
	// some new ones
	state.AddSlotToAccessList(addr("bb"), slot("03")) // 5
	state.AddSlotToAccessList(addr("aa"), slot("01")) // 6
	state.AddSlotToAccessList(addr("cc"), slot("01")) // 7,8
	state.AddAddressToAccessList(addr("cc"))
	if exp, got := 8, state.journal.length(); exp != got {
		t.Fatalf("journal length mismatch: have %d, want %d", got, exp)
	}

	verifyAddrs("aa", "bb", "cc")
	verifySlots("aa", "01")
	verifySlots("bb", "01", "02", "03")
	verifySlots("cc", "01")
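
	// The numbered comments above count journal entries: adding a slot for a
	// brand-new address costs two entries (address + slot). Reverting the
	// journal to index N below undoes every entry recorded after the first N.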

	// now start rolling back changes
	state.journal.revert(state, 7)
	if _, ok := state.SlotInAccessList(addr("cc"), slot("01")); ok {
		t.Fatalf("slot present, expected missing")
	}
	verifyAddrs("aa", "bb", "cc")
	verifySlots("aa", "01")
	verifySlots("bb", "01", "02", "03")

	state.journal.revert(state, 6)
	if state.AddressInAccessList(addr("cc")) {
		t.Fatalf("addr present, expected missing")
	}
	verifyAddrs("aa", "bb")
	verifySlots("aa", "01")
	verifySlots("bb", "01", "02", "03")

	state.journal.revert(state, 5)
	if _, ok := state.SlotInAccessList(addr("aa"), slot("01")); ok {
		t.Fatalf("slot present, expected missing")
	}
	verifyAddrs("aa", "bb")
	verifySlots("bb", "01", "02", "03")

	state.journal.revert(state, 4)
	if _, ok := state.SlotInAccessList(addr("bb"), slot("03")); ok {
		t.Fatalf("slot present, expected missing")
	}
	verifyAddrs("aa", "bb")
	verifySlots("bb", "01", "02")

	state.journal.revert(state, 3)
	if _, ok := state.SlotInAccessList(addr("bb"), slot("02")); ok {
		t.Fatalf("slot present, expected missing")
|
|
|
}
|
|
|
|
verifyAddrs("aa", "bb")
|
|
|
|
verifySlots("bb", "01")

	state.journal.revert(state, 2)
	if _, ok := state.SlotInAccessList(addr("bb"), slot("01")); ok {
		t.Fatalf("slot present, expected missing")
	}
	verifyAddrs("aa", "bb")

	state.journal.revert(state, 1)
	if state.AddressInAccessList(addr("bb")) {
		t.Fatalf("addr present, expected missing")
	}
	verifyAddrs("aa")

	state.journal.revert(state, 0)
	if state.AddressInAccessList(addr("aa")) {
		t.Fatalf("addr present, expected missing")
	}
	if got, exp := len(state.accessList.addresses), 0; got != exp {
		t.Fatalf("expected empty, got %d", got)
	}
	if got, exp := len(state.accessList.slots), 0; got != exp {
		t.Fatalf("expected empty, got %d", got)
	}
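	// Note: state.journal.revert(state, n) undoes every journal entry from
	// index n onward, shrinking the journal back to length n; the indices
	// 3..0 above walk the access-list changes back in reverse order.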

	// Check the copy made earlier: reverting the original state must not
	// have affected it.
	state = stateCopy1
	verifyAddrs("aa", "bb")
	verifySlots("bb", "01", "02")
	if got, exp := len(state.accessList.addresses), 2; got != exp {
		t.Fatalf("expected %d, got %d", exp, got)
	}
	if got, exp := len(state.accessList.slots), 1; got != exp {
		t.Fatalf("expected %d, got %d", exp, got)
	}
}
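
// For illustration: equivalent rollbacks are available through the public
// snapshot API, without indexing state.journal directly. A minimal sketch,
// assuming *DB exposes the usual Snapshot/RevertToSnapshot pair and the
// access-list mutators used by the EVM:
//
//	id := state.Snapshot()
//	state.AddSlotToAccessList(addr("bb"), slot("03"))
//	state.RevertToSnapshot(id) // the ("bb", "03") access-list entry is undone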

// Tests that account and storage tries are flushed in the correct order and that
// no data loss occurs.
func TestFlushOrderDataLoss(t *testing.T) {
	// Create a state trie with many accounts and slots
	var (
		memdb    = rawdb.NewMemoryDatabase()
		statedb  = NewDatabase(memdb)
		state, _ = New(common.Hash{}, statedb, nil)
	)
	for a := byte(0); a < 10; a++ {
		state.CreateAccount(common.Address{a})
		for s := byte(0); s < 10; s++ {
			state.SetState(common.Address{a}, common.Hash{a, s}, common.Hash{a, s})
		}
	}
	root, err := state.Commit(false)
	if err != nil {
		t.Fatalf("failed to commit state trie: %v", err)
	}
	statedb.TrieDB().Reference(root, common.Hash{})
	if err := statedb.TrieDB().Cap(1024); err != nil {
		t.Fatalf("failed to cap trie dirty cache: %v", err)
	}
	if err := statedb.TrieDB().Commit(root, false); err != nil {
		t.Fatalf("failed to commit state trie: %v", err)
	}
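	// Reference pins root as externally retained so the Cap above cannot
	// garbage-collect it; Cap flushes old dirty trie nodes to disk until the
	// cache drops under the byte limit, and the final Commit persists every
	// node still reachable from root.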
	// Reopen the state trie from flushed disk and verify it
	state, err = New(root, NewDatabase(memdb), nil)
	if err != nil {
		t.Fatalf("failed to reopen state trie: %v", err)
	}
	for a := byte(0); a < 10; a++ {
		for s := byte(0); s < 10; s++ {
			if have := state.GetState(common.Address{a}, common.Hash{a, s}); have != (common.Hash{a, s}) {
				t.Errorf("account %d: slot %d: state mismatch: have %x, want %x", a, s, have, common.Hash{a, s})
			}
		}
	}
}
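
// The flush discipline exercised above (commit the state, reference the new
// root, cap the dirty cache, commit the trie) is summarized by the sketch
// below; flushState is a hypothetical helper for illustration, not part of
// the package API.
func flushState(state *DB, statedb Database, limit common.StorageSize) (common.Hash, error) {
	// Fold the dirty state objects into the in-memory trie database.
	root, err := state.Commit(false)
	if err != nil {
		return common.Hash{}, err
	}
	// Pin the root so capping cannot garbage-collect it mid-flush.
	statedb.TrieDB().Reference(root, common.Hash{})
	// Flush old dirty nodes until memory usage drops below the limit.
	if err := statedb.TrieDB().Cap(limit); err != nil {
		return common.Hash{}, err
	}
	// Persist everything still reachable from the root.
	return root, statedb.TrieDB().Commit(root, false)
}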

func TestStateDBTransientStorage(t *testing.T) {
	memDb := rawdb.NewMemoryDatabase()
	db := NewDatabase(memDb)
	state, _ := New(common.Hash{}, db, nil)

	key := common.Hash{0x01}
	value := common.Hash{0x02}
	addr := common.Address{}

	state.SetTransientState(addr, key, value)
	if exp, got := 1, state.journal.length(); exp != got {
		t.Fatalf("journal length mismatch: have %d, want %d", got, exp)
	}
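	// The transient write above is journaled like any other state change,
	// which is what makes the revert below possible.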
	// the retrieved value should equal what was set
	if got := state.GetTransientState(addr, key); got != value {
		t.Fatalf("transient storage mismatch: have %x, want %x", got, value)
	}

	// revert the transient state being set and then check that the
	// value is now the empty hash
	state.journal.revert(state, 0)
	if got, exp := state.GetTransientState(addr, key), (common.Hash{}); exp != got {
		t.Fatalf("transient storage mismatch: have %x, want %x", got, exp)
	}

	// set transient state and then copy the statedb and ensure that
	// the transient state is copied
	state.SetTransientState(addr, key, value)
	cpy := state.Copy()
	if got := cpy.GetTransientState(addr, key); got != value {
		t.Fatalf("transient storage mismatch: have %x, want %x", got, value)
	}
}
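
// Transient storage (EIP-1153) is transaction-scoped and memory-only: a
// SetTransientState write is journaled but never reaches the trie, which is
// why nothing above commits to the database. A minimal sketch of the
// contrast, assuming the accessors used in this file:
//
//	state.SetTransientState(addr, key, value) // journaled, in-memory only
//	state.SetState(addr, key, value)          // journaled and written to the trie on commit
//	root, _ := state.Commit(false)            // persists the SetState write only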