Move header fields into private struct; expose them using getters/setters

Also introduce NewHeader and NewHeaderWith functions.  The latter is
friendlier for create-and-initialize expressions, and can be used
in place of the old &Header{field: value, ...} syntax.
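
A minimal before/after sketch of the new API (illustrative only; it assumes the github.com/harmony-one/harmony/block import path, and the field values are arbitrary):

	package main

	import (
		"fmt"
		"math/big"

		"github.com/harmony-one/harmony/block"
	)

	func main() {
		// Old style (removed by this commit):
		//   h := &block.Header{Number: big.NewInt(314), ShardID: 0}
		//   gas := h.GasLimit
		// New style: build via NewHeaderWith and read through getters.
		h := block.NewHeaderWith().
			Number(big.NewInt(314)).
			ShardID(0).
			GasLimit(10000000000).
			Header()
		fmt.Println(h.Number(), h.ShardID(), h.GasLimit())
	}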
Ref: pull/1508/head
Author: Eugene Kim (5 years ago)
Parent: d26525af63
Commit: 8cc2543c9e
1. api/proto/node/node_test.go (16)
2. api/service/explorer/service.go (2)
3. api/service/explorer/storage_test.go (4)
4. api/service/explorer/structs_test.go (2)
5. api/service/resharding/service.go (4)
6. api/service/syncing/syncing.go (2)
7. block/gen_header_json.go (48)
8. block/header.go (523)
9. consensus/consensus_service.go (30)
10. consensus/consensus_v2.go (82)
11. contracts/contract_caller.go (2)
12. core/block_validator.go (46)
13. core/blockchain.go (56)
14. core/chain_indexer.go (12)
15. core/chain_makers.go (40)
16. core/core_test.go (14)
17. core/evm.go (16)
18. core/genesis.go (32)
19. core/headerchain.go (39)
20. core/rawdb/accessors_chain.go (23)
21. core/rawdb/accessors_chain_test.go (46)
22. core/rawdb/accessors_indexes_test.go (2)
23. core/state_processor.go (16)
24. core/tx_pool.go (22)
25. core/tx_pool_test.go (6)
26. core/types/block.go (115)
27. core/types/block_test.go (11)
28. core/types/crosslink.go (8)
29. drand/drand_test.go (2)
30. hmy/api_backend.go (2)
31. hmy/bloombits.go (2)
32. hmyclient/hmyclient.go (4)
33. internal/chain/engine.go (64)
34. internal/chain/reward.go (20)
35. internal/hmyapi/blockchain.go (2)
36. internal/hmyapi/filters/filter.go (4)
37. internal/hmyapi/filters/filter_system.go (10)
38. internal/hmyapi/types.go (24)
39. node/node.go (2)
40. node/node_cross_shard.go (56)
41. node/node_handler.go (24)
42. node/node_newblock.go (4)
43. node/worker/worker.go (71)
44. shard/shard_state.go (4)

api/proto/node/node_test.go
@@ -60,14 +60,14 @@ func TestConstructBlocksSyncMessage(t *testing.T) {
	statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
	root := statedb.IntermediateRoot(false)
-	head := &block.Header{
-		Number: new(big.Int).SetUint64(uint64(10000)),
-		Epoch: big.NewInt(0),
-		ShardID: 0,
-		Time: new(big.Int).SetUint64(uint64(100000)),
-		Root: root,
-	}
-	head.GasLimit = 10000000000
+	head := block.NewHeaderWith().
+		Number(new(big.Int).SetUint64(uint64(10000))).
+		Epoch(big.NewInt(0)).
+		ShardID(0).
+		Time(new(big.Int).SetUint64(uint64(100000))).
+		Root(root).
+		GasLimit(10000000000).
+		Header()
	if _, err := statedb.Commit(false); err != nil {
		t.Fatalf("statedb.Commit() failed: %s", err)

api/service/explorer/service.go
@@ -235,7 +235,7 @@ func (s *Service) GetExplorerBlocks(w http.ResponseWriter, r *http.Request) {
			}
			mask, err := bls2.NewMask(pubkeys, nil)
			if err == nil && accountBlocks[id+1] != nil {
-				err = mask.SetMask(accountBlocks[id+1].Header().LastCommitBitmap)
+				err = mask.SetMask(accountBlocks[id+1].Header().LastCommitBitmap())
				if err == nil {
					for _, validator := range committee.NodeList {
						oneAddress, err := common2.AddressToBech32(validator.EcdsaAddress)

api/service/explorer/storage_test.go
@@ -58,7 +58,7 @@ func TestDump(t *testing.T) {
	tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), 0, big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33})
	txs := []*types.Transaction{tx1, tx2, tx3}
-	block := types.NewBlock(&block2.Header{Number: big.NewInt(314)}, txs, nil, nil, nil)
+	block := types.NewBlock(block2.NewHeaderWith().Number(big.NewInt(314)).Header(), txs, nil, nil, nil)
	ins := GetStorageInstance("1.1.1.1", "3333", true)
	ins.Dump(block, uint64(1))
	db := ins.GetDB()
@@ -116,7 +116,7 @@ func TestUpdateAddressStorage(t *testing.T) {
	tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), 0, big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33})
	txs := []*types.Transaction{tx1, tx2, tx3}
-	block := types.NewBlock(&block2.Header{Number: big.NewInt(314)}, txs, nil, nil, nil)
+	block := types.NewBlock(block2.NewHeaderWith().Number(big.NewInt(314)).Header(), txs, nil, nil, nil)
	ins := GetStorageInstance("1.1.1.1", "3333", true)
	ins.Dump(block, uint64(1))
	db := ins.GetDB()

api/service/explorer/structs_test.go
@@ -20,7 +20,7 @@ func TestGetTransaction(t *testing.T) {
	tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), 0, big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33})
	txs := []*types.Transaction{tx1, tx2, tx3}
-	block := types.NewBlock(&block2.Header{Number: big.NewInt(314)}, txs, nil, nil, nil)
+	block := types.NewBlock(block2.NewHeaderWith().Number(big.NewInt(314)).Header(), txs, nil, nil, nil)
	tx := GetTransaction(tx1, block)
	assert.Equal(t, tx.ID, tx1.Hash().Hex(), "should be equal tx1.Hash()")

api/service/resharding/service.go
@@ -61,11 +61,11 @@ func (s *Service) Run(stopChan chan struct{}, stoppedChan chan struct{}) {
func (s *Service) DoService() {
	tick := time.NewTicker(ReshardingCheckTime)
	// Get current shard state hash.
-	currentShardStateHash := s.beaconChain.CurrentBlock().Header().ShardStateHash
+	currentShardStateHash := s.beaconChain.CurrentBlock().Header().ShardStateHash()
	for {
		select {
		case <-tick.C:
-			LatestShardStateHash := s.beaconChain.CurrentBlock().Header().ShardStateHash
+			LatestShardStateHash := s.beaconChain.CurrentBlock().Header().ShardStateHash()
			if currentShardStateHash != LatestShardStateHash {
				// TODO(minhdoan): Add resharding logic later after modifying the resharding func as it current doesn't calculate the role (leader/validator)
			}

api/service/syncing/syncing.go
@@ -555,7 +555,7 @@ func (ss *StateSync) updateBlockAndStatus(block *types.Block, bc *core.BlockChai
		return false
	}
	ss.syncMux.Lock()
-	if err := worker.UpdateCurrent(block.Header().Coinbase); err != nil {
+	if err := worker.UpdateCurrent(block.Header().Coinbase()); err != nil {
		utils.Logger().Warn().Err(err).Msg("[SYNC] (*Worker).UpdateCurrent failed")
	}
	ss.syncMux.Unlock()

block/gen_header_json.go
@@ -32,18 +32,18 @@ func (h Header) MarshalJSON() ([]byte, error) {
		Hash common.Hash `json:"hash"`
	}
	var enc Header
-	enc.ParentHash = h.ParentHash
-	enc.Coinbase = h.Coinbase
-	enc.Root = h.Root
-	enc.TxHash = h.TxHash
-	enc.ReceiptHash = h.ReceiptHash
-	enc.Bloom = h.Bloom
-	enc.Number = (*hexutil.Big)(h.Number)
-	enc.GasLimit = hexutil.Uint64(h.GasLimit)
-	enc.GasUsed = hexutil.Uint64(h.GasUsed)
-	enc.Time = (*hexutil.Big)(h.Time)
-	enc.Extra = h.Extra
-	enc.MixDigest = h.MixDigest
+	enc.ParentHash = h.ParentHash()
+	enc.Coinbase = h.Coinbase()
+	enc.Root = h.Root()
+	enc.TxHash = h.TxHash()
+	enc.ReceiptHash = h.ReceiptHash()
+	enc.Bloom = h.Bloom()
+	enc.Number = (*hexutil.Big)(h.Number())
+	enc.GasLimit = hexutil.Uint64(h.GasLimit())
+	enc.GasUsed = hexutil.Uint64(h.GasUsed())
+	enc.Time = (*hexutil.Big)(h.Time())
+	enc.Extra = h.Extra()
+	enc.MixDigest = h.MixDigest()
	enc.Hash = h.Hash()
	return json.Marshal(&enc)
}
@@ -71,47 +71,47 @@ func (h *Header) UnmarshalJSON(input []byte) error {
	if dec.ParentHash == nil {
		return errors.New("missing required field 'parentHash' for Header")
	}
-	h.ParentHash = *dec.ParentHash
+	h.SetParentHash(*dec.ParentHash)
	if dec.Coinbase == nil {
		return errors.New("missing required field 'miner' for Header")
	}
-	h.Coinbase = *dec.Coinbase
+	h.SetCoinbase(*dec.Coinbase)
	if dec.Root == nil {
		return errors.New("missing required field 'stateRoot' for Header")
	}
-	h.Root = *dec.Root
+	h.SetRoot(*dec.Root)
	if dec.TxHash == nil {
		return errors.New("missing required field 'transactionsRoot' for Header")
	}
-	h.TxHash = *dec.TxHash
+	h.SetTxHash(*dec.TxHash)
	if dec.ReceiptHash == nil {
		return errors.New("missing required field 'receiptsRoot' for Header")
	}
-	h.ReceiptHash = *dec.ReceiptHash
+	h.SetReceiptHash(*dec.ReceiptHash)
	if dec.Bloom == nil {
		return errors.New("missing required field 'logsBloom' for Header")
	}
-	h.Bloom = *dec.Bloom
-	h.Number = (*big.Int)(dec.Number)
+	h.SetBloom(*dec.Bloom)
+	h.SetNumber((*big.Int)(dec.Number))
	if dec.GasLimit == nil {
		return errors.New("missing required field 'gasLimit' for Header")
	}
-	h.GasLimit = uint64(*dec.GasLimit)
+	h.SetGasLimit(uint64(*dec.GasLimit))
	if dec.GasUsed == nil {
		return errors.New("missing required field 'gasUsed' for Header")
	}
-	h.GasUsed = uint64(*dec.GasUsed)
+	h.SetGasUsed(uint64(*dec.GasUsed))
	if dec.Time == nil {
		return errors.New("missing required field 'timestamp' for Header")
	}
-	h.Time = (*big.Int)(dec.Time)
+	h.SetTime((*big.Int)(dec.Time))
	if dec.Extra == nil {
		return errors.New("missing required field 'extraData' for Header")
	}
-	h.Extra = *dec.Extra
+	h.SetExtra(*dec.Extra)
	if dec.MixDigest == nil {
		return errors.New("missing required field 'mixHash' for Header")
	}
-	h.MixDigest = *dec.MixDigest
+	h.SetMixDigest(*dec.MixDigest)
	return nil
}
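
For context, a small sketch (not part of the diff) of how the regenerated JSON codec is driven through the new accessors; it assumes encoding/json together with the NewHeader constructor and setters added in block/header.go below:

	h := block.NewHeader()
	h.SetGasLimit(80000000)
	raw, _ := json.Marshal(h)  // MarshalJSON above reads the fields via getters
	_ = json.Unmarshal(raw, h) // UnmarshalJSON writes them back via setters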

block/header.go
@@ -1,6 +1,7 @@
package block
import (
	"io"
	"math/big"
	"unsafe"
@@ -16,6 +17,30 @@ import (
// Header represents a block header in the Harmony blockchain.
type Header struct {
fields headerFields
}
// EncodeRLP encodes the header fields into RLP format.
func (h *Header) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, &h.fields)
}
// DecodeRLP decodes the given RLP decode stream into the header fields.
func (h *Header) DecodeRLP(s *rlp.Stream) error {
return s.Decode(&h.fields)
}
// NewHeader creates a new header object.
func NewHeader() *Header {
return &Header{headerFields{
Number: new(big.Int),
Time: new(big.Int),
ViewID: new(big.Int),
Epoch: new(big.Int),
}}
}
type headerFields struct {
	ParentHash common.Hash `json:"parentHash" gencodec:"required"`
	Coinbase common.Address `json:"miner" gencodec:"required"`
	Root common.Hash `json:"stateRoot" gencodec:"required"`
@@ -43,6 +68,301 @@ type Header struct {
	CrossLinks []byte `json:"crossLink"`
}
// ParentHash is the header hash of the parent block. For the genesis block
// which has no parent by definition, this field is zeroed out.
func (h *Header) ParentHash() common.Hash {
return h.fields.ParentHash
}
// SetParentHash sets the parent hash field.
func (h *Header) SetParentHash(newParentHash common.Hash) {
h.fields.ParentHash = newParentHash
}
// Coinbase is the address of the node that proposed this block and all
// transactions in it.
func (h *Header) Coinbase() common.Address {
return h.fields.Coinbase
}
// SetCoinbase sets the coinbase address field.
func (h *Header) SetCoinbase(newCoinbase common.Address) {
h.fields.Coinbase = newCoinbase
}
// Root is the state (account) trie root hash.
func (h *Header) Root() common.Hash {
return h.fields.Root
}
// SetRoot sets the state trie root hash field.
func (h *Header) SetRoot(newRoot common.Hash) {
h.fields.Root = newRoot
}
// TxHash is the transaction trie root hash.
func (h *Header) TxHash() common.Hash {
return h.fields.TxHash
}
// SetTxHash sets the transaction trie root hash field.
func (h *Header) SetTxHash(newTxHash common.Hash) {
h.fields.TxHash = newTxHash
}
// ReceiptHash is the same-shard transaction receipt trie hash.
func (h *Header) ReceiptHash() common.Hash {
return h.fields.ReceiptHash
}
// SetReceiptHash sets the same-shard transaction receipt trie hash.
func (h *Header) SetReceiptHash(newReceiptHash common.Hash) {
h.fields.ReceiptHash = newReceiptHash
}
// OutgoingReceiptHash is the egress transaction receipt trie hash.
func (h *Header) OutgoingReceiptHash() common.Hash {
return h.fields.OutgoingReceiptHash
}
// SetOutgoingReceiptHash sets the egress transaction receipt trie hash.
func (h *Header) SetOutgoingReceiptHash(newOutgoingReceiptHash common.Hash) {
h.fields.OutgoingReceiptHash = newOutgoingReceiptHash
}
// IncomingReceiptHash is the ingress transaction receipt trie hash.
func (h *Header) IncomingReceiptHash() common.Hash {
return h.fields.IncomingReceiptHash
}
// SetIncomingReceiptHash sets the ingress transaction receipt trie hash.
func (h *Header) SetIncomingReceiptHash(newIncomingReceiptHash common.Hash) {
h.fields.IncomingReceiptHash = newIncomingReceiptHash
}
// Bloom is the Bloom filter that indexes accounts and topics logged by smart
// contract transactions (executions) in this block.
func (h *Header) Bloom() types.Bloom {
return h.fields.Bloom
}
// SetBloom sets the smart contract log Bloom filter for this block.
func (h *Header) SetBloom(newBloom types.Bloom) {
h.fields.Bloom = newBloom
}
// Number is the block number.
//
// The returned instance is a copy; the caller may do anything with it.
func (h *Header) Number() *big.Int {
return new(big.Int).Set(h.fields.Number)
}
// SetNumber sets the block number.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetNumber(newNumber *big.Int) {
h.fields.Number.Set(newNumber)
}
// GasLimit is the gas limit for transactions in this block.
func (h *Header) GasLimit() uint64 {
return h.fields.GasLimit
}
// SetGasLimit sets the gas limit for transactions in this block.
func (h *Header) SetGasLimit(newGasLimit uint64) {
h.fields.GasLimit = newGasLimit
}
// GasUsed is the amount of gas used by transactions in this block.
func (h *Header) GasUsed() uint64 {
return h.fields.GasUsed
}
// SetGasUsed sets the amount of gas used by transactions in this block.
func (h *Header) SetGasUsed(newGasUsed uint64) {
h.fields.GasUsed = newGasUsed
}
// Time is the UNIX timestamp of this block.
//
// The returned instance is a copy; the caller may do anything with it.
func (h *Header) Time() *big.Int {
return new(big.Int).Set(h.fields.Time)
}
// SetTime sets the UNIX timestamp of this block.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetTime(newTime *big.Int) {
h.fields.Time.Set(newTime)
}
// Extra is the extra data field of this block.
//
// The returned slice is a copy; the caller may do anything with it.
func (h *Header) Extra() []byte {
return append(h.fields.Extra[:0:0], h.fields.Extra...)
}
// SetExtra sets the extra data field of this block.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetExtra(newExtra []byte) {
h.fields.Extra = append(newExtra[:0:0], newExtra...)
}
// MixDigest is the mixhash.
//
// This field is a remnant from Ethereum, and Harmony does not use it and always
// zeroes it out.
func (h *Header) MixDigest() common.Hash {
return h.fields.MixDigest
}
// SetMixDigest sets the mixhash of this block.
func (h *Header) SetMixDigest(newMixDigest common.Hash) {
h.fields.MixDigest = newMixDigest
}
// ViewID is the ID of the view in which this block was originally proposed.
//
// It normally increases by one for each subsequent block, or by more than one
// if one or more PBFT/FBFT view changes have occurred.
//
// The returned instance is a copy; the caller may do anything with it.
func (h *Header) ViewID() *big.Int {
return new(big.Int).Set(h.fields.ViewID)
}
// SetViewID sets the view ID in which the block was originally proposed.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetViewID(newViewID *big.Int) {
h.fields.ViewID.Set(newViewID)
}
// Epoch is the epoch number of this block.
//
// The returned instance is a copy; the caller may do anything with it.
func (h *Header) Epoch() *big.Int {
return new(big.Int).Set(h.fields.Epoch)
}
// SetEpoch sets the epoch number of this block.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetEpoch(newEpoch *big.Int) {
h.fields.Epoch.Set(newEpoch)
}
// ShardID is the shard ID to which this block belongs.
func (h *Header) ShardID() uint32 {
return h.fields.ShardID
}
// SetShardID sets the shard ID to which this block belongs.
func (h *Header) SetShardID(newShardID uint32) {
h.fields.ShardID = newShardID
}
// LastCommitSignature is the FBFT commit group signature for the last block.
func (h *Header) LastCommitSignature() [96]byte {
return h.fields.LastCommitSignature
}
// SetLastCommitSignature sets the FBFT commit group signature for the last
// block.
func (h *Header) SetLastCommitSignature(newLastCommitSignature [96]byte) {
h.fields.LastCommitSignature = newLastCommitSignature
}
// LastCommitBitmap is the signatory bitmap of the previous block. Bit
// positions index into committee member array.
//
// The returned slice is a copy; the caller may do anything with it.
func (h *Header) LastCommitBitmap() []byte {
return append(h.fields.LastCommitBitmap[:0:0], h.fields.LastCommitBitmap...)
}
// SetLastCommitBitmap sets the signatory bitmap of the previous block.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetLastCommitBitmap(newLastCommitBitmap []byte) {
h.fields.LastCommitBitmap = append(newLastCommitBitmap[:0:0], newLastCommitBitmap...)
}
// ShardStateHash is the shard state hash.
func (h *Header) ShardStateHash() common.Hash {
return h.fields.ShardStateHash
}
// SetShardStateHash sets the shard state hash.
func (h *Header) SetShardStateHash(newShardStateHash common.Hash) {
h.fields.ShardStateHash = newShardStateHash
}
// Vrf is the output of the VRF for the epoch.
//
// The returned slice is a copy; the caller may do anything with it.
func (h *Header) Vrf() []byte {
return append(h.fields.Vrf[:0:0], h.fields.Vrf...)
}
// SetVrf sets the output of the VRF for the epoch.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetVrf(newVrf []byte) {
h.fields.Vrf = append(newVrf[:0:0], newVrf...)
}
// Vdf is the output of the VDF for the epoch.
//
// The returned slice is a copy; the caller may do anything with it.
func (h *Header) Vdf() []byte {
return append(h.fields.Vdf[:0:0], h.fields.Vdf...)
}
// SetVdf sets the output of the VDF for the epoch.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetVdf(newVdf []byte) {
h.fields.Vdf = append(newVdf[:0:0], newVdf...)
}
// ShardState is the RLP-encoded form of shard state (list of committees) for
// the next epoch.
//
// The returned slice is a copy; the caller may do anything with it.
func (h *Header) ShardState() []byte {
return append(h.fields.ShardState[:0:0], h.fields.ShardState...)
}
// SetShardState sets the RLP-encoded form of shard state
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetShardState(newShardState []byte) {
h.fields.ShardState = append(newShardState[:0:0], newShardState...)
}
// CrossLinks is the RLP-encoded form of non-beacon block headers chosen to be
// canonical by the beacon committee. This field is present only on beacon
// chain block headers.
//
// The returned slice is a copy; the caller may do anything with it.
func (h *Header) CrossLinks() []byte {
return append(h.fields.CrossLinks[:0:0], h.fields.CrossLinks...)
}
// SetCrossLinks sets the RLP-encoded form of non-beacon block headers chosen to
// be canonical by the beacon committee.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetCrossLinks(newCrossLinks []byte) {
h.fields.CrossLinks = append(newCrossLinks[:0:0], newCrossLinks...)
}
// field type overrides for gencodec
type headerMarshaling struct {
	Difficulty *hexutil.Big
@@ -64,7 +384,7 @@ func (h *Header) Hash() common.Hash {
// to approximate and limit the memory consumption of various caches.
func (h *Header) Size() common.StorageSize {
	// TODO: update with new fields
-	return common.StorageSize(unsafe.Sizeof(*h)) + common.StorageSize(len(h.Extra)+(h.Number.BitLen()+h.Time.BitLen())/8)
+	return common.StorageSize(unsafe.Sizeof(*h)) + common.StorageSize(len(h.Extra())+(h.Number().BitLen()+h.Time().BitLen())/8)
}
// Logger returns a sub-logger with block contexts added.
@@ -72,9 +392,9 @@ func (h *Header) Logger(logger *zerolog.Logger) *zerolog.Logger {
	nlogger := logger.
		With().
		Str("blockHash", h.Hash().Hex()).
-		Uint32("blockShard", h.ShardID).
-		Uint64("blockEpoch", h.Epoch.Uint64()).
-		Uint64("blockNumber", h.Number.Uint64()).
+		Uint32("blockShard", h.ShardID()).
+		Uint64("blockEpoch", h.Epoch().Uint64()).
+		Uint64("blockNumber", h.Number().Uint64()).
		Logger()
	return &nlogger
}
@@ -82,9 +402,202 @@ func (h *Header) Logger(logger *zerolog.Logger) *zerolog.Logger {
// GetShardState returns the deserialized shard state object.
func (h *Header) GetShardState() (shard.State, error) {
	shardState := shard.State{}
-	err := rlp.DecodeBytes(h.ShardState, &shardState)
+	err := rlp.DecodeBytes(h.ShardState(), &shardState)
	if err != nil {
		return nil, err
	}
	return shardState, nil
}
// HeaderFieldSetter is a header field setter.
//
// See NewHeaderWith for how it is used.
type HeaderFieldSetter struct {
h *Header
}
// NewHeaderWith creates a new header and returns its field setter context.
//
// Call a chain of setters on the returned field setter, followed by a call of
// Header method. Example:
//
// header := NewHeaderWith().
// ParentHash(parent.Hash()).
// Epoch(parent.Epoch()).
// ShardID(parent.ShardID()).
// Number(new(big.Int).Add(parent.Number(), big.NewInt(1))).
// Header()
func NewHeaderWith() *HeaderFieldSetter {
return &HeaderFieldSetter{h: NewHeader()}
}
// ParentHash sets the parent hash field.
func (s HeaderFieldSetter) ParentHash(newParentHash common.Hash) HeaderFieldSetter {
s.h.SetParentHash(newParentHash)
return s
}
// Coinbase sets the coinbase address field.
func (s HeaderFieldSetter) Coinbase(newCoinbase common.Address) HeaderFieldSetter {
s.h.SetCoinbase(newCoinbase)
return s
}
// Root sets the state trie root hash field.
func (s HeaderFieldSetter) Root(newRoot common.Hash) HeaderFieldSetter {
s.h.SetRoot(newRoot)
return s
}
// TxHash sets the transaction trie root hash field.
func (s HeaderFieldSetter) TxHash(newTxHash common.Hash) HeaderFieldSetter {
s.h.SetTxHash(newTxHash)
return s
}
// ReceiptHash sets the same-shard transaction receipt trie hash.
func (s HeaderFieldSetter) ReceiptHash(newReceiptHash common.Hash) HeaderFieldSetter {
s.h.SetReceiptHash(newReceiptHash)
return s
}
// OutgoingReceiptHash sets the egress transaction receipt trie hash.
func (s HeaderFieldSetter) OutgoingReceiptHash(newOutgoingReceiptHash common.Hash) HeaderFieldSetter {
s.h.SetOutgoingReceiptHash(newOutgoingReceiptHash)
return s
}
// IncomingReceiptHash sets the ingress transaction receipt trie hash.
func (s HeaderFieldSetter) IncomingReceiptHash(newIncomingReceiptHash common.Hash) HeaderFieldSetter {
s.h.SetIncomingReceiptHash(newIncomingReceiptHash)
return s
}
// Bloom sets the smart contract log Bloom filter for this block.
func (s HeaderFieldSetter) Bloom(newBloom types.Bloom) HeaderFieldSetter {
s.h.SetBloom(newBloom)
return s
}
// Number sets the block number.
//
// It stores a copy; the caller may freely modify the original.
func (s HeaderFieldSetter) Number(newNumber *big.Int) HeaderFieldSetter {
s.h.SetNumber(newNumber)
return s
}
// GasLimit sets the gas limit for transactions in this block.
func (s HeaderFieldSetter) GasLimit(newGasLimit uint64) HeaderFieldSetter {
s.h.SetGasLimit(newGasLimit)
return s
}
// GasUsed sets the amount of gas used by transactions in this block.
func (s HeaderFieldSetter) GasUsed(newGasUsed uint64) HeaderFieldSetter {
s.h.SetGasUsed(newGasUsed)
return s
}
// Time sets the UNIX timestamp of this block.
//
// It stores a copy; the caller may freely modify the original.
func (s HeaderFieldSetter) Time(newTime *big.Int) HeaderFieldSetter {
s.h.SetTime(newTime)
return s
}
// Extra sets the extra data field of this block.
//
// It stores a copy; the caller may freely modify the original.
func (s HeaderFieldSetter) Extra(newExtra []byte) HeaderFieldSetter {
s.h.SetExtra(newExtra)
return s
}
// MixDigest sets the mixhash of this block.
func (s HeaderFieldSetter) MixDigest(newMixDigest common.Hash) HeaderFieldSetter {
s.h.SetMixDigest(newMixDigest)
return s
}
// ViewID sets the view ID in which the block was originally proposed.
//
// It stores a copy; the caller may freely modify the original.
func (s HeaderFieldSetter) ViewID(newViewID *big.Int) HeaderFieldSetter {
s.h.SetViewID(newViewID)
return s
}
// Epoch sets the epoch number of this block.
//
// It stores a copy; the caller may freely modify the original.
func (s HeaderFieldSetter) Epoch(newEpoch *big.Int) HeaderFieldSetter {
s.h.SetEpoch(newEpoch)
return s
}
// ShardID sets the shard ID to which this block belongs.
func (s HeaderFieldSetter) ShardID(newShardID uint32) HeaderFieldSetter {
s.h.SetShardID(newShardID)
return s
}
// LastCommitSignature sets the FBFT commit group signature for the last block.
func (s HeaderFieldSetter) LastCommitSignature(newLastCommitSignature [96]byte) HeaderFieldSetter {
s.h.SetLastCommitSignature(newLastCommitSignature)
return s
}
// LastCommitBitmap sets the signatory bitmap of the previous block.
//
// It stores a copy; the caller may freely modify the original.
func (s HeaderFieldSetter) LastCommitBitmap(newLastCommitBitmap []byte) HeaderFieldSetter {
s.h.SetLastCommitBitmap(newLastCommitBitmap)
return s
}
// ShardStateHash sets the shard state hash.
func (s HeaderFieldSetter) ShardStateHash(newShardStateHash common.Hash) HeaderFieldSetter {
s.h.SetShardStateHash(newShardStateHash)
return s
}
// Vrf sets the output of the VRF for the epoch.
//
// It stores a copy; the caller may freely modify the original.
func (s HeaderFieldSetter) Vrf(newVrf []byte) HeaderFieldSetter {
s.h.SetVrf(newVrf)
return s
}
// Vdf sets the output of the VDF for the epoch.
//
// It stores a copy; the caller may freely modify the original.
func (s HeaderFieldSetter) Vdf(newVdf []byte) HeaderFieldSetter {
s.h.SetVdf(newVdf)
return s
}
// ShardState sets the RLP-encoded form of shard state
//
// It stores a copy; the caller may freely modify the original.
func (s HeaderFieldSetter) ShardState(newShardState []byte) HeaderFieldSetter {
s.h.SetShardState(newShardState)
return s
}
// CrossLinks sets the RLP-encoded form of non-beacon block headers chosen to be
// canonical by the beacon committee.
//
// It stores a copy; the caller may freely modify the original.
func (s HeaderFieldSetter) CrossLinks(newCrossLinks []byte) HeaderFieldSetter {
s.h.SetCrossLinks(newCrossLinks)
return s
}
// Header returns the header whose fields have been set. Call this at the end
// of a field setter chain.
func (s HeaderFieldSetter) Header() *Header {
return s.h
}
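
Usage note (illustrative, not part of the diff): the getters above hand out copies, so callers cannot mutate header state through a returned value. The snippet assumes the fmt, math/big, and block imports from the earlier sketch.

	h := block.NewHeader()
	h.SetNumber(big.NewInt(7))
	n := h.Number()         // copy of the stored big.Int
	n.Add(n, big.NewInt(1)) // modifies only the copy
	fmt.Println(h.Number()) // still 7

	extra := []byte{1, 2, 3}
	h.SetExtra(extra)      // stores a copy
	extra[0] = 9           // does not affect the header
	fmt.Println(h.Extra()) // still [1 2 3]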

consensus/consensus_service.go
@@ -458,35 +458,35 @@ func (consensus *Consensus) getLogger() *zerolog.Logger {
// retrieve corresponding blsPublicKey from Coinbase Address
func (consensus *Consensus) getLeaderPubKeyFromCoinbase(header *block.Header) (*bls.PublicKey, error) {
-	shardState, err := consensus.ChainReader.ReadShardState(header.Epoch)
+	shardState, err := consensus.ChainReader.ReadShardState(header.Epoch())
	if err != nil {
		return nil, ctxerror.New("cannot read shard state",
-			"epoch", header.Epoch,
-			"coinbaseAddr", header.Coinbase,
+			"epoch", header.Epoch(),
+			"coinbaseAddr", header.Coinbase(),
		).WithCause(err)
	}
-	committee := shardState.FindCommitteeByID(header.ShardID)
+	committee := shardState.FindCommitteeByID(header.ShardID())
	if committee == nil {
		return nil, ctxerror.New("cannot find shard in the shard state",
-			"blockNum", header.Number,
-			"shardID", header.ShardID,
-			"coinbaseAddr", header.Coinbase,
+			"blockNum", header.Number(),
+			"shardID", header.ShardID(),
+			"coinbaseAddr", header.Coinbase(),
		)
	}
	committerKey := new(bls.PublicKey)
	for _, member := range committee.NodeList {
-		if member.EcdsaAddress == header.Coinbase {
+		if member.EcdsaAddress == header.Coinbase() {
			err := member.BlsPublicKey.ToLibBLSPublicKey(committerKey)
			if err != nil {
				return nil, ctxerror.New("cannot convert BLS public key",
					"blsPublicKey", member.BlsPublicKey,
-					"coinbaseAddr", header.Coinbase).WithCause(err)
+					"coinbaseAddr", header.Coinbase()).WithCause(err)
			}
			return committerKey, nil
		}
	}
-	return nil, ctxerror.New("cannot find corresponding BLS Public Key", "coinbaseAddr", header.Coinbase)
+	return nil, ctxerror.New("cannot find corresponding BLS Public Key", "coinbaseAddr", header.Coinbase())
}
// UpdateConsensusInformation will update shard information (epoch, publicKeys, blockNum, viewID)
@@ -505,8 +505,8 @@ func (consensus *Consensus) UpdateConsensusInformation() Mode {
	header := consensus.ChainReader.CurrentHeader()
-	epoch := header.Epoch
-	curPubKeys := core.GetPublicKeys(epoch, header.ShardID)
+	epoch := header.Epoch()
+	curPubKeys := core.GetPublicKeys(epoch, header.ShardID())
	consensus.numPrevPubKeys = len(curPubKeys)
	consensus.getLogger().Info().Msg("[UpdateConsensusInformation] Updating.....")
@@ -514,9 +514,9 @@ func (consensus *Consensus) UpdateConsensusInformation() Mode {
	if core.IsEpochLastBlockByHeader(header) {
		// increase epoch by one if it's the last block
		consensus.SetEpochNum(epoch.Uint64() + 1)
-		consensus.getLogger().Info().Uint64("headerNum", header.Number.Uint64()).Msg("[UpdateConsensusInformation] Epoch updated for next epoch")
+		consensus.getLogger().Info().Uint64("headerNum", header.Number().Uint64()).Msg("[UpdateConsensusInformation] Epoch updated for next epoch")
		nextEpoch := new(big.Int).Add(epoch, common.Big1)
-		pubKeys = core.GetPublicKeys(nextEpoch, header.ShardID)
+		pubKeys = core.GetPublicKeys(nextEpoch, header.ShardID())
	} else {
		consensus.SetEpochNum(epoch.Uint64())
		pubKeys = curPubKeys
@@ -534,7 +534,7 @@ func (consensus *Consensus) UpdateConsensusInformation() Mode {
	consensus.UpdatePublicKeys(pubKeys)
	// take care of possible leader change during the epoch
-	if !core.IsEpochLastBlockByHeader(header) && header.Number.Uint64() != 0 {
+	if !core.IsEpochLastBlockByHeader(header) && header.Number().Uint64() != 0 {
		leaderPubKey, err := consensus.getLeaderPubKeyFromCoinbase(header)
		if err != nil || leaderPubKey == nil {
			consensus.getLogger().Debug().Err(err).Msg("[SYNC] Unable to get leaderPubKey from coinbase")

consensus/consensus_v2.go
@@ -184,8 +184,8 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) {
	// verify validity of block header object
	blockHeader := recvMsg.Payload
-	var headerObj block.Header
-	err = rlp.DecodeBytes(blockHeader, &headerObj)
+	headerObj := block.NewHeader()
+	err = rlp.DecodeBytes(blockHeader, headerObj)
	if err != nil {
		consensus.getLogger().Warn().
			Err(err).
@@ -194,35 +194,35 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) {
		return
	}
-	if recvMsg.BlockNum < consensus.blockNum || recvMsg.BlockNum != headerObj.Number.Uint64() {
+	if recvMsg.BlockNum < consensus.blockNum || recvMsg.BlockNum != headerObj.Number().Uint64() {
		consensus.getLogger().Debug().
			Uint64("MsgBlockNum", recvMsg.BlockNum).
			Uint64("blockNum", consensus.blockNum).
-			Uint64("hdrBlockNum", headerObj.Number.Uint64()).
+			Uint64("hdrBlockNum", headerObj.Number().Uint64()).
			Msg("[OnAnnounce] BlockNum does not match")
		return
	}
	if consensus.mode.Mode() == Normal {
-		if err = chain.Engine.VerifyHeader(consensus.ChainReader, &headerObj, true); err != nil {
+		if err = chain.Engine.VerifyHeader(consensus.ChainReader, headerObj, true); err != nil {
			consensus.getLogger().Warn().
				Err(err).
-				Str("inChain", consensus.ChainReader.CurrentHeader().Number.String()).
-				Str("MsgBlockNum", headerObj.Number.String()).
+				Str("inChain", consensus.ChainReader.CurrentHeader().Number().String()).
+				Str("MsgBlockNum", headerObj.Number().String()).
				Msg("[OnAnnounce] Block content is not verified successfully")
			return
		}
		//VRF/VDF is only generated in the beach chain
-		if consensus.NeedsRandomNumberGeneration(headerObj.Epoch) {
+		if consensus.NeedsRandomNumberGeneration(headerObj.Epoch()) {
			//validate the VRF with proof if a non zero VRF is found in header
-			if len(headerObj.Vrf) > 0 {
+			if len(headerObj.Vrf()) > 0 {
				if !consensus.ValidateVrfAndProof(headerObj) {
					return
				}
			}
			//validate the VDF with proof if a non zero VDF is found in header
-			if len(headerObj.Vdf) > 0 {
+			if len(headerObj.Vdf()) > 0 {
				if !consensus.ValidateVdfAndProof(headerObj) {
					return
				}
@@ -513,8 +513,8 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
	if err := chain.Engine.VerifyHeader(consensus.ChainReader, blockObj.Header(), true); err != nil {
		consensus.getLogger().Warn().
			Err(err).
-			Str("inChain", consensus.ChainReader.CurrentHeader().Number.String()).
-			Str("MsgBlockNum", blockObj.Header().Number.String()).
+			Str("inChain", consensus.ChainReader.CurrentHeader().Number().String()).
+			Str("MsgBlockNum", blockObj.Header().Number().String()).
			Msg("[OnPrepared] Block header is not verified successfully")
		return
	}
@@ -1062,14 +1062,14 @@ func (consensus *Consensus) Start(blockChannel chan *types.Block, stopChan chan
			}
		}
	case <-consensus.syncReadyChan:
-		consensus.SetBlockNum(consensus.ChainReader.CurrentHeader().Number.Uint64() + 1)
-		consensus.SetViewID(consensus.ChainReader.CurrentHeader().ViewID.Uint64() + 1)
+		consensus.SetBlockNum(consensus.ChainReader.CurrentHeader().Number().Uint64() + 1)
+		consensus.SetViewID(consensus.ChainReader.CurrentHeader().ViewID().Uint64() + 1)
		mode := consensus.UpdateConsensusInformation()
		consensus.mode.SetMode(mode)
		consensus.getLogger().Info().Str("Mode", mode.String()).Msg("Node is in sync")
	case <-consensus.syncNotReadyChan:
-		consensus.SetBlockNum(consensus.ChainReader.CurrentHeader().Number.Uint64() + 1)
+		consensus.SetBlockNum(consensus.ChainReader.CurrentHeader().Number().Uint64() + 1)
		consensus.mode.SetMode(Syncing)
		consensus.getLogger().Info().Msg("Node is out of sync")
@@ -1079,14 +1079,14 @@ func (consensus *Consensus) Start(blockChannel chan *types.Block, stopChan chan
				Msg("[ConsensusMainLoop] Received Proposed New Block!")
			//VRF/VDF is only generated in the beacon chain
-			if consensus.NeedsRandomNumberGeneration(newBlock.Header().Epoch) {
+			if consensus.NeedsRandomNumberGeneration(newBlock.Header().Epoch()) {
				// generate VRF if the current block has a new leader
				if !consensus.ChainReader.IsSameLeaderAsPreviousBlock(newBlock) {
-					vrfBlockNumbers, err := consensus.ChainReader.ReadEpochVrfBlockNums(newBlock.Header().Epoch)
+					vrfBlockNumbers, err := consensus.ChainReader.ReadEpochVrfBlockNums(newBlock.Header().Epoch())
					if err != nil {
						consensus.getLogger().Info().
							Uint64("MsgBlockNum", newBlock.NumberU64()).
-							Uint64("Epoch", newBlock.Header().Epoch.Uint64()).
+							Uint64("Epoch", newBlock.Header().Epoch().Uint64()).
							Msg("[ConsensusMainLoop] no VRF block number from local db")
					}
@@ -1096,7 +1096,7 @@ func (consensus *Consensus) Start(blockChannel chan *types.Block, stopChan chan
					if v == newBlock.NumberU64() {
						consensus.getLogger().Info().
							Uint64("MsgBlockNum", newBlock.NumberU64()).
-							Uint64("Epoch", newBlock.Header().Epoch.Uint64()).
+							Uint64("Epoch", newBlock.Header().Epoch().Uint64()).
							Msg("[ConsensusMainLoop] VRF is already generated for this block")
						vrfAlreadyGenerated = true
						break
@@ -1113,7 +1113,7 @@ func (consensus *Consensus) Start(blockChannel chan *types.Block, stopChan chan
				if (!vdfInProgress) && len(vrfBlockNumbers) >= consensus.VdfSeedSize() {
					//check local database to see if there's a VDF generated for this epoch
					//generate a VDF if no blocknum is available
-					_, err := consensus.ChainReader.ReadEpochVdfBlockNum(newBlock.Header().Epoch)
+					_, err := consensus.ChainReader.ReadEpochVdfBlockNum(newBlock.Header().Epoch())
					if err != nil {
						consensus.GenerateVdfAndProof(newBlock, vrfBlockNumbers)
						vdfInProgress = true
@@ -1130,20 +1130,20 @@ func (consensus *Consensus) Start(blockChannel chan *types.Block, stopChan chan
					if !vdfObject.Verify(vdfOutput) {
						consensus.getLogger().Warn().
							Uint64("MsgBlockNum", newBlock.NumberU64()).
-							Uint64("Epoch", newBlock.Header().Epoch.Uint64()).
+							Uint64("Epoch", newBlock.Header().Epoch().Uint64()).
							Msg("[ConsensusMainLoop] failed to verify the VDF output")
					} else {
						//write the VDF only if VDF has not been generated
-						_, err := consensus.ChainReader.ReadEpochVdfBlockNum(newBlock.Header().Epoch)
+						_, err := consensus.ChainReader.ReadEpochVdfBlockNum(newBlock.Header().Epoch())
						if err == nil {
							consensus.getLogger().Info().
								Uint64("MsgBlockNum", newBlock.NumberU64()).
-								Uint64("Epoch", newBlock.Header().Epoch.Uint64()).
+								Uint64("Epoch", newBlock.Header().Epoch().Uint64()).
								Msg("[ConsensusMainLoop] VDF has already been generated previously")
						} else {
							consensus.getLogger().Info().
								Uint64("MsgBlockNum", newBlock.NumberU64()).
-								Uint64("Epoch", newBlock.Header().Epoch.Uint64()).
+								Uint64("Epoch", newBlock.Header().Epoch().Uint64()).
								Msg("[ConsensusMainLoop] Generated a new VDF")
							newBlock.AddVdf(vdfOutput[:])
@@ -1197,7 +1197,7 @@ func (consensus *Consensus) GenerateVrfAndProof(newBlock *types.Block, vrfBlockN
	consensus.getLogger().Info().
		Uint64("MsgBlockNum", newBlock.NumberU64()).
-		Uint64("Epoch", newBlock.Header().Epoch.Uint64()).
+		Uint64("Epoch", newBlock.Header().Epoch().Uint64()).
		Int("Num of VRF", len(vrfBlockNumbers)).
		Msg("[ConsensusMainLoop] Leader generated a VRF")
@@ -1205,35 +1205,35 @@ func (consensus *Consensus) GenerateVrfAndProof(newBlock *types.Block, vrfBlockN
}
// ValidateVrfAndProof validates a VRF/Proof from hash of previous block
-func (consensus *Consensus) ValidateVrfAndProof(headerObj block.Header) bool {
+func (consensus *Consensus) ValidateVrfAndProof(headerObj *block.Header) bool {
	vrfPk := vrf_bls.NewVRFVerifier(consensus.LeaderPubKey)
	var blockHash [32]byte
-	previousHeader := consensus.ChainReader.GetHeaderByNumber(headerObj.Number.Uint64() - 1)
+	previousHeader := consensus.ChainReader.GetHeaderByNumber(headerObj.Number().Uint64() - 1)
	previousHash := previousHeader.Hash()
	copy(blockHash[:], previousHash[:])
	vrfProof := [96]byte{}
-	copy(vrfProof[:], headerObj.Vrf[32:])
+	copy(vrfProof[:], headerObj.Vrf()[32:])
	hash, err := vrfPk.ProofToHash(blockHash[:], vrfProof[:])
	if err != nil {
		consensus.getLogger().Warn().
			Err(err).
-			Str("MsgBlockNum", headerObj.Number.String()).
+			Str("MsgBlockNum", headerObj.Number().String()).
			Msg("[OnAnnounce] VRF verification error")
		return false
	}
-	if !bytes.Equal(hash[:], headerObj.Vrf[:32]) {
+	if !bytes.Equal(hash[:], headerObj.Vrf()[:32]) {
		consensus.getLogger().Warn().
-			Str("MsgBlockNum", headerObj.Number.String()).
+			Str("MsgBlockNum", headerObj.Number().String()).
			Msg("[OnAnnounce] VRF proof is not valid")
		return false
	}
-	vrfBlockNumbers, _ := consensus.ChainReader.ReadEpochVrfBlockNums(headerObj.Epoch)
+	vrfBlockNumbers, _ := consensus.ChainReader.ReadEpochVrfBlockNums(headerObj.Epoch())
	consensus.getLogger().Info().
-		Str("MsgBlockNum", headerObj.Number.String()).
+		Str("MsgBlockNum", headerObj.Number().String()).
		Int("Number of VRF", len(vrfBlockNumbers)).
		Msg("[OnAnnounce] validated a new VRF")
@@ -1253,7 +1253,7 @@ func (consensus *Consensus) GenerateVdfAndProof(newBlock *types.Block, vrfBlockN
	consensus.getLogger().Info().
		Uint64("MsgBlockNum", newBlock.NumberU64()).
-		Uint64("Epoch", newBlock.Header().Epoch.Uint64()).
+		Uint64("Epoch", newBlock.Header().Epoch().Uint64()).
		Int("Num of VRF", len(vrfBlockNumbers)).
		Msg("[ConsensusMainLoop] VDF computation started")
@@ -1277,11 +1277,11 @@ func (consensus *Consensus) GenerateVdfAndProof(newBlock *types.Block, vrfBlockN
}
// ValidateVdfAndProof validates the VDF/proof in the current epoch
-func (consensus *Consensus) ValidateVdfAndProof(headerObj block.Header) bool {
-	vrfBlockNumbers, err := consensus.ChainReader.ReadEpochVrfBlockNums(headerObj.Epoch)
+func (consensus *Consensus) ValidateVdfAndProof(headerObj *block.Header) bool {
+	vrfBlockNumbers, err := consensus.ChainReader.ReadEpochVrfBlockNums(headerObj.Epoch())
	if err != nil {
		consensus.getLogger().Error().Err(err).
-			Str("MsgBlockNum", headerObj.Number.String()).
+			Str("MsgBlockNum", headerObj.Number().String()).
			Msg("[OnAnnounce] failed to read VRF block numbers for VDF computation")
	}
@@ -1301,17 +1301,17 @@ func (consensus *Consensus) ValidateVdfAndProof(headerObj block.Header) bool {
	vdfObject := vdf_go.New(core.ShardingSchedule.VdfDifficulty(), seed)
	vdfOutput := [516]byte{}
-	copy(vdfOutput[:], headerObj.Vdf)
+	copy(vdfOutput[:], headerObj.Vdf())
	if vdfObject.Verify(vdfOutput) {
		consensus.getLogger().Info().
-			Str("MsgBlockNum", headerObj.Number.String()).
+			Str("MsgBlockNum", headerObj.Number().String()).
			Int("Num of VRF", consensus.VdfSeedSize()).
			Msg("[OnAnnounce] validated a new VDF")
	} else {
		consensus.getLogger().Warn().
-			Str("MsgBlockNum", headerObj.Number.String()).
-			Uint64("Epoch", headerObj.Epoch.Uint64()).
+			Str("MsgBlockNum", headerObj.Number().String()).
+			Uint64("Epoch", headerObj.Epoch().Uint64()).
			Int("Num of VRF", consensus.VdfSeedSize()).
			Msg("[OnAnnounce] VDF proof is not valid")
		return false

contracts/contract_caller.go
@@ -33,7 +33,7 @@ func NewContractCaller(bc *core.BlockChain, config *params.ChainConfig) *Contrac
// CallContract calls a contracts with the specified transaction.
func (cc *ContractCaller) CallContract(tx *types.Transaction) ([]byte, error) {
	currBlock := cc.blockchain.CurrentBlock()
-	msg, err := tx.AsMessage(types.MakeSigner(cc.config, currBlock.Header().Number))
+	msg, err := tx.AsMessage(types.MakeSigner(cc.config, currBlock.Header().Number()))
	if err != nil {
		utils.GetLogInstance().Error("[ABI] Failed to convert transaction to message", "error", err)
		return []byte{}, err

core/block_validator.go
@@ -69,8 +69,8 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
	//if err := v.engine.VerifyUncles(v.bc, block); err != nil {
	//	return err
	//}
-	if hash := types.DeriveSha(block.Transactions()); hash != header.TxHash {
-		return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
+	if hash := types.DeriveSha(block.Transactions()); hash != header.TxHash() {
+		return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash())
	}
	return nil
}
@@ -87,24 +87,24 @@ func (v *BlockValidator) ValidateState(block, parent *types.Block, statedb *stat
	// Validate the received block's bloom with the one derived from the generated receipts.
	// For valid blocks this should always validate to true.
	rbloom := types.CreateBloom(receipts)
-	if rbloom != header.Bloom {
-		return fmt.Errorf("invalid bloom (remote: %x local: %x)", header.Bloom, rbloom)
+	if rbloom != header.Bloom() {
+		return fmt.Errorf("invalid bloom (remote: %x local: %x)", header.Bloom(), rbloom)
	}
	// Tre receipt Trie's root (R = (Tr [[H1, R1], ... [Hn, R1]]))
	receiptSha := types.DeriveSha(receipts)
-	if receiptSha != header.ReceiptHash {
-		return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha)
+	if receiptSha != header.ReceiptHash() {
+		return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash(), receiptSha)
	}
	cxsSha := types.DeriveMultipleShardsSha(cxReceipts)
-	if cxsSha != header.OutgoingReceiptHash {
-		return fmt.Errorf("invalid cross shard receipt root hash (remote: %x local: %x)", header.OutgoingReceiptHash, cxsSha)
+	if cxsSha != header.OutgoingReceiptHash() {
+		return fmt.Errorf("invalid cross shard receipt root hash (remote: %x local: %x)", header.OutgoingReceiptHash(), cxsSha)
	}
	// Validate the state root against the received state root and throw
	// an error if they don't match.
-	if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
-		return fmt.Errorf("invalid merkle root (remote: %x local: %x)", header.Root, root)
+	if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number())); header.Root() != root {
+		return fmt.Errorf("invalid merkle root (remote: %x local: %x)", header.Root(), root)
	}
	return nil
}
@@ -114,14 +114,14 @@ func VerifyBlockLastCommitSigs(bc *BlockChain, block *types.Block) error {
	header := block.Header()
	parentBlock := bc.GetBlockByNumber(block.NumberU64() - 1)
	if parentBlock == nil {
-		return ctxerror.New("[VerifyNewBlock] Failed to get parent block", "shardID", header.ShardID, "blockNum", header.Number)
+		return ctxerror.New("[VerifyNewBlock] Failed to get parent block", "shardID", header.ShardID(), "blockNum", header.Number())
	}
	parentHeader := parentBlock.Header()
-	shardState, err := bc.ReadShardState(parentHeader.Epoch)
-	committee := shardState.FindCommitteeByID(parentHeader.ShardID)
+	shardState, err := bc.ReadShardState(parentHeader.Epoch())
+	committee := shardState.FindCommitteeByID(parentHeader.ShardID())
	if err != nil || committee == nil {
-		return ctxerror.New("[VerifyNewBlock] Failed to read shard state for cross link header", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err)
+		return ctxerror.New("[VerifyNewBlock] Failed to read shard state for cross link header", "shardID", header.ShardID(), "blockNum", header.Number()).WithCause(err)
	}
	var committerKeys []*bls.PublicKey
@@ -136,28 +136,30 @@ func VerifyBlockLastCommitSigs(bc *BlockChain, block *types.Block) error {
		committerKeys = append(committerKeys, committerKey)
	}
	if !parseKeysSuccess {
-		return ctxerror.New("[VerifyNewBlock] cannot convert BLS public key", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err)
+		return ctxerror.New("[VerifyNewBlock] cannot convert BLS public key", "shardID", header.ShardID(), "blockNum", header.Number()).WithCause(err)
	}
	mask, err := bls2.NewMask(committerKeys, nil)
	if err != nil {
-		return ctxerror.New("[VerifyNewBlock] cannot create group sig mask", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err)
+		return ctxerror.New("[VerifyNewBlock] cannot create group sig mask", "shardID", header.ShardID(), "blockNum", header.Number()).WithCause(err)
	}
-	if err := mask.SetMask(header.LastCommitBitmap); err != nil {
-		return ctxerror.New("[VerifyNewBlock] cannot set group sig mask bits", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err)
+	if err := mask.SetMask(header.LastCommitBitmap()); err != nil {
+		return ctxerror.New("[VerifyNewBlock] cannot set group sig mask bits", "shardID", header.ShardID(), "blockNum", header.Number()).WithCause(err)
	}
	aggSig := bls.Sign{}
-	err = aggSig.Deserialize(header.LastCommitSignature[:])
+	lastCommitSig := header.LastCommitSignature()
+	err = aggSig.Deserialize(lastCommitSig[:])
	if err != nil {
		return ctxerror.New("[VerifyNewBlock] unable to deserialize multi-signature from payload").WithCause(err)
	}
	blockNumBytes := make([]byte, 8)
-	binary.LittleEndian.PutUint64(blockNumBytes, header.Number.Uint64()-1)
-	commitPayload := append(blockNumBytes, header.ParentHash[:]...)
+	binary.LittleEndian.PutUint64(blockNumBytes, header.Number().Uint64()-1)
+	parentHash := header.ParentHash()
+	commitPayload := append(blockNumBytes, parentHash[:]...)
	if !aggSig.VerifyHash(mask.AggregatePublic, commitPayload) {
-		return ctxerror.New("[VerifyNewBlock] Failed to verify the signature for last commit sig", "shardID", header.ShardID, "blockNum", header.Number)
+		return ctxerror.New("[VerifyNewBlock] Failed to verify the signature for last commit sig", "shardID", header.ShardID(), "blockNum", header.Number())
	}
	return nil
}

@ -251,7 +251,7 @@ func IsEpochLastBlock(block *types.Block) bool {
// IsEpochLastBlockByHeader returns whether this block is the last block of an epoch // IsEpochLastBlockByHeader returns whether this block is the last block of an epoch
// given the block header // given the block header
func IsEpochLastBlockByHeader(header *block.Header) bool { func IsEpochLastBlockByHeader(header *block.Header) bool {
return ShardingSchedule.IsLastBlock(header.Number.Uint64()) return ShardingSchedule.IsLastBlock(header.Number().Uint64())
} }
func (bc *BlockChain) getProcInterrupt() bool { func (bc *BlockChain) getProcInterrupt() bool {
@ -309,15 +309,15 @@ func (bc *BlockChain) loadLastState() error {
// Issue a status log for the user // Issue a status log for the user
currentFastBlock := bc.CurrentFastBlock() currentFastBlock := bc.CurrentFastBlock()
headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()) headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number().Uint64())
blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()) fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
utils.Logger().Info(). utils.Logger().Info().
Str("number", currentHeader.Number.String()). Str("number", currentHeader.Number().String()).
Str("hash", currentHeader.Hash().Hex()). Str("hash", currentHeader.Hash().Hex()).
Str("td", headerTd.String()). Str("td", headerTd.String()).
Str("age", common.PrettyAge(time.Unix(currentHeader.Time.Int64(), 0)).String()). Str("age", common.PrettyAge(time.Unix(currentHeader.Time().Int64(), 0)).String()).
Msg("Loaded most recent local header") Msg("Loaded most recent local header")
utils.Logger().Info(). utils.Logger().Info().
Str("number", currentBlock.Number().String()). Str("number", currentBlock.Number().String()).
@ -361,8 +361,8 @@ func (bc *BlockChain) SetHead(head uint64) error {
bc.shardStateCache.Purge() bc.shardStateCache.Purge()
// Rewind the block chain, ensuring we don't end up with a stateless head block // Rewind the block chain, ensuring we don't end up with a stateless head block
if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() { if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number().Uint64() < currentBlock.NumberU64() {
bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())) bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number().Uint64()))
} }
if currentBlock := bc.CurrentBlock(); currentBlock != nil { if currentBlock := bc.CurrentBlock(); currentBlock != nil {
if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil { if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
@ -371,8 +371,8 @@ func (bc *BlockChain) SetHead(head uint64) error {
} }
} }
// Rewind the fast block in a simplistic way to the target head // Rewind the fast block in a simplistic way to the target head
if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() { if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number().Uint64() < currentFastBlock.NumberU64() {
bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())) bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number().Uint64()))
} }
// If either blocks reached nil, reset to the genesis state // If either blocks reached nil, reset to the genesis state
if currentBlock := bc.CurrentBlock(); currentBlock == nil { if currentBlock := bc.CurrentBlock(); currentBlock == nil {
@ -825,7 +825,7 @@ func (bc *BlockChain) Rollback(chain []common.Hash) {
currentHeader := bc.hc.CurrentHeader() currentHeader := bc.hc.CurrentHeader()
if currentHeader != nil && currentHeader.Hash() == hash { if currentHeader != nil && currentHeader.Hash() == hash {
bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1)) bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash(), currentHeader.Number().Uint64()-1))
} }
if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentFastBlock.Hash() == hash { if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentFastBlock.Hash() == hash {
newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1) newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
@ -1032,7 +1032,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
} }
// Find the next state trie we need to commit // Find the next state trie we need to commit
header := bc.GetHeaderByNumber(current - triesInMemory) header := bc.GetHeaderByNumber(current - triesInMemory)
chosen := header.Number.Uint64() chosen := header.Number().Uint64()
// If we exceeded our time allowance, flush an entire trie to disk // If we exceeded our time allowance, flush an entire trie to disk
if bc.gcproc > bc.cacheConfig.TrieTimeLimit { if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
@ -1046,7 +1046,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
Msg("State in memory for too long, committing") Msg("State in memory for too long, committing")
} }
// Flush an entire trie and restart the counters // Flush an entire trie and restart the counters
triedb.Commit(header.Root, true) triedb.Commit(header.Root(), true)
lastWrite = chosen lastWrite = chosen
bc.gcproc = 0 bc.gcproc = 0
} }
@ -1066,7 +1066,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
batch := bc.db.NewBatch() batch := bc.db.NewBatch()
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts) rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
epoch := block.Header().Epoch epoch := block.Header().Epoch()
shardingConfig := ShardingSchedule.InstanceForEpoch(epoch) shardingConfig := ShardingSchedule.InstanceForEpoch(epoch)
shardNum := int(shardingConfig.NumShards()) shardNum := int(shardingConfig.NumShards())
for i := 0; i < shardNum; i++ { for i := 0; i < shardNum; i++ {
@ -1129,13 +1129,13 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
header := block.Header() header := block.Header()
header.Logger(utils.Logger()).Info(). header.Logger(utils.Logger()).Info().
Int("segmentIndex", idx). Int("segmentIndex", idx).
Str("parentHash", header.ParentHash.Hex()). Str("parentHash", header.ParentHash().Hex()).
Msg("added block to chain") Msg("added block to chain")
// TODO: move into WriteBlockWithState // TODO: move into WriteBlockWithState
if header.ShardStateHash != (common.Hash{}) { if header.ShardStateHash() != (common.Hash{}) {
epoch := new(big.Int).Add(header.Epoch, common.Big1) epoch := new(big.Int).Add(header.Epoch(), common.Big1)
err = bc.WriteShardStateBytes(epoch, header.ShardState) err = bc.WriteShardStateBytes(epoch, header.ShardState())
if err != nil { if err != nil {
header.Logger(utils.Logger()).Warn().Err(err).Msg("cannot store shard state") header.Logger(utils.Logger()).Warn().Err(err).Msg("cannot store shard state")
return n, err return n, err
@ -1143,9 +1143,9 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
} }
// TODO: move into WriteBlockWithState // TODO: move into WriteBlockWithState
if len(header.CrossLinks) > 0 { if len(header.CrossLinks()) > 0 {
crossLinks := &types.CrossLinks{} crossLinks := &types.CrossLinks{}
err = rlp.DecodeBytes(header.CrossLinks, crossLinks) err = rlp.DecodeBytes(header.CrossLinks(), crossLinks)
if err != nil { if err != nil {
header.Logger(utils.Logger()).Warn().Err(err).Msg("[insertChain] cannot parse cross links") header.Logger(utils.Logger()).Warn().Err(err).Msg("[insertChain] cannot parse cross links")
return n, err return n, err
@ -1365,19 +1365,19 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
// check non-zero VRF field in header and add to local db // check non-zero VRF field in header and add to local db
if len(block.Vrf()) > 0 { if len(block.Vrf()) > 0 {
vrfBlockNumbers, _ := bc.ReadEpochVrfBlockNums(block.Header().Epoch) vrfBlockNumbers, _ := bc.ReadEpochVrfBlockNums(block.Header().Epoch())
if (len(vrfBlockNumbers) > 0) && (vrfBlockNumbers[len(vrfBlockNumbers)-1] == block.NumberU64()) { if (len(vrfBlockNumbers) > 0) && (vrfBlockNumbers[len(vrfBlockNumbers)-1] == block.NumberU64()) {
utils.Logger().Error(). utils.Logger().Error().
Str("number", chain[i].Number().String()). Str("number", chain[i].Number().String()).
Str("epoch", block.Header().Epoch.String()). Str("epoch", block.Header().Epoch().String()).
Msg("VRF block number is already in local db") Msg("VRF block number is already in local db")
} else { } else {
vrfBlockNumbers = append(vrfBlockNumbers, block.NumberU64()) vrfBlockNumbers = append(vrfBlockNumbers, block.NumberU64())
err = bc.WriteEpochVrfBlockNums(block.Header().Epoch, vrfBlockNumbers) err = bc.WriteEpochVrfBlockNums(block.Header().Epoch(), vrfBlockNumbers)
if err != nil { if err != nil {
utils.Logger().Error(). utils.Logger().Error().
Str("number", chain[i].Number().String()). Str("number", chain[i].Number().String()).
Str("epoch", block.Header().Epoch.String()). Str("epoch", block.Header().Epoch().String()).
Msg("failed to write VRF block number to local db") Msg("failed to write VRF block number to local db")
} }
} }
@ -1385,11 +1385,11 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
// check non-zero VDF field in header and add to local db // check non-zero VDF field in header and add to local db
if len(block.Vdf()) > 0 { if len(block.Vdf()) > 0 {
err = bc.WriteEpochVdfBlockNum(block.Header().Epoch, block.Number()) err = bc.WriteEpochVdfBlockNum(block.Header().Epoch(), block.Number())
if err != nil { if err != nil {
utils.Logger().Error(). utils.Logger().Error().
Str("number", chain[i].Number().String()). Str("number", chain[i].Number().String()).
Str("epoch", block.Header().Epoch.String()). Str("epoch", block.Header().Epoch().String()).
Msg("failed to write VDF block number to local db") Msg("failed to write VDF block number to local db")
} }
} }
@ -1882,7 +1882,7 @@ func (bc *BlockChain) GetVdfByNumber(number uint64) []byte {
return []byte{} return []byte{}
} }
return header.Vdf return header.Vdf()
} }
// GetVrfByNumber retrieves the randomness preimage given the block number; returns an empty byte slice if it does not exist // GetVrfByNumber retrieves the randomness preimage given the block number; returns an empty byte slice if it does not exist
@ -1891,7 +1891,7 @@ func (bc *BlockChain) GetVrfByNumber(number uint64) []byte {
if header == nil { if header == nil {
return []byte{} return []byte{}
} }
return header.Vrf return header.Vrf()
} }
// GetShardState returns the shard state for the given epoch, // GetShardState returns the shard state for the given epoch,
@ -2117,14 +2117,14 @@ func (bc *BlockChain) WriteCXReceipts(shardID uint32, blockNum uint64, blockHash
// CXMerkleProof calculates the cross shard transaction merkle proof of a given destination shard // CXMerkleProof calculates the cross shard transaction merkle proof of a given destination shard
func (bc *BlockChain) CXMerkleProof(shardID uint32, block *types.Block) (*types.CXMerkleProof, error) { func (bc *BlockChain) CXMerkleProof(shardID uint32, block *types.Block) (*types.CXMerkleProof, error) {
proof := &types.CXMerkleProof{BlockNum: block.Number(), BlockHash: block.Hash(), ShardID: block.ShardID(), CXReceiptHash: block.Header().OutgoingReceiptHash, CXShardHashes: []common.Hash{}, ShardIDs: []uint32{}} proof := &types.CXMerkleProof{BlockNum: block.Number(), BlockHash: block.Hash(), ShardID: block.ShardID(), CXReceiptHash: block.Header().OutgoingReceiptHash(), CXShardHashes: []common.Hash{}, ShardIDs: []uint32{}}
cxs, err := rawdb.ReadCXReceipts(bc.db, shardID, block.NumberU64(), block.Hash(), false) cxs, err := rawdb.ReadCXReceipts(bc.db, shardID, block.NumberU64(), block.Hash(), false)
if err != nil || cxs == nil { if err != nil || cxs == nil {
return nil, err return nil, err
} }
epoch := block.Header().Epoch epoch := block.Header().Epoch()
shardingConfig := ShardingSchedule.InstanceForEpoch(epoch) shardingConfig := ShardingSchedule.InstanceForEpoch(epoch)
shardNum := int(shardingConfig.NumShards()) shardNum := int(shardingConfig.NumShards())

@ -200,7 +200,7 @@ func (c *ChainIndexer) eventLoop(currentHeader *block.Header, events chan ChainH
defer sub.Unsubscribe() defer sub.Unsubscribe()
// Fire the initial new head event to start any outstanding processing // Fire the initial new head event to start any outstanding processing
c.newHead(currentHeader.Number.Uint64(), false) c.newHead(currentHeader.Number().Uint64(), false)
var ( var (
prevHeader = currentHeader prevHeader = currentHeader
@ -221,17 +221,17 @@ func (c *ChainIndexer) eventLoop(currentHeader *block.Header, events chan ChainH
return return
} }
header := ev.Block.Header() header := ev.Block.Header()
if header.ParentHash != prevHash { if header.ParentHash() != prevHash {
// Reorg to the common ancestor if needed (might not exist in light sync mode, skip reorg then) // Reorg to the common ancestor if needed (might not exist in light sync mode, skip reorg then)
// TODO(karalabe, zsfelfoldi): This seems a bit brittle, can we detect this case explicitly? // TODO(karalabe, zsfelfoldi): This seems a bit brittle, can we detect this case explicitly?
if rawdb.ReadCanonicalHash(c.chainDb, prevHeader.Number.Uint64()) != prevHash { if rawdb.ReadCanonicalHash(c.chainDb, prevHeader.Number().Uint64()) != prevHash {
if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, header); h != nil { if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, header); h != nil {
c.newHead(h.Number.Uint64(), true) c.newHead(h.Number().Uint64(), true)
} }
} }
} }
c.newHead(header.Number.Uint64(), false) c.newHead(header.Number().Uint64(), false)
prevHeader, prevHash = header, header.Hash() prevHeader, prevHash = header, header.Hash()
} }
@ -404,7 +404,7 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
header := rawdb.ReadHeader(c.chainDb, hash, number) header := rawdb.ReadHeader(c.chainDb, hash, number)
if header == nil { if header == nil {
return common.Hash{}, fmt.Errorf("block #%d [%x…] not found", number, hash[:4]) return common.Hash{}, fmt.Errorf("block #%d [%x…] not found", number, hash[:4])
} else if header.ParentHash != lastHead { } else if header.ParentHash() != lastHead {
return common.Hash{}, fmt.Errorf("chain reorged during section processing") return common.Hash{}, fmt.Errorf("chain reorged during section processing")
} }
if err := c.backend.Process(c.ctx, header); err != nil { if err := c.backend.Process(c.ctx, header); err != nil {

@ -59,18 +59,18 @@ func (b *BlockGen) SetCoinbase(addr common.Address) {
} }
panic("coinbase can only be set once") panic("coinbase can only be set once")
} }
b.header.Coinbase = addr b.header.SetCoinbase(addr)
b.gasPool = new(GasPool).AddGas(b.header.GasLimit) b.gasPool = new(GasPool).AddGas(b.header.GasLimit())
} }
// SetExtra sets the extra data field of the generated block. // SetExtra sets the extra data field of the generated block.
func (b *BlockGen) SetExtra(data []byte) { func (b *BlockGen) SetExtra(data []byte) {
b.header.Extra = data b.header.SetExtra(data)
} }
// SetShardID sets the shardID field of the generated block. // SetShardID sets the shardID field of the generated block.
func (b *BlockGen) SetShardID(shardID uint32) { func (b *BlockGen) SetShardID(shardID uint32) {
b.header.ShardID = shardID b.header.SetShardID(shardID)
} }
// AddTx adds a transaction to the generated block. If no coinbase has // AddTx adds a transaction to the generated block. If no coinbase has
@ -98,7 +98,11 @@ func (b *BlockGen) AddTxWithChain(bc *BlockChain, tx *types.Transaction) {
b.SetCoinbase(common.Address{}) b.SetCoinbase(common.Address{})
} }
b.statedb.Prepare(tx.Hash(), common.Hash{}, len(b.txs)) b.statedb.Prepare(tx.Hash(), common.Hash{}, len(b.txs))
receipt, _, _, err := ApplyTransaction(b.config, bc, &b.header.Coinbase, b.gasPool, b.statedb, b.header, tx, &b.header.GasUsed, vm.Config{}) coinbase := b.header.Coinbase()
gasUsed := b.header.GasUsed()
receipt, _, _, err := ApplyTransaction(b.config, bc, &coinbase, b.gasPool, b.statedb, b.header, tx, &gasUsed, vm.Config{})
b.header.SetGasUsed(gasUsed)
b.header.SetCoinbase(coinbase)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -108,7 +112,7 @@ func (b *BlockGen) AddTxWithChain(bc *BlockChain, tx *types.Transaction) {
// Number returns the block number of the block being generated. // Number returns the block number of the block being generated.
func (b *BlockGen) Number() *big.Int { func (b *BlockGen) Number() *big.Int {
return new(big.Int).Set(b.header.Number) return b.header.Number()
} }
// AddUncheckedReceipt forcefully adds a receipt to the block without a // AddUncheckedReceipt forcefully adds a receipt to the block without a
@ -184,7 +188,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
} }
// Write state changes to db // Write state changes to db
root, err := statedb.Commit(config.IsEIP158(b.header.Number)) root, err := statedb.Commit(config.IsEIP158(b.header.Number()))
if err != nil { if err != nil {
panic(fmt.Sprintf("state write error: %v", err)) panic(fmt.Sprintf("state write error: %v", err))
} }
@ -216,20 +220,14 @@ func makeHeader(chain consensus_engine.ChainReader, parent *types.Block, state *
time = new(big.Int).Add(parent.Time(), big.NewInt(10)) // block time is fixed at 10 seconds time = new(big.Int).Add(parent.Time(), big.NewInt(10)) // block time is fixed at 10 seconds
} }
return &block.Header{ return block.NewHeaderWith().
Root: state.IntermediateRoot(chain.Config().IsEIP158(parent.Number())), Root(state.IntermediateRoot(chain.Config().IsEIP158(parent.Number()))).
ParentHash: parent.Hash(), ParentHash(parent.Hash()).
Coinbase: parent.Coinbase(), Coinbase(parent.Coinbase()).
//Difficulty: engine.CalcDifficulty(chain, time.Uint64(), &types.Header{ GasLimit(CalcGasLimit(parent, parent.GasLimit(), parent.GasLimit())).
// Number: parent.Number(), Number(new(big.Int).Add(parent.Number(), common.Big1)).
// Time: new(big.Int).Sub(time, big.NewInt(10)), Time(time).
// Difficulty: parent.Difficulty(), Header()
// UncleHash: parent.UncleHash(),
//}),
GasLimit: CalcGasLimit(parent, parent.GasLimit(), parent.GasLimit()),
Number: new(big.Int).Add(parent.Number(), common.Big1),
Time: time,
}
} }
// makeHeaderChain creates a deterministic chain of headers rooted at parent. // makeHeaderChain creates a deterministic chain of headers rooted at parent.
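The makeHeader rewrite above relies on the new create-and-initialize builder. As a rough, self-contained illustration of that pattern — a toy stand-in under assumed names, not the actual block.NewHeaderWith implementation — each chainable setter fills one private field and a final Header() call hands back the finished value:

package main

import (
	"fmt"
	"math/big"
)

// toyHeader mimics the shape of the builder target: private fields, no direct access.
type toyHeader struct {
	number  *big.Int
	shardID uint32
}

// toyBuilder accumulates field values and returns the finished header at the end.
type toyBuilder struct{ h toyHeader }

func newHeaderWith() *toyBuilder { return &toyBuilder{} }

func (b *toyBuilder) Number(n *big.Int) *toyBuilder {
	b.h.number = new(big.Int).Set(n) // store a copy, not the caller's pointer
	return b
}

func (b *toyBuilder) ShardID(id uint32) *toyBuilder {
	b.h.shardID = id
	return b
}

func (b *toyBuilder) Header() *toyHeader { return &b.h }

func main() {
	h := newHeaderWith().Number(big.NewInt(10)).ShardID(3).Header()
	fmt.Println(h.number, h.shardID) // 10 3
}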

@ -10,13 +10,13 @@ import (
) )
func TestIsEpochBlock(t *testing.T) { func TestIsEpochBlock(t *testing.T) {
block1 := types.NewBlock(&block.Header{Number: big.NewInt(10)}, nil, nil, nil, nil) block1 := types.NewBlock(block.NewHeaderWith().Number(big.NewInt(10)).Header(), nil, nil, nil, nil)
block2 := types.NewBlock(&block.Header{Number: big.NewInt(0)}, nil, nil, nil, nil) block2 := types.NewBlock(block.NewHeaderWith().Number(big.NewInt(0)).Header(), nil, nil, nil, nil)
block3 := types.NewBlock(&block.Header{Number: big.NewInt(344064)}, nil, nil, nil, nil) block3 := types.NewBlock(block.NewHeaderWith().Number(big.NewInt(344064)).Header(), nil, nil, nil, nil)
block4 := types.NewBlock(&block.Header{Number: big.NewInt(77)}, nil, nil, nil, nil) block4 := types.NewBlock(block.NewHeaderWith().Number(big.NewInt(77)).Header(), nil, nil, nil, nil)
block5 := types.NewBlock(&block.Header{Number: big.NewInt(78)}, nil, nil, nil, nil) block5 := types.NewBlock(block.NewHeaderWith().Number(big.NewInt(78)).Header(), nil, nil, nil, nil)
block6 := types.NewBlock(&block.Header{Number: big.NewInt(188)}, nil, nil, nil, nil) block6 := types.NewBlock(block.NewHeaderWith().Number(big.NewInt(188)).Header(), nil, nil, nil, nil)
block7 := types.NewBlock(&block.Header{Number: big.NewInt(189)}, nil, nil, nil, nil) block7 := types.NewBlock(block.NewHeaderWith().Number(big.NewInt(189)).Header(), nil, nil, nil, nil)
tests := []struct { tests := []struct {
schedule shardingconfig.Schedule schedule shardingconfig.Schedule
block *types.Block block *types.Block

@ -52,10 +52,10 @@ func NewEVMContext(msg Message, header *block.Header, chain ChainContext, author
GetHash: GetHashFn(header, chain), GetHash: GetHashFn(header, chain),
Origin: msg.From(), Origin: msg.From(),
Coinbase: beneficiary, Coinbase: beneficiary,
BlockNumber: new(big.Int).Set(header.Number), BlockNumber: header.Number(),
Time: new(big.Int).Set(header.Time), Time: header.Time(),
//Difficulty: new(big.Int).Set(header.Difficulty), //Difficulty: new(big.Int).Set(header.Difficulty),
GasLimit: header.GasLimit, GasLimit: header.GasLimit(),
GasPrice: new(big.Int).Set(msg.GasPrice()), GasPrice: new(big.Int).Set(msg.GasPrice()),
} }
} }
@ -68,7 +68,7 @@ func GetHashFn(ref *block.Header, chain ChainContext) func(n uint64) common.Hash
// If there's no hash cache yet, make one // If there's no hash cache yet, make one
if cache == nil { if cache == nil {
cache = map[uint64]common.Hash{ cache = map[uint64]common.Hash{
ref.Number.Uint64() - 1: ref.ParentHash, ref.Number().Uint64() - 1: ref.ParentHash(),
} }
} }
// Try to fulfill the request from the cache // Try to fulfill the request from the cache
@ -76,10 +76,10 @@ func GetHashFn(ref *block.Header, chain ChainContext) func(n uint64) common.Hash
return hash return hash
} }
// Not cached, iterate the blocks and cache the hashes // Not cached, iterate the blocks and cache the hashes
for header := chain.GetHeader(ref.ParentHash, ref.Number.Uint64()-1); header != nil; header = chain.GetHeader(header.ParentHash, header.Number.Uint64()-1) { for header := chain.GetHeader(ref.ParentHash(), ref.Number().Uint64()-1); header != nil; header = chain.GetHeader(header.ParentHash(), header.Number().Uint64()-1) {
cache[header.Number.Uint64()-1] = header.ParentHash cache[header.Number().Uint64()-1] = header.ParentHash()
if n == header.Number.Uint64()-1 { if n == header.Number().Uint64()-1 {
return header.ParentHash return header.ParentHash()
} }
} }
return common.Hash{} return common.Hash{}

@ -245,21 +245,21 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
utils.Logger().Error().Msg("failed to rlp-serialize genesis shard state") utils.Logger().Error().Msg("failed to rlp-serialize genesis shard state")
os.Exit(1) os.Exit(1)
} }
head := &block.Header{ head := block.NewHeaderWith().
Number: new(big.Int).SetUint64(g.Number), Number(new(big.Int).SetUint64(g.Number)).
Epoch: big.NewInt(0), Epoch(big.NewInt(0)).
ShardID: g.ShardID, ShardID(g.ShardID).
Time: new(big.Int).SetUint64(g.Timestamp), Time(new(big.Int).SetUint64(g.Timestamp)).
ParentHash: g.ParentHash, ParentHash(g.ParentHash).
Extra: g.ExtraData, Extra(g.ExtraData).
GasLimit: g.GasLimit, GasLimit(g.GasLimit).
GasUsed: g.GasUsed, GasUsed(g.GasUsed).
MixDigest: g.Mixhash, MixDigest(g.Mixhash).
Coinbase: g.Coinbase, Coinbase(g.Coinbase).
Root: root, Root(root).
ShardStateHash: g.ShardStateHash, ShardStateHash(g.ShardStateHash).
ShardState: shardStateBytes, ShardState(shardStateBytes).
} Header()
statedb.Commit(false) statedb.Commit(false)
statedb.Database().TrieDB().Commit(root, true) statedb.Database().TrieDB().Commit(root, true)
@ -280,7 +280,7 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
rawdb.WriteHeadBlockHash(db, block.Hash()) rawdb.WriteHeadBlockHash(db, block.Hash())
rawdb.WriteHeadHeaderHash(db, block.Hash()) rawdb.WriteHeadHeaderHash(db, block.Hash())
err := rawdb.WriteShardStateBytes(db, block.Header().Epoch, block.Header().ShardState) err := rawdb.WriteShardStateBytes(db, block.Header().Epoch(), block.Header().ShardState())
if err != nil { if err != nil {
utils.Logger().Error().Err(err).Msg("Failed to store genesis shard state") utils.Logger().Error().Err(err).Msg("Failed to store genesis shard state")

@ -137,7 +137,7 @@ func (hc *HeaderChain) WriteHeader(header *block.Header) (status WriteStatus, er
// Cache some values to prevent constant recalculation // Cache some values to prevent constant recalculation
var ( var (
hash = header.Hash() hash = header.Hash()
number = header.Number.Uint64() number = header.Number().Uint64()
) )
// TODO: implement fork choice mechanism // TODO: implement fork choice mechanism
//localTd := hc.GetTd(hc.currentHeaderHash, hc.CurrentHeader().Number.Uint64()) //localTd := hc.GetTd(hc.currentHeaderHash, hc.CurrentHeader().Number.Uint64())
@ -206,18 +206,19 @@ type WhCallback func(*block.Header) error
func (hc *HeaderChain) ValidateHeaderChain(chain []*block.Header, checkFreq int) (int, error) { func (hc *HeaderChain) ValidateHeaderChain(chain []*block.Header, checkFreq int) (int, error) {
// Do a sanity check that the provided chain is actually ordered and linked // Do a sanity check that the provided chain is actually ordered and linked
for i := 1; i < len(chain); i++ { for i := 1; i < len(chain); i++ {
if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 || chain[i].ParentHash != chain[i-1].Hash() { parentHash := chain[i].ParentHash()
if chain[i].Number().Uint64() != chain[i-1].Number().Uint64()+1 || parentHash != chain[i-1].Hash() {
// Chain broke ancestry, log a message (programming error) and skip insertion // Chain broke ancestry, log a message (programming error) and skip insertion
utils.Logger().Error(). utils.Logger().Error().
Str("number", chain[i].Number.String()). Str("number", chain[i].Number().String()).
Str("hash", chain[i].Hash().Hex()). Str("hash", chain[i].Hash().Hex()).
Str("parent", chain[i].ParentHash.Hex()). Str("parent", parentHash.Hex()).
Str("prevnumber", chain[i-1].Number.String()). Str("prevnumber", chain[i-1].Number().String()).
Str("prevhash", chain[i-1].Hash().Hex()). Str("prevhash", chain[i-1].Hash().Hex()).
Msg("Non contiguous header insert") Msg("Non contiguous header insert")
return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].Number, return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].Number(),
chain[i-1].Hash().Bytes()[:4], i, chain[i].Number, chain[i].Hash().Bytes()[:4], chain[i].ParentHash[:4]) chain[i-1].Hash().Bytes()[:4], i, chain[i].Number(), chain[i].Hash().Bytes()[:4], parentHash[:4])
} }
} }
@ -271,7 +272,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*block.Header, writeHeader WhCa
return i, errors.New("aborted") return i, errors.New("aborted")
} }
// If the header's already known, skip it, otherwise store // If the header's already known, skip it, otherwise store
if hc.HasHeader(header.Hash(), header.Number.Uint64()) { if hc.HasHeader(header.Hash(), header.Number().Uint64()) {
stats.ignored++ stats.ignored++
continue continue
} }
@ -286,10 +287,10 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*block.Header, writeHeader WhCa
context := utils.Logger().With(). context := utils.Logger().With().
Int("count", stats.processed). Int("count", stats.processed).
Str("elapsed", common.PrettyDuration(time.Since(start)).String()). Str("elapsed", common.PrettyDuration(time.Since(start)).String()).
Str("number", last.Number.String()). Str("number", last.Number().String()).
Str("hash", last.Hash().Hex()) Str("hash", last.Hash().Hex())
if timestamp := time.Unix(last.Time.Int64(), 0); time.Since(timestamp) > time.Minute { if timestamp := time.Unix(last.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
context = context.Str("age", common.PrettyAge(timestamp).String()) context = context.Str("age", common.PrettyAge(timestamp).String())
} }
if stats.ignored > 0 { if stats.ignored > 0 {
@ -312,12 +313,12 @@ func (hc *HeaderChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []co
// Iterate the headers until enough is collected or the genesis reached // Iterate the headers until enough is collected or the genesis reached
chain := make([]common.Hash, 0, max) chain := make([]common.Hash, 0, max)
for i := uint64(0); i < max; i++ { for i := uint64(0); i < max; i++ {
next := header.ParentHash next := header.ParentHash()
if header = hc.GetHeader(next, header.Number.Uint64()-1); header == nil { if header = hc.GetHeader(next, header.Number().Uint64()-1); header == nil {
break break
} }
chain = append(chain, next) chain = append(chain, next)
if header.Number.Sign() == 0 { if header.Number().Sign() == 0 {
break break
} }
} }
@ -336,7 +337,7 @@ func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, ma
if ancestor == 1 { if ancestor == 1 {
// in this case it is cheaper to just read the header // in this case it is cheaper to just read the header
if header := hc.GetHeader(hash, number); header != nil { if header := hc.GetHeader(hash, number); header != nil {
return header.ParentHash, number - 1 return header.ParentHash(), number - 1
} }
return common.Hash{}, 0 return common.Hash{}, 0
} }
@ -354,7 +355,7 @@ func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, ma
if header == nil { if header == nil {
return common.Hash{}, 0 return common.Hash{}, 0
} }
hash = header.ParentHash hash = header.ParentHash()
number-- number--
} }
return hash, number return hash, number
@ -462,19 +463,19 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
height := uint64(0) height := uint64(0)
if hdr := hc.CurrentHeader(); hdr != nil { if hdr := hc.CurrentHeader(); hdr != nil {
height = hdr.Number.Uint64() height = hdr.Number().Uint64()
} }
batch := hc.chainDb.NewBatch() batch := hc.chainDb.NewBatch()
for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() { for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number().Uint64() > head; hdr = hc.CurrentHeader() {
hash := hdr.Hash() hash := hdr.Hash()
num := hdr.Number.Uint64() num := hdr.Number().Uint64()
if delFn != nil { if delFn != nil {
delFn(batch, hash, num) delFn(batch, hash, num)
} }
rawdb.DeleteHeader(batch, hash, num) rawdb.DeleteHeader(batch, hash, num)
rawdb.DeleteTd(batch, hash, num) rawdb.DeleteTd(batch, hash, num)
hc.currentHeader.Store(hc.GetHeader(hdr.ParentHash, hdr.Number.Uint64()-1)) hc.currentHeader.Store(hc.GetHeader(hdr.ParentHash(), hdr.Number().Uint64()-1))
} }
// Roll back the canonical chain numbering // Roll back the canonical chain numbering
for i := height; i > head; i-- { for i := height; i > head; i-- {

@ -171,7 +171,7 @@ func WriteHeader(db DatabaseWriter, header *block.Header) {
// Write the hash -> number mapping // Write the hash -> number mapping
var ( var (
hash = header.Hash() hash = header.Hash()
number = header.Number.Uint64() number = header.Number().Uint64()
encoded = encodeBlockNumber(number) encoded = encodeBlockNumber(number)
) )
key := headerNumberKey(hash) key := headerNumberKey(hash)
@ -349,12 +349,7 @@ func WriteBlock(db DatabaseWriter, block *types.Block) {
WriteBody(db, block.Hash(), block.NumberU64(), block.Body()) WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
WriteHeader(db, block.Header()) WriteHeader(db, block.Header())
// TODO ek – maybe roll the below into WriteHeader() // TODO ek – maybe roll the below into WriteHeader()
epoch := block.Header().Epoch epoch := block.Header().Epoch()
if epoch == nil {
// backward compatibility
return
}
epoch = new(big.Int).Set(epoch)
epochBlockNum := block.Number() epochBlockNum := block.Number()
writeOne := func() { writeOne := func() {
if err := WriteEpochBlockNumber(db, epoch, epochBlockNum); err != nil { if err := WriteEpochBlockNumber(db, epoch, epochBlockNum); err != nil {
@ -368,7 +363,7 @@ func WriteBlock(db DatabaseWriter, block *types.Block) {
} }
// TODO: don't change epoch based on shard state presence // TODO: don't change epoch based on shard state presence
if len(block.Header().ShardState) > 0 && block.NumberU64() != 0 { if len(block.Header().ShardState()) > 0 && block.NumberU64() != 0 {
// End-of-epoch block; record the next epoch after this block. // End-of-epoch block; record the next epoch after this block.
epoch = new(big.Int).Add(epoch, common.Big1) epoch = new(big.Int).Add(epoch, common.Big1)
epochBlockNum = new(big.Int).Add(epochBlockNum, common.Big1) epochBlockNum = new(big.Int).Add(epochBlockNum, common.Big1)
@ -386,24 +381,24 @@ func DeleteBlock(db DatabaseDeleter, hash common.Hash, number uint64) {
// FindCommonAncestor returns the last common ancestor of two block headers // FindCommonAncestor returns the last common ancestor of two block headers
func FindCommonAncestor(db DatabaseReader, a, b *block.Header) *block.Header { func FindCommonAncestor(db DatabaseReader, a, b *block.Header) *block.Header {
for bn := b.Number.Uint64(); a.Number.Uint64() > bn; { for bn := b.Number().Uint64(); a.Number().Uint64() > bn; {
a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1) a = ReadHeader(db, a.ParentHash(), a.Number().Uint64()-1)
if a == nil { if a == nil {
return nil return nil
} }
} }
for an := a.Number.Uint64(); an < b.Number.Uint64(); { for an := a.Number().Uint64(); an < b.Number().Uint64(); {
b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1) b = ReadHeader(db, b.ParentHash(), b.Number().Uint64()-1)
if b == nil { if b == nil {
return nil return nil
} }
} }
for a.Hash() != b.Hash() { for a.Hash() != b.Hash() {
a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1) a = ReadHeader(db, a.ParentHash(), a.Number().Uint64()-1)
if a == nil { if a == nil {
return nil return nil
} }
b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1) b = ReadHeader(db, b.ParentHash(), b.Number().Uint64()-1)
if b == nil { if b == nil {
return nil return nil
} }

@ -40,18 +40,18 @@ func TestHeaderStorage(t *testing.T) {
db := ethdb.NewMemDatabase() db := ethdb.NewMemDatabase()
// Create a test header to move around the database and make sure it's really new // Create a test header to move around the database and make sure it's really new
header := &block.Header{Number: big.NewInt(42), Extra: []byte("test header")} header := block.NewHeaderWith().Number(big.NewInt(42)).Extra([]byte("test header")).Header()
if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry != nil { if entry := ReadHeader(db, header.Hash(), header.Number().Uint64()); entry != nil {
t.Fatalf("Non existent header returned: %v", entry) t.Fatalf("Non existent header returned: %v", entry)
} }
// Write and verify the header in the database // Write and verify the header in the database
WriteHeader(db, header) WriteHeader(db, header)
if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry == nil { if entry := ReadHeader(db, header.Hash(), header.Number().Uint64()); entry == nil {
t.Fatalf("Stored header not found") t.Fatalf("Stored header not found")
} else if entry.Hash() != header.Hash() { } else if entry.Hash() != header.Hash() {
t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header) t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header)
} }
if entry := ReadHeaderRLP(db, header.Hash(), header.Number.Uint64()); entry == nil { if entry := ReadHeaderRLP(db, header.Hash(), header.Number().Uint64()); entry == nil {
t.Fatalf("Stored header RLP not found") t.Fatalf("Stored header RLP not found")
} else { } else {
hasher := sha3.NewLegacyKeccak256() hasher := sha3.NewLegacyKeccak256()
@ -62,8 +62,8 @@ func TestHeaderStorage(t *testing.T) {
} }
} }
// Delete the header and verify the execution // Delete the header and verify the execution
DeleteHeader(db, header.Hash(), header.Number.Uint64()) DeleteHeader(db, header.Hash(), header.Number().Uint64())
if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry != nil { if entry := ReadHeader(db, header.Hash(), header.Number().Uint64()); entry != nil {
t.Fatalf("Deleted header returned: %v", entry) t.Fatalf("Deleted header returned: %v", entry)
} }
} }
@ -73,7 +73,7 @@ func TestBodyStorage(t *testing.T) {
db := ethdb.NewMemDatabase() db := ethdb.NewMemDatabase()
// Create a test body to move around the database and make sure it's really new // Create a test body to move around the database and make sure it's really new
body := &types.Body{Uncles: []*block.Header{{Extra: []byte("test header")}}} body := &types.Body{Uncles: []*block.Header{block.NewHeaderWith().Extra([]byte("test header")).Header()}}
hasher := sha3.NewLegacyKeccak256() hasher := sha3.NewLegacyKeccak256()
rlp.Encode(hasher, body) rlp.Encode(hasher, body)
@ -111,14 +111,14 @@ func TestBlockStorage(t *testing.T) {
db := ethdb.NewMemDatabase() db := ethdb.NewMemDatabase()
// Create a test block to move around the database and make sure it's really new // Create a test block to move around the database and make sure it's really new
block := types.NewBlockWithHeader(&block.Header{ block := types.NewBlockWithHeader(block.NewHeaderWith().
Extra: []byte("test block"), Extra([]byte("test block")).
TxHash: types.EmptyRootHash, TxHash(types.EmptyRootHash).
ReceiptHash: types.EmptyRootHash, ReceiptHash(types.EmptyRootHash).
Epoch: big.NewInt(0), Epoch(big.NewInt(0)).
Number: big.NewInt(0), Number(big.NewInt(0)).
ShardState: []byte("dummy data"), ShardState([]byte("dummy data")).
}) Header())
if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil { if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
t.Fatalf("Non existent block returned: %v", entry) t.Fatalf("Non existent block returned: %v", entry)
} }
@ -171,11 +171,11 @@ func TestBlockStorage(t *testing.T) {
// Tests that partial block contents don't get reassembled into full blocks. // Tests that partial block contents don't get reassembled into full blocks.
func TestPartialBlockStorage(t *testing.T) { func TestPartialBlockStorage(t *testing.T) {
db := ethdb.NewMemDatabase() db := ethdb.NewMemDatabase()
block := types.NewBlockWithHeader(&block.Header{ block := types.NewBlockWithHeader(block.NewHeaderWith().
Extra: []byte("test block"), Extra([]byte("test block")).
TxHash: types.EmptyRootHash, TxHash(types.EmptyRootHash).
ReceiptHash: types.EmptyRootHash, ReceiptHash(types.EmptyRootHash).
}) Header())
// Store a header and check that it's not recognized as a block // Store a header and check that it's not recognized as a block
WriteHeader(db, block.Header()) WriteHeader(db, block.Header())
if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil { if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
@ -251,9 +251,9 @@ func TestCanonicalMappingStorage(t *testing.T) {
func TestHeadStorage(t *testing.T) { func TestHeadStorage(t *testing.T) {
db := ethdb.NewMemDatabase() db := ethdb.NewMemDatabase()
blockHead := types.NewBlockWithHeader(&block.Header{Extra: []byte("test block header")}) blockHead := types.NewBlockWithHeader(block.NewHeaderWith().Extra([]byte("test block header")).Header())
blockFull := types.NewBlockWithHeader(&block.Header{Extra: []byte("test block full")}) blockFull := types.NewBlockWithHeader(block.NewHeaderWith().Extra([]byte("test block full")).Header())
blockFast := types.NewBlockWithHeader(&block.Header{Extra: []byte("test block fast")}) blockFast := types.NewBlockWithHeader(block.NewHeaderWith().Extra([]byte("test block fast")).Header())
// Check that no head entries are in a pristine database // Check that no head entries are in a pristine database
if entry := ReadHeadHeaderHash(db); entry != (common.Hash{}) { if entry := ReadHeadHeaderHash(db); entry != (common.Hash{}) {

@ -36,7 +36,7 @@ func TestLookupStorage(t *testing.T) {
tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), 0, big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33}) tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), 0, big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33})
txs := []*types.Transaction{tx1, tx2, tx3} txs := []*types.Transaction{tx1, tx2, tx3}
block := types.NewBlock(&block2.Header{Number: big.NewInt(314)}, txs, nil, nil, nil) block := types.NewBlock(block2.NewHeaderWith().Number(big.NewInt(314)).Header(), txs, nil, nil, nil)
// Check that no transaction entries are in a pristine database // Check that no transaction entries are in a pristine database
for i, tx := range txs { for i, tx := range txs {

@ -66,7 +66,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.DB, cfg vm.C
incxs = block.IncomingReceipts() incxs = block.IncomingReceipts()
usedGas = new(uint64) usedGas = new(uint64)
header = block.Header() header = block.Header()
coinbase = block.Header().Coinbase coinbase = block.Header().Coinbase()
allLogs []*types.Log allLogs []*types.Log
gp = new(GasPool).AddGas(block.GasLimit()) gp = new(GasPool).AddGas(block.GasLimit())
) )
@ -104,12 +104,12 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.DB, cfg vm.C
// returns the transaction type, or InvalidTx if it is not valid // returns the transaction type, or InvalidTx if it is not valid
func getTransactionType(header *block.Header, tx *types.Transaction) types.TransactionType { func getTransactionType(header *block.Header, tx *types.Transaction) types.TransactionType {
if tx.ShardID() == tx.ToShardID() && header.ShardID == tx.ShardID() { if tx.ShardID() == tx.ToShardID() && header.ShardID() == tx.ShardID() {
return types.SameShardTx return types.SameShardTx
} }
numShards := ShardingSchedule.InstanceForEpoch(header.Epoch).NumShards() numShards := ShardingSchedule.InstanceForEpoch(header.Epoch()).NumShards()
// Assuming here that all shards are numbered consecutively from 0 to n-1, where n is the total number of shards // Assuming here that all shards are numbered consecutively from 0 to n-1, where n is the total number of shards
if tx.ShardID() != tx.ToShardID() && header.ShardID == tx.ShardID() && tx.ToShardID() < numShards { if tx.ShardID() != tx.ToShardID() && header.ShardID() == tx.ShardID() && tx.ToShardID() < numShards {
return types.SubtractionOnly return types.SubtractionOnly
} }
return types.InvalidTx return types.InvalidTx
@ -124,7 +124,7 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
if txType == types.InvalidTx { if txType == types.InvalidTx {
return nil, nil, 0, fmt.Errorf("Invalid Transaction Type") return nil, nil, 0, fmt.Errorf("Invalid Transaction Type")
} }
msg, err := tx.AsMessage(types.MakeSigner(config, header.Number)) msg, err := tx.AsMessage(types.MakeSigner(config, header.Number()))
// skip signer err for addition-only tx // skip signer err for addition-only tx
if err != nil { if err != nil {
return nil, nil, 0, err return nil, nil, 0, err
@ -143,10 +143,10 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
} }
// Update the state with pending changes // Update the state with pending changes
var root []byte var root []byte
if config.IsByzantium(header.Number) { if config.IsByzantium(header.Number()) {
statedb.Finalise(true) statedb.Finalise(true)
} else { } else {
root = statedb.IntermediateRoot(config.IsEIP158(header.Number)).Bytes() root = statedb.IntermediateRoot(config.IsEIP158(header.Number())).Bytes()
} }
*usedGas += gas *usedGas += gas
@ -190,7 +190,7 @@ func ApplyIncomingReceipt(config *params.ChainConfig, db *state.DB, header *bloc
db.CreateAccount(*cx.To) db.CreateAccount(*cx.To)
} }
db.AddBalance(*cx.To, cx.Amount) db.AddBalance(*cx.To, cx.Amount)
db.IntermediateRoot(config.IsEIP158(header.Number)).Bytes() db.IntermediateRoot(config.IsEIP158(header.Number())).Bytes()
} }
return nil return nil
} }

@ -374,10 +374,10 @@ func (pool *TxPool) reset(oldHead, newHead *block.Header) {
// If we're reorging an old state, reinject all dropped transactions // If we're reorging an old state, reinject all dropped transactions
var reinject types.Transactions var reinject types.Transactions
if oldHead != nil && oldHead.Hash() != newHead.ParentHash { if oldHead != nil && oldHead.Hash() != newHead.ParentHash() {
// If the reorg is too deep, avoid doing it (will happen during fast sync) // If the reorg is too deep, avoid doing it (will happen during fast sync)
oldNum := oldHead.Number.Uint64() oldNum := oldHead.Number().Uint64()
newNum := newHead.Number.Uint64() newNum := newHead.Number().Uint64()
if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 { if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
utils.Logger().Debug().Uint64("depth", depth).Msg("Skipping deep transaction reorg") utils.Logger().Debug().Uint64("depth", depth).Msg("Skipping deep transaction reorg")
@ -386,14 +386,14 @@ func (pool *TxPool) reset(oldHead, newHead *block.Header) {
var discarded, included types.Transactions var discarded, included types.Transactions
var ( var (
rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64()) rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number().Uint64())
add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64()) add = pool.chain.GetBlock(newHead.Hash(), newHead.Number().Uint64())
) )
for rem.NumberU64() > add.NumberU64() { for rem.NumberU64() > add.NumberU64() {
discarded = append(discarded, rem.Transactions()...) discarded = append(discarded, rem.Transactions()...)
if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
utils.Logger().Error(). utils.Logger().Error().
Str("block", oldHead.Number.String()). Str("block", oldHead.Number().String()).
Str("hash", oldHead.Hash().Hex()). Str("hash", oldHead.Hash().Hex()).
Msg("Unrooted old chain seen by tx pool") Msg("Unrooted old chain seen by tx pool")
return return
@ -403,7 +403,7 @@ func (pool *TxPool) reset(oldHead, newHead *block.Header) {
included = append(included, add.Transactions()...) included = append(included, add.Transactions()...)
if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
utils.Logger().Error(). utils.Logger().Error().
Str("block", newHead.Number.String()). Str("block", newHead.Number().String()).
Str("hash", newHead.Hash().Hex()). Str("hash", newHead.Hash().Hex()).
Msg("Unrooted new chain seen by tx pool") Msg("Unrooted new chain seen by tx pool")
return return
@ -413,7 +413,7 @@ func (pool *TxPool) reset(oldHead, newHead *block.Header) {
discarded = append(discarded, rem.Transactions()...) discarded = append(discarded, rem.Transactions()...)
if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
utils.Logger().Error(). utils.Logger().Error().
Str("block", oldHead.Number.String()). Str("block", oldHead.Number().String()).
Str("hash", oldHead.Hash().Hex()). Str("hash", oldHead.Hash().Hex()).
Msg("Unrooted old chain seen by tx pool") Msg("Unrooted old chain seen by tx pool")
return return
@ -421,7 +421,7 @@ func (pool *TxPool) reset(oldHead, newHead *block.Header) {
included = append(included, add.Transactions()...) included = append(included, add.Transactions()...)
if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
utils.Logger().Error(). utils.Logger().Error().
Str("block", newHead.Number.String()). Str("block", newHead.Number().String()).
Str("hash", newHead.Hash().Hex()). Str("hash", newHead.Hash().Hex()).
Msg("Unrooted new chain seen by tx pool") Msg("Unrooted new chain seen by tx pool")
return return
@ -434,14 +434,14 @@ func (pool *TxPool) reset(oldHead, newHead *block.Header) {
if newHead == nil { if newHead == nil {
newHead = pool.chain.CurrentBlock().Header() // Special case during testing newHead = pool.chain.CurrentBlock().Header() // Special case during testing
} }
statedb, err := pool.chain.StateAt(newHead.Root) statedb, err := pool.chain.StateAt(newHead.Root())
if err != nil { if err != nil {
utils.Logger().Error().Err(err).Msg("Failed to reset txpool state") utils.Logger().Error().Err(err).Msg("Failed to reset txpool state")
return return
} }
pool.currentState = statedb pool.currentState = statedb
pool.pendingState = state.ManageState(statedb) pool.pendingState = state.ManageState(statedb)
pool.currentMaxGas = newHead.GasLimit pool.currentMaxGas = newHead.GasLimit()
// Inject any transactions discarded due to reorgs // Inject any transactions discarded due to reorgs
utils.Logger().Debug().Int("count", len(reinject)).Msg("Reinjecting stale transactions") utils.Logger().Debug().Int("count", len(reinject)).Msg("Reinjecting stale transactions")

@ -54,9 +54,9 @@ type testBlockChain struct {
} }
func (bc *testBlockChain) CurrentBlock() *types.Block { func (bc *testBlockChain) CurrentBlock() *types.Block {
return types.NewBlock(&block.Header{ return types.NewBlock(block.NewHeaderWith().
GasLimit: bc.gasLimit, GasLimit(bc.gasLimit).
}, nil, nil, nil, nil) Header(), nil, nil, nil, nil)
} }
func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {

@ -102,14 +102,16 @@ type Block struct {
// SetLastCommitSig sets the last block's commit group signature. // SetLastCommitSig sets the last block's commit group signature.
func (b *Block) SetLastCommitSig(sig []byte, signers []byte) { func (b *Block) SetLastCommitSig(sig []byte, signers []byte) {
if len(sig) != len(b.header.LastCommitSignature) { if len(sig) != len(b.header.LastCommitSignature()) {
utils.Logger().Warn(). utils.Logger().Warn().
Int("srcLen", len(sig)). Int("srcLen", len(sig)).
Int("dstLen", len(b.header.LastCommitSignature)). Int("dstLen", len(b.header.LastCommitSignature())).
Msg("SetLastCommitSig: sig size mismatch") Msg("SetLastCommitSig: sig size mismatch")
} }
copy(b.header.LastCommitSignature[:], sig[:]) var sig2 [96]byte
b.header.LastCommitBitmap = append(signers[:0:0], signers...) copy(sig2[:], sig)
b.header.SetLastCommitSignature(sig2)
b.header.SetLastCommitBitmap(signers)
} }
// DeprecatedTd is an old relic for extracting the TD of a block. It is in the // DeprecatedTd is an old relic for extracting the TD of a block. It is in the
@ -154,26 +156,26 @@ func NewBlock(header *block.Header, txs []*Transaction, receipts []*Receipt, out
// TODO: panic if len(txs) != len(receipts) // TODO: panic if len(txs) != len(receipts)
if len(txs) == 0 { if len(txs) == 0 {
b.header.TxHash = EmptyRootHash b.header.SetTxHash(EmptyRootHash)
} else { } else {
b.header.TxHash = DeriveSha(Transactions(txs)) b.header.SetTxHash(DeriveSha(Transactions(txs)))
b.transactions = make(Transactions, len(txs)) b.transactions = make(Transactions, len(txs))
copy(b.transactions, txs) copy(b.transactions, txs)
} }
if len(receipts) == 0 { if len(receipts) == 0 {
b.header.ReceiptHash = EmptyRootHash b.header.SetReceiptHash(EmptyRootHash)
} else { } else {
b.header.ReceiptHash = DeriveSha(Receipts(receipts)) b.header.SetReceiptHash(DeriveSha(Receipts(receipts)))
b.header.Bloom = CreateBloom(receipts) b.header.SetBloom(CreateBloom(receipts))
} }
b.header.OutgoingReceiptHash = DeriveMultipleShardsSha(CXReceipts(outcxs)) b.header.SetOutgoingReceiptHash(DeriveMultipleShardsSha(CXReceipts(outcxs)))
if len(incxs) == 0 { if len(incxs) == 0 {
b.header.IncomingReceiptHash = EmptyRootHash b.header.SetIncomingReceiptHash(EmptyRootHash)
} else { } else {
b.header.IncomingReceiptHash = DeriveSha(CXReceiptsProofs(incxs)) b.header.SetIncomingReceiptHash(DeriveSha(CXReceiptsProofs(incxs)))
b.incomingReceipts = make(CXReceiptsProofs, len(incxs)) b.incomingReceipts = make(CXReceiptsProofs, len(incxs))
copy(b.incomingReceipts, incxs) copy(b.incomingReceipts, incxs)
} }
@ -190,45 +192,14 @@ func NewBlockWithHeader(header *block.Header) *Block {
// CopyHeader creates a deep copy of a block header to prevent side effects from // CopyHeader creates a deep copy of a block header to prevent side effects from
// modifying a header variable. // modifying a header variable.
// TODO ek – no longer necessary
func CopyHeader(h *block.Header) *block.Header { func CopyHeader(h *block.Header) *block.Header {
// TODO: update with new fields
cpy := *h cpy := *h
if cpy.Time = new(big.Int); h.Time != nil { // A field value object that lives outside of a header struct is never
cpy.Time.Set(h.Time) // exposed to the outside for external modification, as its getter and
} // setter always make a copy. Therefore, we do not have to clone such
if cpy.Number = new(big.Int); h.Number != nil { // fields, and multiple header structs can safely share the same field value
cpy.Number.Set(h.Number) // objects.
}
if cpy.ViewID = new(big.Int); h.ViewID != nil {
cpy.ViewID.Set(h.ViewID)
}
if cpy.Epoch = new(big.Int); h.Epoch != nil {
cpy.Epoch.Set(h.Epoch)
}
if len(h.Extra) > 0 {
cpy.Extra = make([]byte, len(h.Extra))
copy(cpy.Extra, h.Extra)
}
if len(h.ShardState) > 0 {
cpy.ShardState = make([]byte, len(h.ShardState))
copy(cpy.ShardState, h.ShardState)
}
if len(h.Vrf) > 0 {
cpy.Vrf = make([]byte, len(h.Vrf))
copy(cpy.Vrf, h.Vrf)
}
if len(h.Vdf) > 0 {
cpy.Vdf = make([]byte, len(h.Vdf))
copy(cpy.Vdf, h.Vdf)
}
if len(h.CrossLinks) > 0 {
cpy.CrossLinks = make([]byte, len(h.CrossLinks))
copy(cpy.CrossLinks, h.CrossLinks)
}
if len(h.LastCommitBitmap) > 0 {
cpy.LastCommitBitmap = make([]byte, len(h.LastCommitBitmap))
copy(cpy.LastCommitBitmap, h.LastCommitBitmap)
}
return &cpy return &cpy
} }
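The comment in the new CopyHeader body hinges on getters and setters always copying mutable field values. A minimal, hypothetical sketch of that discipline follows (the type below is illustrative, not the real block.Header, though the accessor names follow the diff):

package main

import (
	"fmt"
	"math/big"
)

// header is a toy stand-in with one private big.Int field.
type header struct {
	number *big.Int
}

// Number returns a copy, so callers can never mutate the stored value in place.
func (h *header) Number() *big.Int { return new(big.Int).Set(h.number) }

// SetNumber stores a copy of the caller's value, so later caller-side mutation
// cannot leak into the header.
func (h *header) SetNumber(n *big.Int) { h.number = new(big.Int).Set(n) }

func main() {
	h := &header{}
	n := big.NewInt(42)
	h.SetNumber(n)
	n.SetInt64(7)                 // caller mutates its own copy…
	fmt.Println(h.Number())       // …the header still reports 42
	shallow := *h                 // a shallow struct copy can now safely share the field value
	fmt.Println(shallow.Number()) // 42
}

Because copying happens at the accessor boundary, the shallow cpy := *h kept in CopyHeader above is sufficient under this discipline.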
@ -291,52 +262,52 @@ func (b *Block) Transaction(hash common.Hash) *Transaction {
} }
// Number returns header number. // Number returns header number.
func (b *Block) Number() *big.Int { return new(big.Int).Set(b.header.Number) } func (b *Block) Number() *big.Int { return b.header.Number() }
// GasLimit returns header gas limit. // GasLimit returns header gas limit.
func (b *Block) GasLimit() uint64 { return b.header.GasLimit } func (b *Block) GasLimit() uint64 { return b.header.GasLimit() }
// GasUsed returns header gas used. // GasUsed returns header gas used.
func (b *Block) GasUsed() uint64 { return b.header.GasUsed } func (b *Block) GasUsed() uint64 { return b.header.GasUsed() }
// Time is header time. // Time is header time.
func (b *Block) Time() *big.Int { return new(big.Int).Set(b.header.Time) } func (b *Block) Time() *big.Int { return b.header.Time() }
// NumberU64 is the header number in uint64. // NumberU64 is the header number in uint64.
func (b *Block) NumberU64() uint64 { return b.header.Number.Uint64() } func (b *Block) NumberU64() uint64 { return b.header.Number().Uint64() }
// MixDigest is the header mix digest. // MixDigest is the header mix digest.
func (b *Block) MixDigest() common.Hash { return b.header.MixDigest } func (b *Block) MixDigest() common.Hash { return b.header.MixDigest() }
// ShardID is the header ShardID // ShardID is the header ShardID
func (b *Block) ShardID() uint32 { return b.header.ShardID } func (b *Block) ShardID() uint32 { return b.header.ShardID() }
// Epoch is the header Epoch // Epoch is the header Epoch
func (b *Block) Epoch() *big.Int { return b.header.Epoch } func (b *Block) Epoch() *big.Int { return b.header.Epoch() }
// Bloom returns header bloom. // Bloom returns header bloom.
func (b *Block) Bloom() ethtypes.Bloom { return b.header.Bloom } func (b *Block) Bloom() ethtypes.Bloom { return b.header.Bloom() }
// Coinbase returns header coinbase. // Coinbase returns header coinbase.
func (b *Block) Coinbase() common.Address { return b.header.Coinbase } func (b *Block) Coinbase() common.Address { return b.header.Coinbase() }
// Root returns header root. // Root returns header root.
func (b *Block) Root() common.Hash { return b.header.Root } func (b *Block) Root() common.Hash { return b.header.Root() }
// ParentHash returns header parent hash. // ParentHash returns header parent hash.
func (b *Block) ParentHash() common.Hash { return b.header.ParentHash } func (b *Block) ParentHash() common.Hash { return b.header.ParentHash() }
// TxHash returns header tx hash. // TxHash returns header tx hash.
func (b *Block) TxHash() common.Hash { return b.header.TxHash } func (b *Block) TxHash() common.Hash { return b.header.TxHash() }
// ReceiptHash returns header receipt hash. // ReceiptHash returns header receipt hash.
func (b *Block) ReceiptHash() common.Hash { return b.header.ReceiptHash } func (b *Block) ReceiptHash() common.Hash { return b.header.ReceiptHash() }
// OutgoingReceiptHash returns header cross shard receipt hash. // OutgoingReceiptHash returns header cross shard receipt hash.
func (b *Block) OutgoingReceiptHash() common.Hash { return b.header.OutgoingReceiptHash } func (b *Block) OutgoingReceiptHash() common.Hash { return b.header.OutgoingReceiptHash() }
// Extra returns header extra. // Extra returns header extra.
func (b *Block) Extra() []byte { return common.CopyBytes(b.header.Extra) } func (b *Block) Extra() []byte { return b.header.Extra() }
// Header returns a copy of Header. // Header returns a copy of Header.
func (b *Block) Header() *block.Header { return CopyHeader(b.header) } func (b *Block) Header() *block.Header { return CopyHeader(b.header) }
@ -345,10 +316,10 @@ func (b *Block) Header() *block.Header { return CopyHeader(b.header) }
func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles, b.incomingReceipts} } func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles, b.incomingReceipts} }
// Vdf returns header Vdf. // Vdf returns header Vdf.
func (b *Block) Vdf() []byte { return common.CopyBytes(b.header.Vdf) } func (b *Block) Vdf() []byte { return b.header.Vdf() }
// Vrf returns header Vrf. // Vrf returns header Vrf.
func (b *Block) Vrf() []byte { return common.CopyBytes(b.header.Vrf) } func (b *Block) Vrf() []byte { return b.header.Vrf() }
// Size returns the true RLP encoded storage size of the block, either by encoding // Size returns the true RLP encoded storage size of the block, either by encoding
// and returning it, or returning a previously cached value. // and returning it, or returning a previously cached value.
@ -451,17 +422,17 @@ func (s blockSorter) Less(i, j int) bool {
// Number checks if block b1 is less than block b2. // Number checks if block b1 is less than block b2.
func Number(b1, b2 *Block) bool { func Number(b1, b2 *Block) bool {
return b1.header.Number.Cmp(b2.header.Number) < 0 return b1.header.Number().Cmp(b2.header.Number()) < 0
} }
// AddVrf adds vrf into block header // AddVrf adds vrf into block header
func (b *Block) AddVrf(vrf []byte) { func (b *Block) AddVrf(vrf []byte) {
b.header.Vrf = vrf b.header.SetVrf(vrf)
} }
// AddVdf adds vdf into block header // AddVdf adds vdf into block header
func (b *Block) AddVdf(vdf []byte) { func (b *Block) AddVdf(vdf []byte) {
b.header.Vdf = vdf b.header.SetVdf(vdf)
} }
// AddShardState adds shardState into block header // AddShardState adds shardState into block header
@ -469,12 +440,12 @@ func (b *Block) AddShardState(shardState shard.State) error {
// Make a copy because State.Hash() internally sorts entries. // Make a copy because State.Hash() internally sorts entries.
// Store the sorted copy. // Store the sorted copy.
shardState = append(shardState[:0:0], shardState...) shardState = append(shardState[:0:0], shardState...)
b.header.ShardStateHash = shardState.Hash() b.header.SetShardStateHash(shardState.Hash())
data, err := rlp.EncodeToBytes(shardState) data, err := rlp.EncodeToBytes(shardState)
if err != nil { if err != nil {
return err return err
} }
b.header.ShardState = data b.header.SetShardState(data)
return nil return nil
} }
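
A minimal sketch of the shard-state round trip these accessors enable, assuming the harmony-one/harmony import paths and an empty shard.State for illustration: the proposer stores the hash and the RLP encoding through the new setters (as AddShardState does above), and a validator later decodes the bytes back through the getter (as validateNewShardState does further down in this change).

```go
// Sketch only. Import paths are assumed; the empty shard.State is a
// stand-in for a real resharding assignment.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
	"github.com/harmony-one/harmony/block"
	"github.com/harmony-one/harmony/shard"
)

func main() {
	h := block.NewHeader()

	// Proposer side, mirroring Block.AddShardState above: store the hash
	// and the RLP encoding through the setters.
	proposed := shard.State{}
	h.SetShardStateHash(proposed.Hash())
	data, err := rlp.EncodeToBytes(proposed)
	if err != nil {
		panic(err)
	}
	h.SetShardState(data)

	// Validator side, mirroring validateNewShardState later in this change:
	// read the bytes back through the getter and decode.
	decoded := shard.State{}
	if err := rlp.DecodeBytes(h.ShardState(), &decoded); err != nil {
		panic(err)
	}
	fmt.Println("decoded committees:", len(decoded))
}
```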

@ -96,15 +96,16 @@ func TestBlock_SetLastCommitSig(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
b := &Block{header: &block.Header{}} b := &Block{header: block.NewHeader()}
b.SetLastCommitSig(tt.sig, tt.signers) b.SetLastCommitSig(tt.sig, tt.signers)
if !bytes.Equal(tt.sig, b.header.LastCommitSignature[:]) { sig := b.header.LastCommitSignature()
if !bytes.Equal(tt.sig, sig[:]) {
t.Errorf("signature mismatch: expected %+v, actual %+v", t.Errorf("signature mismatch: expected %+v, actual %+v",
tt.sig, b.header.LastCommitSignature) tt.sig, sig)
} }
if !bytes.Equal(tt.signers, b.header.LastCommitBitmap) { if !bytes.Equal(tt.signers, b.header.LastCommitBitmap()) {
t.Errorf("signature mismatch: expected %+v, actual %+v", t.Errorf("signature mismatch: expected %+v, actual %+v",
tt.signers, b.header.LastCommitBitmap) tt.signers, b.header.LastCommitBitmap())
} }
}) })
} }

@ -28,12 +28,12 @@ func (cl CrossLink) Header() *block.Header {
// ShardID returns shardID // ShardID returns shardID
func (cl CrossLink) ShardID() uint32 { func (cl CrossLink) ShardID() uint32 {
return cl.ChainHeader.ShardID return cl.ChainHeader.ShardID()
} }
// BlockNum returns blockNum // BlockNum returns blockNum
func (cl CrossLink) BlockNum() *big.Int { func (cl CrossLink) BlockNum() *big.Int {
return cl.ChainHeader.Number return cl.ChainHeader.Number()
} }
// Hash returns hash // Hash returns hash
@ -43,12 +43,12 @@ func (cl CrossLink) Hash() common.Hash {
// StateRoot returns hash of state root // StateRoot returns hash of state root
func (cl CrossLink) StateRoot() common.Hash { func (cl CrossLink) StateRoot() common.Hash {
return cl.ChainHeader.Root return cl.ChainHeader.Root()
} }
// OutgoingReceiptsRoot returns hash of cross shard receipts // OutgoingReceiptsRoot returns hash of cross shard receipts
func (cl CrossLink) OutgoingReceiptsRoot() common.Hash { func (cl CrossLink) OutgoingReceiptsRoot() common.Hash {
return cl.ChainHeader.OutgoingReceiptHash return cl.ChainHeader.OutgoingReceiptHash()
} }
// Serialize returns bytes of cross link rlp-encoded content // Serialize returns bytes of cross link rlp-encoded content

@ -128,7 +128,7 @@ func TestVrf(test *testing.T) {
tx1 := types.NewTransaction(1, common.BytesToAddress([]byte{0x11}), 0, big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11}) tx1 := types.NewTransaction(1, common.BytesToAddress([]byte{0x11}), 0, big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11})
txs := []*types.Transaction{tx1} txs := []*types.Transaction{tx1}
block := types.NewBlock(&block2.Header{Number: big.NewInt(314)}, txs, nil, nil, nil) block := types.NewBlock(block2.NewHeaderWith().Number(big.NewInt(314)).Header(), txs, nil, nil, nil)
blockHash := block.Hash() blockHash := block.Hash()
dRand.vrf(blockHash) dRand.vrf(blockHash)

@ -67,7 +67,7 @@ func (b *APIBackend) StateAndHeaderByNumber(ctx context.Context, blockNr rpc.Blo
if header == nil || err != nil { if header == nil || err != nil {
return nil, nil, err return nil, nil, err
} }
stateDb, err := b.hmy.blockchain.StateAt(header.Root) stateDb, err := b.hmy.blockchain.StateAt(header.Root())
return stateDb, header, err return stateDb, header, err
} }

@ -120,7 +120,7 @@ func (b *BloomIndexer) Reset(ctx context.Context, section uint64, lastSectionHea
// Process implements core.ChainIndexerBackend, adding a new header's bloom into // Process implements core.ChainIndexerBackend, adding a new header's bloom into
// the index. // the index.
func (b *BloomIndexer) Process(ctx context.Context, header *block.Header) error { func (b *BloomIndexer) Process(ctx context.Context, header *block.Header) error {
b.gen.AddBloom(uint(header.Number.Uint64()-b.section*b.size), header.Bloom) b.gen.AddBloom(uint(header.Number().Uint64()-b.section*b.size), header.Bloom())
b.head = header.Hash() b.head = header.Hash()
return nil return nil
} }

@ -107,10 +107,10 @@ func (c *Client) getBlock(ctx context.Context, method string, args ...interface{
return nil, err return nil, err
} }
// Quick-verify transaction. This mostly helps with debugging the server. // Quick-verify transaction. This mostly helps with debugging the server.
if head.TxHash == types.EmptyRootHash && len(body.Transactions) > 0 { if head.TxHash() == types.EmptyRootHash && len(body.Transactions) > 0 {
return nil, fmt.Errorf("server returned non-empty transaction list but block header indicates no transactions") return nil, fmt.Errorf("server returned non-empty transaction list but block header indicates no transactions")
} }
if head.TxHash != types.EmptyRootHash && len(body.Transactions) == 0 { if head.TxHash() != types.EmptyRootHash && len(body.Transactions) == 0 {
return nil, fmt.Errorf("server returned empty transaction list but block header indicates transactions") return nil, fmt.Errorf("server returned empty transaction list but block header indicates transactions")
} }
// Fill the sender cache of transactions in the block. // Fill the sender cache of transactions in the block.

@ -27,17 +27,17 @@ func (e *engineImpl) SealHash(header *block.Header) (hash common.Hash) {
hasher := sha3.NewLegacyKeccak256() hasher := sha3.NewLegacyKeccak256()
// TODO: update with new fields // TODO: update with new fields
if err := rlp.Encode(hasher, []interface{}{ if err := rlp.Encode(hasher, []interface{}{
header.ParentHash, header.ParentHash(),
header.Coinbase, header.Coinbase(),
header.Root, header.Root(),
header.TxHash, header.TxHash(),
header.ReceiptHash, header.ReceiptHash(),
header.Bloom, header.Bloom(),
header.Number, header.Number(),
header.GasLimit, header.GasLimit(),
header.GasUsed, header.GasUsed(),
header.Time, header.Time(),
header.Extra, header.Extra(),
}); err != nil { }); err != nil {
utils.Logger().Warn().Err(err).Msg("rlp.Encode failed") utils.Logger().Warn().Err(err).Msg("rlp.Encode failed")
} }
@ -66,7 +66,7 @@ func (e *engineImpl) Prepare(chain engine.ChainReader, header *block.Header) err
// VerifyHeader checks whether a header conforms to the consensus rules of the bft engine. // VerifyHeader checks whether a header conforms to the consensus rules of the bft engine.
func (e *engineImpl) VerifyHeader(chain engine.ChainReader, header *block.Header, seal bool) error { func (e *engineImpl) VerifyHeader(chain engine.ChainReader, header *block.Header, seal bool) error {
parentHeader := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1) parentHeader := chain.GetHeader(header.ParentHash(), header.Number().Uint64()-1)
if parentHeader == nil { if parentHeader == nil {
return engine.ErrUnknownAncestor return engine.ErrUnknownAncestor
} }
@ -91,22 +91,22 @@ func (e *engineImpl) VerifyHeaders(chain engine.ChainReader, headers []*block.He
// retrievePublicKeysFromLastBlock finds the public keys of last block's committee // retrievePublicKeysFromLastBlock finds the public keys of last block's committee
func retrievePublicKeysFromLastBlock(bc engine.ChainReader, header *block.Header) ([]*bls.PublicKey, error) { func retrievePublicKeysFromLastBlock(bc engine.ChainReader, header *block.Header) ([]*bls.PublicKey, error) {
parentHeader := bc.GetHeaderByHash(header.ParentHash) parentHeader := bc.GetHeaderByHash(header.ParentHash())
if parentHeader == nil { if parentHeader == nil {
return nil, ctxerror.New("cannot find parent block header in DB", return nil, ctxerror.New("cannot find parent block header in DB",
"parentHash", header.ParentHash) "parentHash", header.ParentHash())
} }
parentShardState, err := bc.ReadShardState(parentHeader.Epoch) parentShardState, err := bc.ReadShardState(parentHeader.Epoch())
if err != nil { if err != nil {
return nil, ctxerror.New("cannot read shard state", return nil, ctxerror.New("cannot read shard state",
"epoch", parentHeader.Epoch, "epoch", parentHeader.Epoch(),
).WithCause(err) ).WithCause(err)
} }
parentCommittee := parentShardState.FindCommitteeByID(parentHeader.ShardID) parentCommittee := parentShardState.FindCommitteeByID(parentHeader.ShardID())
if parentCommittee == nil { if parentCommittee == nil {
return nil, ctxerror.New("cannot find shard in the shard state", return nil, ctxerror.New("cannot find shard in the shard state",
"parentBlockNumber", parentHeader.Number, "parentBlockNumber", parentHeader.Number(),
"shardID", parentHeader.ShardID, "shardID", parentHeader.ShardID(),
) )
} }
var committerKeys []*bls.PublicKey var committerKeys []*bls.PublicKey
@ -125,23 +125,25 @@ func retrievePublicKeysFromLastBlock(bc engine.ChainReader, header *block.Header
// VerifySeal implements Engine, checking whether the given block satisfies // VerifySeal implements Engine, checking whether the given block satisfies
// the PoS difficulty requirements, i.e. >= 2f+1 valid signatures from the committee // the PoS difficulty requirements, i.e. >= 2f+1 valid signatures from the committee
func (e *engineImpl) VerifySeal(chain engine.ChainReader, header *block.Header) error { func (e *engineImpl) VerifySeal(chain engine.ChainReader, header *block.Header) error {
if chain.CurrentHeader().Number.Uint64() <= uint64(1) { if chain.CurrentHeader().Number().Uint64() <= uint64(1) {
return nil return nil
} }
publicKeys, err := retrievePublicKeysFromLastBlock(chain, header) publicKeys, err := retrievePublicKeysFromLastBlock(chain, header)
if err != nil { if err != nil {
return ctxerror.New("[VerifySeal] Cannot retrieve publickeys from last block").WithCause(err) return ctxerror.New("[VerifySeal] Cannot retrieve publickeys from last block").WithCause(err)
} }
payload := append(header.LastCommitSignature[:], header.LastCommitBitmap...) sig := header.LastCommitSignature()
payload := append(sig[:], header.LastCommitBitmap()...)
aggSig, mask, err := ReadSignatureBitmapByPublicKeys(payload, publicKeys) aggSig, mask, err := ReadSignatureBitmapByPublicKeys(payload, publicKeys)
if err != nil { if err != nil {
return ctxerror.New("[VerifySeal] Unable to deserialize the LastCommitSignature and LastCommitBitmap in Block Header").WithCause(err) return ctxerror.New("[VerifySeal] Unable to deserialize the LastCommitSignature and LastCommitBitmap in Block Header").WithCause(err)
} }
parentHeader := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1) parentHash := header.ParentHash()
parentHeader := chain.GetHeader(parentHash, header.Number().Uint64()-1)
parentQuorum, err := QuorumForBlock(chain, parentHeader) parentQuorum, err := QuorumForBlock(chain, parentHeader)
if err != nil { if err != nil {
return errors.Wrapf(err, return errors.Wrapf(err,
"cannot calculate quorum for block %s", header.Number) "cannot calculate quorum for block %s", header.Number())
} }
if count := utils.CountOneBits(mask.Bitmap); count < parentQuorum { if count := utils.CountOneBits(mask.Bitmap); count < parentQuorum {
return ctxerror.New("[VerifySeal] Not enough signature in LastCommitSignature from Block Header", return ctxerror.New("[VerifySeal] Not enough signature in LastCommitSignature from Block Header",
@ -149,11 +151,11 @@ func (e *engineImpl) VerifySeal(chain engine.ChainReader, header *block.Header)
} }
blockNumHash := make([]byte, 8) blockNumHash := make([]byte, 8)
binary.LittleEndian.PutUint64(blockNumHash, header.Number.Uint64()-1) binary.LittleEndian.PutUint64(blockNumHash, header.Number().Uint64()-1)
lastCommitPayload := append(blockNumHash, header.ParentHash[:]...) lastCommitPayload := append(blockNumHash, parentHash[:]...)
if !aggSig.VerifyHash(mask.AggregatePublic, lastCommitPayload) { if !aggSig.VerifyHash(mask.AggregatePublic, lastCommitPayload) {
return ctxerror.New("[VerifySeal] Unable to verify aggregated signature from last block", "lastBlockNum", header.Number.Uint64()-1, "lastBlockHash", header.ParentHash) return ctxerror.New("[VerifySeal] Unable to verify aggregated signature from last block", "lastBlockNum", header.Number().Uint64()-1, "lastBlockHash", parentHash)
} }
return nil return nil
} }
@ -166,7 +168,7 @@ func (e *engineImpl) Finalize(chain engine.ChainReader, header *block.Header, st
if err := AccumulateRewards(chain, state, header); err != nil { if err := AccumulateRewards(chain, state, header); err != nil {
return nil, ctxerror.New("cannot pay block reward").WithCause(err) return nil, ctxerror.New("cannot pay block reward").WithCause(err)
} }
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) header.SetRoot(state.IntermediateRoot(chain.Config().IsEIP158(header.Number())))
return types.NewBlock(header, txs, receipts, outcxs, incxs), nil return types.NewBlock(header, txs, receipts, outcxs, incxs), nil
} }
@ -174,15 +176,15 @@ func (e *engineImpl) Finalize(chain engine.ChainReader, header *block.Header, st
func QuorumForBlock( func QuorumForBlock(
chain engine.ChainReader, h *block.Header, chain engine.ChainReader, h *block.Header,
) (quorum int, err error) { ) (quorum int, err error) {
ss, err := chain.ReadShardState(h.Epoch) ss, err := chain.ReadShardState(h.Epoch())
if err != nil { if err != nil {
return 0, errors.Wrapf(err, return 0, errors.Wrapf(err,
"cannot read shard state for epoch %s", h.Epoch) "cannot read shard state for epoch %s", h.Epoch())
} }
c := ss.FindCommitteeByID(h.ShardID) c := ss.FindCommitteeByID(h.ShardID())
if c == nil { if c == nil {
return 0, errors.Errorf( return 0, errors.Errorf(
"cannot find shard %d in shard state", h.ShardID) "cannot find shard %d in shard state", h.ShardID())
} }
return (len(c.NodeList))*2/3 + 1, nil return (len(c.NodeList))*2/3 + 1, nil
} }

@ -25,7 +25,7 @@ var BlockReward = new(big.Int).Mul(big.NewInt(24), big.NewInt(denominations.One)
func AccumulateRewards( func AccumulateRewards(
bc engine.ChainReader, state *state.DB, header *block.Header, bc engine.ChainReader, state *state.DB, header *block.Header,
) error { ) error {
blockNum := header.Number.Uint64() blockNum := header.Number().Uint64()
if blockNum == 0 { if blockNum == 0 {
// Epoch block has no parent to reward. // Epoch block has no parent to reward.
return nil return nil
@ -33,27 +33,27 @@ func AccumulateRewards(
// TODO ek – retrieving by parent number (blockNum - 1) doesn't work, // TODO ek – retrieving by parent number (blockNum - 1) doesn't work,
// while it is okay with hash. Sounds like DB inconsistency. // while it is okay with hash. Sounds like DB inconsistency.
// Figure out why. // Figure out why.
parentHeader := bc.GetHeaderByHash(header.ParentHash) parentHeader := bc.GetHeaderByHash(header.ParentHash())
if parentHeader == nil { if parentHeader == nil {
return ctxerror.New("cannot find parent block header in DB", return ctxerror.New("cannot find parent block header in DB",
"parentHash", header.ParentHash) "parentHash", header.ParentHash())
} }
if parentHeader.Number.Cmp(common.Big0) == 0 { if parentHeader.Number().Cmp(common.Big0) == 0 {
// Parent is an epoch block, // Parent is an epoch block,
// which is not signed in the usual manner therefore rewards nothing. // which is not signed in the usual manner therefore rewards nothing.
return nil return nil
} }
parentShardState, err := bc.ReadShardState(parentHeader.Epoch) parentShardState, err := bc.ReadShardState(parentHeader.Epoch())
if err != nil { if err != nil {
return ctxerror.New("cannot read shard state", return ctxerror.New("cannot read shard state",
"epoch", parentHeader.Epoch, "epoch", parentHeader.Epoch(),
).WithCause(err) ).WithCause(err)
} }
parentCommittee := parentShardState.FindCommitteeByID(parentHeader.ShardID) parentCommittee := parentShardState.FindCommitteeByID(parentHeader.ShardID())
if parentCommittee == nil { if parentCommittee == nil {
return ctxerror.New("cannot find shard in the shard state", return ctxerror.New("cannot find shard in the shard state",
"parentBlockNumber", parentHeader.Number, "parentBlockNumber", parentHeader.Number(),
"shardID", parentHeader.ShardID, "shardID", parentHeader.ShardID(),
) )
} }
var committerKeys []*bls.PublicKey var committerKeys []*bls.PublicKey
@ -70,7 +70,7 @@ func AccumulateRewards(
if err != nil { if err != nil {
return ctxerror.New("cannot create group sig mask").WithCause(err) return ctxerror.New("cannot create group sig mask").WithCause(err)
} }
if err := mask.SetMask(header.LastCommitBitmap); err != nil { if err := mask.SetMask(header.LastCommitBitmap()); err != nil {
return ctxerror.New("cannot set group sig mask bits").WithCause(err) return ctxerror.New("cannot set group sig mask bits").WithCause(err)
} }
totalAmount := big.NewInt(0) totalAmount := big.NewInt(0)

@ -99,7 +99,7 @@ func (s *PublicBlockChainAPI) GetBalance(ctx context.Context, address string, bl
// BlockNumber returns the block number of the chain head. // BlockNumber returns the block number of the chain head.
func (s *PublicBlockChainAPI) BlockNumber() hexutil.Uint64 { func (s *PublicBlockChainAPI) BlockNumber() hexutil.Uint64 {
header, _ := s.b.HeaderByNumber(context.Background(), rpc.LatestBlockNumber) // latest header should always be available header, _ := s.b.HeaderByNumber(context.Background(), rpc.LatestBlockNumber) // latest header should always be available
return hexutil.Uint64(header.Number.Uint64()) return hexutil.Uint64(header.Number().Uint64())
} }
// Call executes the given transaction on the state for the given block number. // Call executes the given transaction on the state for the given block number.

@ -137,7 +137,7 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
if header == nil { if header == nil {
return nil, nil return nil, nil
} }
head := header.Number.Uint64() head := header.Number().Uint64()
if f.begin == -1 { if f.begin == -1 {
f.begin = int64(head) f.begin = int64(head)
@ -235,7 +235,7 @@ func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, e
// blockLogs returns the logs matching the filter criteria within a single block. // blockLogs returns the logs matching the filter criteria within a single block.
func (f *Filter) blockLogs(ctx context.Context, header *block.Header) (logs []*types.Log, err error) { func (f *Filter) blockLogs(ctx context.Context, header *block.Header) (logs []*types.Log, err error) {
if bloomFilter(header.Bloom, f.addresses, f.topics) { if bloomFilter(header.Bloom(), f.addresses, f.topics) {
found, err := f.checkMatches(ctx, header) found, err := f.checkMatches(ctx, header)
if err != nil { if err != nil {
return logs, err return logs, err

@ -378,13 +378,13 @@ func (es *EventSystem) lightFilterNewHead(newHeader *block.Header, callBack func
// find common ancestor, create list of rolled back and new block hashes // find common ancestor, create list of rolled back and new block hashes
var oldHeaders, newHeaders []*block.Header var oldHeaders, newHeaders []*block.Header
for oldh.Hash() != newh.Hash() { for oldh.Hash() != newh.Hash() {
if oldh.Number.Uint64() >= newh.Number.Uint64() { if oldh.Number().Uint64() >= newh.Number().Uint64() {
oldHeaders = append(oldHeaders, oldh) oldHeaders = append(oldHeaders, oldh)
oldh = rawdb.ReadHeader(es.backend.ChainDb(), oldh.ParentHash, oldh.Number.Uint64()-1) oldh = rawdb.ReadHeader(es.backend.ChainDb(), oldh.ParentHash(), oldh.Number().Uint64()-1)
} }
if oldh.Number.Uint64() < newh.Number.Uint64() { if oldh.Number().Uint64() < newh.Number().Uint64() {
newHeaders = append(newHeaders, newh) newHeaders = append(newHeaders, newh)
newh = rawdb.ReadHeader(es.backend.ChainDb(), newh.ParentHash, newh.Number.Uint64()-1) newh = rawdb.ReadHeader(es.backend.ChainDb(), newh.ParentHash(), newh.Number().Uint64()-1)
if newh == nil { if newh == nil {
// happens when CHT syncing, nothing to do // happens when CHT syncing, nothing to do
newh = oldh newh = oldh
@ -403,7 +403,7 @@ func (es *EventSystem) lightFilterNewHead(newHeader *block.Header, callBack func
// filter logs of a single header in light client mode // filter logs of a single header in light client mode
func (es *EventSystem) lightFilterLogs(header *block.Header, addresses []common.Address, topics [][]common.Hash, remove bool) []*types.Log { func (es *EventSystem) lightFilterLogs(header *block.Header, addresses []common.Address, topics [][]common.Hash, remove bool) []*types.Log {
if bloomFilter(header.Bloom, addresses, topics) { if bloomFilter(header.Bloom(), addresses, topics) {
// Get the logs of the block // Get the logs of the block
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel() defer cancel()

@ -92,22 +92,22 @@ type RPCBlock struct {
func RPCMarshalBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { func RPCMarshalBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) {
head := b.Header() // copies the header once head := b.Header() // copies the header once
fields := map[string]interface{}{ fields := map[string]interface{}{
"number": (*hexutil.Big)(head.Number), "number": (*hexutil.Big)(head.Number()),
"hash": b.Hash(), "hash": b.Hash(),
"parentHash": head.ParentHash, "parentHash": head.ParentHash(),
"nonce": 0, // Remove this because we don't have it in our header "nonce": 0, // Remove this because we don't have it in our header
"mixHash": head.MixDigest, "mixHash": head.MixDigest(),
"logsBloom": head.Bloom, "logsBloom": head.Bloom(),
"stateRoot": head.Root, "stateRoot": head.Root(),
"miner": head.Coinbase, "miner": head.Coinbase(),
"difficulty": 0, // Remove this because we don't have it in our header "difficulty": 0, // Remove this because we don't have it in our header
"extraData": hexutil.Bytes(head.Extra), "extraData": hexutil.Bytes(head.Extra()),
"size": hexutil.Uint64(b.Size()), "size": hexutil.Uint64(b.Size()),
"gasLimit": hexutil.Uint64(head.GasLimit), "gasLimit": hexutil.Uint64(head.GasLimit()),
"gasUsed": hexutil.Uint64(head.GasUsed), "gasUsed": hexutil.Uint64(head.GasUsed()),
"timestamp": hexutil.Uint64(head.Time.Uint64()), "timestamp": hexutil.Uint64(head.Time().Uint64()),
"transactionsRoot": head.TxHash, "transactionsRoot": head.TxHash(),
"receiptsRoot": head.ReceiptHash, "receiptsRoot": head.ReceiptHash(),
} }
if inclTx { if inclTx {

@ -340,7 +340,7 @@ func (node *Node) StartServer() {
// Currently used for stats reporting purpose // Currently used for stats reporting purpose
func (node *Node) countNumTransactionsInBlockchain() int { func (node *Node) countNumTransactionsInBlockchain() int {
count := 0 count := 0
for block := node.Blockchain().CurrentBlock(); block != nil; block = node.Blockchain().GetBlockByHash(block.Header().ParentHash) { for block := node.Blockchain().CurrentBlock(); block != nil; block = node.Blockchain().GetBlockByHash(block.Header().ParentHash()) {
count += len(block.Transactions()) count += len(block.Transactions())
} }
return count return count

@ -39,9 +39,9 @@ func (node *Node) ProcessHeaderMessage(msgPayload []byte) {
firstCrossLinkBlock := core.ShardingSchedule.FirstCrossLinkBlock() firstCrossLinkBlock := core.ShardingSchedule.FirstCrossLinkBlock()
for _, header := range headers { for _, header := range headers {
if header.Number.Uint64() >= firstCrossLinkBlock { if header.Number().Uint64() >= firstCrossLinkBlock {
// Only process cross link starting from FirstCrossLinkBlock // Only process cross link starting from FirstCrossLinkBlock
utils.Logger().Debug().Msgf("[ProcessHeaderMessage] Add Pending CrossLink, shardID %d, blockNum %d", header.ShardID, header.Number) utils.Logger().Debug().Msgf("[ProcessHeaderMessage] Add Pending CrossLink, shardID %d, blockNum %d", header.ShardID(), header.Number())
crossLinkHeadersToProcess = append(crossLinkHeadersToProcess, header) crossLinkHeadersToProcess = append(crossLinkHeadersToProcess, header)
} }
} }
@ -54,22 +54,22 @@ func (node *Node) ProcessHeaderMessage(msgPayload []byte) {
if len(headersToQuque) > crossLinkBatchSize { if len(headersToQuque) > crossLinkBatchSize {
break break
} }
exist, err := node.Blockchain().ReadCrossLink(header.ShardID, header.Number.Uint64(), false) exist, err := node.Blockchain().ReadCrossLink(header.ShardID(), header.Number().Uint64(), false)
if err == nil && exist != nil { if err == nil && exist != nil {
utils.Logger().Debug(). utils.Logger().Debug().
Msgf("[ProcessingHeader] Cross Link already exists, pass. Block num: %d, shardID %d", header.Number, header.ShardID) Msgf("[ProcessingHeader] Cross Link already exists, pass. Block num: %d, shardID %d", header.Number(), header.ShardID())
continue continue
} }
if header.Number.Uint64() > firstCrossLinkBlock { // Directly trust the first cross-link if header.Number().Uint64() > firstCrossLinkBlock { // Directly trust the first cross-link
// Sanity check on the previous link with the new link // Sanity check on the previous link with the new link
previousLink, err := node.Blockchain().ReadCrossLink(header.ShardID, header.Number.Uint64()-1, false) previousLink, err := node.Blockchain().ReadCrossLink(header.ShardID(), header.Number().Uint64()-1, false)
if err != nil { if err != nil {
previousLink, err = node.Blockchain().ReadCrossLink(header.ShardID, header.Number.Uint64()-1, true) previousLink, err = node.Blockchain().ReadCrossLink(header.ShardID(), header.Number().Uint64()-1, true)
if err != nil { if err != nil {
headersToQuque = append(headersToQuque, header) headersToQuque = append(headersToQuque, header)
utils.Logger().Error().Err(err). utils.Logger().Error().Err(err).
Msgf("[ProcessingHeader] ReadCrossLink cannot read previousLink with number %d, shardID %d", header.Number.Uint64()-1, header.ShardID) Msgf("[ProcessingHeader] ReadCrossLink cannot read previousLink with number %d, shardID %d", header.Number().Uint64()-1, header.ShardID())
continue continue
} }
} }
@ -78,14 +78,14 @@ func (node *Node) ProcessHeaderMessage(msgPayload []byte) {
if err != nil { if err != nil {
utils.Logger().Error(). utils.Logger().Error().
Err(err). Err(err).
Msgf("[ProcessingHeader] Failed to verify new cross link header for shardID %d, blockNum %d", header.ShardID, header.Number) Msgf("[ProcessingHeader] Failed to verify new cross link header for shardID %d, blockNum %d", header.ShardID(), header.Number())
continue continue
} }
} }
crossLink := types.NewCrossLink(header) crossLink := types.NewCrossLink(header)
utils.Logger().Debug(). utils.Logger().Debug().
Msgf("[ProcessingHeader] committing for shardID %d, blockNum %d", header.ShardID, header.Number.Uint64()) Msgf("[ProcessingHeader] committing for shardID %d, blockNum %d", header.ShardID(), header.Number().Uint64())
node.Blockchain().WriteCrossLinks(types.CrossLinks{crossLink}, true) node.Blockchain().WriteCrossLinks(types.CrossLinks{crossLink}, true)
} }
@ -122,7 +122,7 @@ func (node *Node) verifyIncomingReceipts(block *types.Block) error {
if len(cxps) > 0 { if len(cxps) > 0 {
incomingReceiptHash = types.DeriveSha(cxps) incomingReceiptHash = types.DeriveSha(cxps)
} }
if incomingReceiptHash != block.Header().IncomingReceiptHash { if incomingReceiptHash != block.Header().IncomingReceiptHash() {
return ctxerror.New("[verifyIncomingReceipts] Invalid IncomingReceiptHash in block header") return ctxerror.New("[verifyIncomingReceipts] Invalid IncomingReceiptHash in block header")
} }
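
The incoming-receipt hash checked here is written on the proposer side with SetIncomingReceiptHash (see Worker.CommitReceipts later in this change). A minimal sketch of that round trip, assuming the harmony import paths and an empty receipt list:

```go
// Sketch only. With no receipt proofs the hash falls back to
// types.EmptyRootHash, as Worker.CommitReceipts does.
package main

import (
	"fmt"

	"github.com/harmony-one/harmony/block"
	"github.com/harmony-one/harmony/core/types"
)

func main() {
	var receipts []*types.CXReceiptsProof // empty for illustration

	// Proposer side: derive the hash and store it through the setter.
	h := block.NewHeader()
	if len(receipts) == 0 {
		h.SetIncomingReceiptHash(types.EmptyRootHash)
	} else {
		h.SetIncomingReceiptHash(types.DeriveSha(types.CXReceiptsProofs(receipts)))
	}

	// Verifier side, mirroring verifyIncomingReceipts above: recompute and
	// compare against the getter.
	want := types.EmptyRootHash
	if len(receipts) > 0 {
		want = types.DeriveSha(types.CXReceiptsProofs(receipts))
	}
	fmt.Println("incoming receipt hash matches:", want == h.IncomingReceiptHash())
}
```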
@ -148,7 +148,7 @@ func (node *Node) compareCrosslinkWithReceipts(cxp *types.CXReceiptsProof) error
return ctxerror.New("[compareCrosslinkWithReceipts] Cannot get crosslink", "blockNum", blockNum, "shardID", shardID).WithCause(err) return ctxerror.New("[compareCrosslinkWithReceipts] Cannot get crosslink", "blockNum", blockNum, "shardID", shardID).WithCause(err)
} }
hash = crossLink.ChainHeader.Hash() hash = crossLink.ChainHeader.Hash()
outgoingReceiptHash = crossLink.ChainHeader.OutgoingReceiptHash outgoingReceiptHash = crossLink.ChainHeader.OutgoingReceiptHash()
} }
// verify the source block hash is from a finalized block // verify the source block hash is from a finalized block
if hash == cxp.MerkleProof.BlockHash && outgoingReceiptHash == cxp.MerkleProof.CXReceiptHash { if hash == cxp.MerkleProof.BlockHash && outgoingReceiptHash == cxp.MerkleProof.CXReceiptHash {
@ -161,16 +161,17 @@ func (node *Node) compareCrosslinkWithReceipts(cxp *types.CXReceiptsProof) error
func (node *Node) VerifyCrosslinkHeader(prevHeader, header *block.Header) error { func (node *Node) VerifyCrosslinkHeader(prevHeader, header *block.Header) error {
// TODO: add fork choice rule // TODO: add fork choice rule
if prevHeader.Hash() != header.ParentHash { parentHash := header.ParentHash()
return ctxerror.New("[CrossLink] Invalid cross link header - parent hash mismatch", "shardID", header.ShardID, "blockNum", header.Number) if prevHeader.Hash() != parentHash {
return ctxerror.New("[CrossLink] Invalid cross link header - parent hash mismatch", "shardID", header.ShardID(), "blockNum", header.Number())
} }
// Verify signature of the new cross link header // Verify signature of the new cross link header
shardState, err := node.Blockchain().ReadShardState(prevHeader.Epoch) shardState, err := node.Blockchain().ReadShardState(prevHeader.Epoch())
committee := shardState.FindCommitteeByID(prevHeader.ShardID) committee := shardState.FindCommitteeByID(prevHeader.ShardID())
if err != nil || committee == nil { if err != nil || committee == nil {
return ctxerror.New("[CrossLink] Failed to read shard state for cross link header", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err) return ctxerror.New("[CrossLink] Failed to read shard state for cross link header", "shardID", header.ShardID(), "blockNum", header.Number()).WithCause(err)
} }
var committerKeys []*bls.PublicKey var committerKeys []*bls.PublicKey
@ -185,29 +186,30 @@ func (node *Node) VerifyCrosslinkHeader(prevHeader, header *block.Header) error
committerKeys = append(committerKeys, committerKey) committerKeys = append(committerKeys, committerKey)
} }
if !parseKeysSuccess { if !parseKeysSuccess {
return ctxerror.New("[CrossLink] cannot convert BLS public key", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err) return ctxerror.New("[CrossLink] cannot convert BLS public key", "shardID", header.ShardID(), "blockNum", header.Number()).WithCause(err)
} }
if header.Number.Uint64() > 1 { // First block doesn't have last sig if header.Number().Uint64() > 1 { // First block doesn't have last sig
mask, err := bls_cosi.NewMask(committerKeys, nil) mask, err := bls_cosi.NewMask(committerKeys, nil)
if err != nil { if err != nil {
return ctxerror.New("cannot create group sig mask", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err) return ctxerror.New("cannot create group sig mask", "shardID", header.ShardID(), "blockNum", header.Number()).WithCause(err)
} }
if err := mask.SetMask(header.LastCommitBitmap); err != nil { if err := mask.SetMask(header.LastCommitBitmap()); err != nil {
return ctxerror.New("cannot set group sig mask bits", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err) return ctxerror.New("cannot set group sig mask bits", "shardID", header.ShardID(), "blockNum", header.Number()).WithCause(err)
} }
aggSig := bls.Sign{} aggSig := bls.Sign{}
err = aggSig.Deserialize(header.LastCommitSignature[:]) sig := header.LastCommitSignature()
err = aggSig.Deserialize(sig[:])
if err != nil { if err != nil {
return ctxerror.New("unable to deserialize multi-signature from payload").WithCause(err) return ctxerror.New("unable to deserialize multi-signature from payload").WithCause(err)
} }
blockNumBytes := make([]byte, 8) blockNumBytes := make([]byte, 8)
binary.LittleEndian.PutUint64(blockNumBytes, header.Number.Uint64()-1) binary.LittleEndian.PutUint64(blockNumBytes, header.Number().Uint64()-1)
commitPayload := append(blockNumBytes, header.ParentHash[:]...) commitPayload := append(blockNumBytes, parentHash[:]...)
if !aggSig.VerifyHash(mask.AggregatePublic, commitPayload) { if !aggSig.VerifyHash(mask.AggregatePublic, commitPayload) {
return ctxerror.New("Failed to verify the signature for cross link header ", "shardID", header.ShardID, "blockNum", header.Number) return ctxerror.New("Failed to verify the signature for cross link header ", "shardID", header.ShardID(), "blockNum", header.Number())
} }
} }
return nil return nil
@ -219,7 +221,7 @@ func (node *Node) ProposeCrossLinkDataForBeaconchain() (types.CrossLinks, error)
Uint64("blockNum", node.Blockchain().CurrentBlock().NumberU64()+1). Uint64("blockNum", node.Blockchain().CurrentBlock().NumberU64()+1).
Msg("Proposing cross links ...") Msg("Proposing cross links ...")
curBlock := node.Blockchain().CurrentBlock() curBlock := node.Blockchain().CurrentBlock()
numShards := core.ShardingSchedule.InstanceForEpoch(curBlock.Header().Epoch).NumShards() numShards := core.ShardingSchedule.InstanceForEpoch(curBlock.Header().Epoch()).NumShards()
shardCrossLinks := make([]types.CrossLinks, numShards) shardCrossLinks := make([]types.CrossLinks, numShards)

@ -297,7 +297,7 @@ func (node *Node) BroadcastCrossLinkHeader(newBlock *types.Block) {
utils.Logger().Info().Msgf("[BroadcastCrossLinkHeader] Broadcasting Block Headers, latestBlockNum %d, currentBlockNum %d, Number of Headers %d", latestBlockNum, newBlock.NumberU64(), len(headers)) utils.Logger().Info().Msgf("[BroadcastCrossLinkHeader] Broadcasting Block Headers, latestBlockNum %d, currentBlockNum %d, Number of Headers %d", latestBlockNum, newBlock.NumberU64(), len(headers))
for _, header := range headers { for _, header := range headers {
utils.Logger().Debug().Msgf("[BroadcastCrossLinkHeader] Broadcasting %d", header.Number.Uint64()) utils.Logger().Debug().Msgf("[BroadcastCrossLinkHeader] Broadcasting %d", header.Number().Uint64())
} }
node.host.SendMessageToGroups([]p2p.GroupID{node.NodeConfig.GetBeaconGroupID()}, host.ConstructP2pMessage(byte(0), proto_node.ConstructCrossLinkHeadersMessage(headers))) node.host.SendMessageToGroups([]p2p.GroupID{node.NodeConfig.GetBeaconGroupID()}, host.ConstructP2pMessage(byte(0), proto_node.ConstructCrossLinkHeadersMessage(headers)))
} }
@ -305,7 +305,7 @@ func (node *Node) BroadcastCrossLinkHeader(newBlock *types.Block) {
// BroadcastCXReceipts broadcasts cross shard receipts to corresponding // BroadcastCXReceipts broadcasts cross shard receipts to corresponding
// destination shards // destination shards
func (node *Node) BroadcastCXReceipts(newBlock *types.Block) { func (node *Node) BroadcastCXReceipts(newBlock *types.Block) {
epoch := newBlock.Header().Epoch epoch := newBlock.Header().Epoch()
shardingConfig := core.ShardingSchedule.InstanceForEpoch(epoch) shardingConfig := core.ShardingSchedule.InstanceForEpoch(epoch)
shardNum := int(shardingConfig.NumShards()) shardNum := int(shardingConfig.NumShards())
myShardID := node.Consensus.ShardID myShardID := node.Consensus.ShardID
@ -386,22 +386,22 @@ func (node *Node) VerifyNewBlock(newBlock *types.Block) error {
// VerifyBlockCrossLinks verifies the cross links of the block // VerifyBlockCrossLinks verifies the cross links of the block
func (node *Node) VerifyBlockCrossLinks(block *types.Block) error { func (node *Node) VerifyBlockCrossLinks(block *types.Block) error {
if len(block.Header().CrossLinks) == 0 { if len(block.Header().CrossLinks()) == 0 {
return nil return nil
} }
crossLinks := &types.CrossLinks{} crossLinks := &types.CrossLinks{}
err := rlp.DecodeBytes(block.Header().CrossLinks, crossLinks) err := rlp.DecodeBytes(block.Header().CrossLinks(), crossLinks)
if err != nil { if err != nil {
return ctxerror.New("[CrossLinkVerification] failed to decode cross links", return ctxerror.New("[CrossLinkVerification] failed to decode cross links",
"blockHash", block.Hash(), "blockHash", block.Hash(),
"crossLinks", len(block.Header().CrossLinks), "crossLinks", len(block.Header().CrossLinks()),
).WithCause(err) ).WithCause(err)
} }
if !crossLinks.IsSorted() { if !crossLinks.IsSorted() {
return ctxerror.New("[CrossLinkVerification] cross links are not sorted", return ctxerror.New("[CrossLinkVerification] cross links are not sorted",
"blockHash", block.Hash(), "blockHash", block.Hash(),
"crossLinks", len(block.Header().CrossLinks), "crossLinks", len(block.Header().CrossLinks()),
) )
} }
@ -420,7 +420,7 @@ func (node *Node) VerifyBlockCrossLinks(block *types.Block) error {
} }
} }
} else { } else {
if (*crossLinks)[i-1].Header().ShardID != crossLink.Header().ShardID { if (*crossLinks)[i-1].Header().ShardID() != crossLink.Header().ShardID() {
if crossLink.BlockNum().Uint64() > firstCrossLinkBlock { if crossLink.BlockNum().Uint64() > firstCrossLinkBlock {
lastLink, err = node.Blockchain().ReadShardLastCrossLink(crossLink.ShardID()) lastLink, err = node.Blockchain().ReadShardLastCrossLink(crossLink.ShardID())
if err != nil { if err != nil {
@ -457,7 +457,7 @@ var BigMaxUint64 = new(big.Int).SetBytes([]byte{
func (node *Node) validateNewShardState(block *types.Block, stakeInfo *map[common.Address]*structs.StakeInfo) error { func (node *Node) validateNewShardState(block *types.Block, stakeInfo *map[common.Address]*structs.StakeInfo) error {
// Common case first – blocks without resharding proposal // Common case first – blocks without resharding proposal
header := block.Header() header := block.Header()
if header.ShardStateHash == (common.Hash{}) { if header.ShardStateHash() == (common.Hash{}) {
// No new shard state was proposed // No new shard state was proposed
if block.ShardID() == 0 { if block.ShardID() == 0 {
if core.IsEpochLastBlock(block) { if core.IsEpochLastBlock(block) {
@ -475,7 +475,7 @@ func (node *Node) validateNewShardState(block *types.Block, stakeInfo *map[commo
return nil return nil
} }
shardState := &shard.State{} shardState := &shard.State{}
err := rlp.DecodeBytes(header.ShardState, shardState) err := rlp.DecodeBytes(header.ShardState(), shardState)
if err != nil { if err != nil {
return err return err
} }
@ -483,7 +483,7 @@ func (node *Node) validateNewShardState(block *types.Block, stakeInfo *map[commo
if block.ShardID() == 0 { if block.ShardID() == 0 {
// Beacon validators independently recalculate the master state and // Beacon validators independently recalculate the master state and
// compare it against the proposed copy. // compare it against the proposed copy.
nextEpoch := new(big.Int).Add(block.Header().Epoch, common.Big1) nextEpoch := new(big.Int).Add(block.Header().Epoch(), common.Big1)
// TODO ek – this may be called from regular shards, // TODO ek – this may be called from regular shards,
// for vetting beacon chain blocks received during block syncing. // for vetting beacon chain blocks received during block syncing.
// DRand may or or may not get in the way. Test this out. // DRand may or or may not get in the way. Test this out.
@ -650,7 +650,7 @@ func (node *Node) broadcastEpochShardState(newBlock *types.Block) error {
} }
epochShardStateMessage := proto_node.ConstructEpochShardStateMessage( epochShardStateMessage := proto_node.ConstructEpochShardStateMessage(
shard.EpochShardState{ shard.EpochShardState{
Epoch: newBlock.Header().Epoch.Uint64() + 1, Epoch: newBlock.Header().Epoch().Uint64() + 1,
ShardState: shardState, ShardState: shardState,
}, },
) )
@ -666,7 +666,7 @@ func (node *Node) AddNewBlock(newBlock *types.Block) error {
utils.Logger().Error(). utils.Logger().Error().
Err(err). Err(err).
Uint64("blockNum", newBlock.NumberU64()). Uint64("blockNum", newBlock.NumberU64()).
Bytes("parentHash", newBlock.Header().ParentHash.Bytes()[:]). Bytes("parentHash", newBlock.Header().ParentHash().Bytes()[:]).
Bytes("hash", newBlock.Header().Hash().Bytes()[:]). Bytes("hash", newBlock.Header().Hash().Bytes()[:]).
Msg("Error Adding new block to blockchain") Msg("Error Adding new block to blockchain")
} else { } else {

@ -147,7 +147,7 @@ func (node *Node) proposeShardStateWithoutBeaconSync(block *types.Block) error {
if block == nil || !core.IsEpochLastBlock(block) { if block == nil || !core.IsEpochLastBlock(block) {
return nil return nil
} }
nextEpoch := new(big.Int).Add(block.Header().Epoch, common.Big1) nextEpoch := new(big.Int).Add(block.Header().Epoch(), common.Big1)
shardState := core.GetShardState(nextEpoch) shardState := core.GetShardState(nextEpoch)
return block.AddShardState(shardState) return block.AddShardState(shardState)
} }
@ -168,7 +168,7 @@ func (node *Node) proposeBeaconShardState(block *types.Block) error {
// We haven't reached the end of this epoch; don't propose yet. // We haven't reached the end of this epoch; don't propose yet.
return nil return nil
} }
nextEpoch := new(big.Int).Add(block.Header().Epoch, common.Big1) nextEpoch := new(big.Int).Add(block.Header().Epoch(), common.Big1)
shardState, err := core.CalculateNewShardState( shardState, err := core.CalculateNewShardState(
node.Blockchain(), nextEpoch, &node.CurrentStakes) node.Blockchain(), nextEpoch, &node.CurrentStakes)
if err != nil { if err != nil {

@ -87,7 +87,7 @@ func (w *Worker) throttleTxs(selected types.Transactions, recentTxsStats types.R
// SelectTransactionsForNewBlock selects transactions for new block. // SelectTransactionsForNewBlock selects transactions for new block.
func (w *Worker) SelectTransactionsForNewBlock(newBlockNum uint64, txs types.Transactions, recentTxsStats types.RecentTxsStats, txsThrottleConfig *shardingconfig.TxsThrottleConfig, coinbase common.Address) (types.Transactions, types.Transactions, types.Transactions) { func (w *Worker) SelectTransactionsForNewBlock(newBlockNum uint64, txs types.Transactions, recentTxsStats types.RecentTxsStats, txsThrottleConfig *shardingconfig.TxsThrottleConfig, coinbase common.Address) (types.Transactions, types.Transactions, types.Transactions) {
if w.current.gasPool == nil { if w.current.gasPool == nil {
w.current.gasPool = new(core.GasPool).AddGas(w.current.header.GasLimit) w.current.gasPool = new(core.GasPool).AddGas(w.current.header.GasLimit())
} }
selected := types.Transactions{} selected := types.Transactions{}
@ -131,7 +131,7 @@ func (w *Worker) SelectTransactionsForNewBlock(newBlockNum uint64, txs types.Tra
utils.Logger().Info().Str("txId", tx.Hash().Hex()).Uint64("txGasLimit", tx.Gas()).Msg("Transaction gas limit info") utils.Logger().Info().Str("txId", tx.Hash().Hex()).Uint64("txGasLimit", tx.Gas()).Msg("Transaction gas limit info")
} }
utils.Logger().Info().Uint64("newBlockNum", newBlockNum).Uint64("blockGasLimit", w.current.header.GasLimit).Uint64("blockGasUsed", w.current.header.GasUsed).Msg("Block gas limit and usage info") utils.Logger().Info().Uint64("newBlockNum", newBlockNum).Uint64("blockGasLimit", w.current.header.GasLimit()).Uint64("blockGasUsed", w.current.header.GasUsed()).Msg("Block gas limit and usage info")
return selected, unselected, invalid return selected, unselected, invalid
} }
@ -139,7 +139,9 @@ func (w *Worker) SelectTransactionsForNewBlock(newBlockNum uint64, txs types.Tra
func (w *Worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) { func (w *Worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) {
snap := w.current.state.Snapshot() snap := w.current.state.Snapshot()
receipt, cx, _, err := core.ApplyTransaction(w.config, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, vm.Config{}) gasUsed := w.current.header.GasUsed()
receipt, cx, _, err := core.ApplyTransaction(w.config, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &gasUsed, vm.Config{})
w.current.header.SetGasUsed(gasUsed)
if err != nil { if err != nil {
w.current.state.RevertToSnapshot(snap) w.current.state.RevertToSnapshot(snap)
return nil, err return nil, err
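
Because core.ApplyTransaction still takes the cumulative gas as a *uint64, commitTransaction above copies the value out of the getter, lets the call mutate the local, and writes the result back with SetGasUsed. A minimal sketch of that get/mutate/set shape; chargeGas is a hypothetical stand-in for the real callee:

```go
// Sketch only. chargeGas is hypothetical; it stands in for any function
// (core.ApplyTransaction in the real code) that adds to a *uint64.
package main

import (
	"fmt"

	"github.com/harmony-one/harmony/block"
)

func chargeGas(cumulative *uint64, used uint64) {
	*cumulative += used
}

func main() {
	header := block.NewHeaderWith().GasLimit(10000000).Header()

	gasUsed := header.GasUsed() // copy the current value out of the header
	chargeGas(&gasUsed, 21000)  // the callee mutates the local copy
	header.SetGasUsed(gasUsed)  // persist the result through the setter

	fmt.Println(header.GasUsed(), "/", header.GasLimit(), "gas used")
}
```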
@ -160,7 +162,7 @@ func (w *Worker) commitTransaction(tx *types.Transaction, coinbase common.Addres
// CommitTransactions commits transactions. // CommitTransactions commits transactions.
func (w *Worker) CommitTransactions(txs types.Transactions, coinbase common.Address) error { func (w *Worker) CommitTransactions(txs types.Transactions, coinbase common.Address) error {
if w.current.gasPool == nil { if w.current.gasPool == nil {
w.current.gasPool = new(core.GasPool).AddGas(w.current.header.GasLimit) w.current.gasPool = new(core.GasPool).AddGas(w.current.header.GasLimit())
} }
for _, tx := range txs { for _, tx := range txs {
snap := w.current.state.Snapshot() snap := w.current.state.Snapshot()
@ -177,13 +179,13 @@ func (w *Worker) CommitTransactions(txs types.Transactions, coinbase common.Addr
// CommitReceipts commits a list of already verified incoming cross shard receipts // CommitReceipts commits a list of already verified incoming cross shard receipts
func (w *Worker) CommitReceipts(receiptsList []*types.CXReceiptsProof) error { func (w *Worker) CommitReceipts(receiptsList []*types.CXReceiptsProof) error {
if w.current.gasPool == nil { if w.current.gasPool == nil {
w.current.gasPool = new(core.GasPool).AddGas(w.current.header.GasLimit) w.current.gasPool = new(core.GasPool).AddGas(w.current.header.GasLimit())
} }
if len(receiptsList) == 0 { if len(receiptsList) == 0 {
w.current.header.IncomingReceiptHash = types.EmptyRootHash w.current.header.SetIncomingReceiptHash(types.EmptyRootHash)
} else { } else {
w.current.header.IncomingReceiptHash = types.DeriveSha(types.CXReceiptsProofs(receiptsList)) w.current.header.SetIncomingReceiptHash(types.DeriveSha(types.CXReceiptsProofs(receiptsList)))
} }
for _, cx := range receiptsList { for _, cx := range receiptsList {
@ -205,22 +207,22 @@ func (w *Worker) UpdateCurrent(coinbase common.Address) error {
num := parent.Number() num := parent.Number()
timestamp := time.Now().Unix() timestamp := time.Now().Unix()
// New block's epoch is the same as parent's... // New block's epoch is the same as parent's...
epoch := new(big.Int).Set(parent.Header().Epoch) epoch := new(big.Int).Set(parent.Header().Epoch())
// TODO: Don't depend on sharding state for epoch change. // TODO: Don't depend on sharding state for epoch change.
if len(parent.Header().ShardState) > 0 && parent.NumberU64() != 0 { if len(parent.Header().ShardState()) > 0 && parent.NumberU64() != 0 {
// ... except if parent has a resharding assignment it increases by 1. // ... except if parent has a resharding assignment it increases by 1.
epoch = epoch.Add(epoch, common.Big1) epoch = epoch.Add(epoch, common.Big1)
} }
header := &block.Header{ header := block.NewHeaderWith().
ParentHash: parent.Hash(), ParentHash(parent.Hash()).
Number: num.Add(num, common.Big1), Number(num.Add(num, common.Big1)).
GasLimit: core.CalcGasLimit(parent, w.gasFloor, w.gasCeil), GasLimit(core.CalcGasLimit(parent, w.gasFloor, w.gasCeil)).
Time: big.NewInt(timestamp), Time(big.NewInt(timestamp)).
Epoch: epoch, Epoch(epoch).
ShardID: w.chain.ShardID(), ShardID(w.chain.ShardID()).
Coinbase: coinbase, Coinbase(coinbase).
} Header()
return w.makeCurrent(parent, header) return w.makeCurrent(parent, header)
} }
@ -262,13 +264,14 @@ func (w *Worker) IncomingReceipts() []*types.CXReceiptsProof {
// CommitWithCrossLinks generate a new block with cross links for the new txs. // CommitWithCrossLinks generate a new block with cross links for the new txs.
func (w *Worker) CommitWithCrossLinks(sig []byte, signers []byte, viewID uint64, coinbase common.Address, crossLinks []byte) (*types.Block, error) { func (w *Worker) CommitWithCrossLinks(sig []byte, signers []byte, viewID uint64, coinbase common.Address, crossLinks []byte) (*types.Block, error) {
if len(sig) > 0 && len(signers) > 0 { if len(sig) > 0 && len(signers) > 0 {
copy(w.current.header.LastCommitSignature[:], sig[:]) sig2 := w.current.header.LastCommitSignature()
w.current.header.LastCommitBitmap = append(signers[:0:0], signers...) copy(sig2[:], sig[:])
w.current.header.SetLastCommitSignature(sig2)
w.current.header.SetLastCommitBitmap(signers)
} }
w.current.header.Coinbase = coinbase w.current.header.SetCoinbase(coinbase)
w.current.header.ViewID = new(big.Int) w.current.header.SetViewID(new(big.Int).SetUint64(viewID))
w.current.header.ViewID.SetUint64(viewID) w.current.header.SetCrossLinks(crossLinks)
w.current.header.CrossLinks = crossLinks
s := w.current.state.Copy() s := w.current.state.Copy()
@ -300,21 +303,21 @@ func New(config *params.ChainConfig, chain *core.BlockChain, engine consensus_en
num := parent.Number() num := parent.Number()
timestamp := time.Now().Unix() timestamp := time.Now().Unix()
// New block's epoch is the same as parent's... // New block's epoch is the same as parent's...
epoch := new(big.Int).Set(parent.Header().Epoch) epoch := parent.Header().Epoch()
// TODO: Don't depend on sharding state for epoch change. // TODO: Don't depend on sharding state for epoch change.
if len(parent.Header().ShardState) > 0 && parent.NumberU64() != 0 { if len(parent.Header().ShardState()) > 0 && parent.NumberU64() != 0 {
// ... except if parent has a resharding assignment it increases by 1. // ... except if parent has a resharding assignment it increases by 1.
epoch = epoch.Add(epoch, common.Big1) epoch = epoch.Add(epoch, common.Big1)
} }
header := &block.Header{ header := block.NewHeaderWith().
ParentHash: parent.Hash(), ParentHash(parent.Hash()).
Number: num.Add(num, common.Big1), Number(num.Add(num, common.Big1)).
GasLimit: core.CalcGasLimit(parent, worker.gasFloor, worker.gasCeil), GasLimit(core.CalcGasLimit(parent, worker.gasFloor, worker.gasCeil)).
Time: big.NewInt(timestamp), Time(big.NewInt(timestamp)).
Epoch: epoch, Epoch(epoch).
ShardID: worker.chain.ShardID(), ShardID(worker.chain.ShardID()).
} Header()
worker.makeCurrent(parent, header) worker.makeCurrent(parent, header)
return worker return worker
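
For reference, a minimal sketch of building a child header with the fluent NewHeaderWith builder, roughly as UpdateCurrent and New do above. The reuse of the parent's gas limit and the shard ID are placeholders; the real worker uses core.CalcGasLimit and the chain's shard ID.

```go
// Sketch only. Placeholder values stand in for core.CalcGasLimit and the
// chain-derived shard ID used by the real worker.
package main

import (
	"fmt"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/harmony-one/harmony/block"
)

func childHeader(parent *block.Header, coinbase common.Address) *block.Header {
	num := new(big.Int).Add(parent.Number(), common.Big1)
	// New block's epoch is the parent's, unless the parent carried a
	// resharding assignment.
	epoch := new(big.Int).Set(parent.Epoch())
	if len(parent.ShardState()) > 0 && parent.Number().Uint64() != 0 {
		epoch.Add(epoch, common.Big1)
	}
	return block.NewHeaderWith().
		ParentHash(parent.Hash()).
		Number(num).
		Epoch(epoch).
		ShardID(parent.ShardID()).
		GasLimit(parent.GasLimit()). // placeholder for core.CalcGasLimit
		Time(big.NewInt(time.Now().Unix())).
		Coinbase(coinbase).
		Header()
}

func main() {
	parent := block.NewHeaderWith().
		Number(big.NewInt(41)).
		Epoch(big.NewInt(0)).
		GasLimit(80000000).
		Header()
	child := childHeader(parent, common.Address{})
	fmt.Println("number:", child.Number(), "epoch:", child.Epoch())
}
```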

@ -174,8 +174,8 @@ func GetHashFromNodeList(nodeList []NodeID) []byte {
} }
d := sha3.NewLegacyKeccak256() d := sha3.NewLegacyKeccak256()
for i := range nodeList { for _, nodeID := range nodeList {
d.Write(nodeList[i].Serialize()) d.Write(nodeID.Serialize())
} }
return d.Sum(nil) return d.Sum(nil)
} }
