[engine][stream] updated engine VerifyHeaderWithSignature and fix downloader test

pull/3612/head
Jacky Wang 4 years ago
parent c4a9b77bee
commit cd50682097
  1. consensus/engine/consensus_engine.go (7)
  2. consensus/quorum/verifier.go (86)
  3. consensus/votepower/roster.go (17)
  4. core/block_validator.go (8)
  5. core/tx_pool_test.go (3)
  6. crypto/hash/rlp.go (29)
  7. hmy/downloader/adapter.go (6)
  8. hmy/downloader/adapter_test.go (41)
  9. hmy/downloader/beaconhelper.go (4)
  10. hmy/downloader/downloader.go (64)
  11. hmy/downloader/downloader_test.go (1)
  12. hmy/downloader/inserthelper.go (220)
  13. hmy/downloader/inserthelper_test.go (19)
  14. hmy/downloader/longrange.go (4)
  15. hmy/downloader/longrange_test.go (5)
  16. hmy/downloader/shortrange.go (36)
  17. hmy/downloader/shortrange_test.go (3)
  18. hmy/downloader/types_test.go (7)
  19. internal/chain/engine.go (284)
  20. internal/chain/sig.go (37)
  21. node/node.go (26)
  22. node/worker/worker_test.go (10)
  23. test/chain/reward/main.go (3)

@ -8,6 +8,7 @@ import (
"github.com/harmony-one/harmony/consensus/reward"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/params"
"github.com/harmony-one/harmony/shard"
"github.com/harmony-one/harmony/shard/committee"
@ -82,10 +83,8 @@ type Engine interface {
// is used for verifying "incoming" block header against commit signature and bitmap sent from the other chain cross-shard via libp2p.
// i.e. this header verification api is more flexible since the caller specifies which commit signature and bitmap to use
// for verifying the block header, which is necessary for cross-shard block header verification. Example of such is cross-shard transaction.
// (TODO) For now, when doing cross shard, we need to recalculate the shard state since we don't have the shard state of other shards
VerifyHeaderWithSignature(
chain ChainReader, header *block.Header,
commitSig, commitBitmap []byte, reCalculate bool,
VerifyHeaderSignature(
chain ChainReader, header *block.Header, commitSig bls.SerializedSignature, commitBitmap []byte,
) error
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers

@ -0,0 +1,86 @@
package quorum
import (
"math/big"
"github.com/harmony-one/harmony/consensus/votepower"
bls_cosi "github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/numeric"
"github.com/harmony-one/harmony/shard"
"github.com/pkg/errors"
)
// Verifier is the interface to verify whether the quorum is achieved by the mask at each epoch.
// TODO: Add some unit tests to make sure Verifier get exactly the same result as Decider
type Verifier interface {
IsQuorumAchievedByMask(mask *bls_cosi.Mask) bool
}
// NewVerifier creates the quorum verifier for the given committee, epoch and whether the scenario
// is staking.
func NewVerifier(committee *shard.Committee, epoch *big.Int, isStaking bool) (Verifier, error) {
if isStaking {
return newStakeVerifier(committee, epoch)
}
return newUniformVerifier(committee)
}
// stakeVerifier is the quorum verifier for the staking period. Each validator's staked
// tokens determine its voting power in the final result.
type stakeVerifier struct {
r votepower.Roster
}
// newStakeVerifier creates a stake verifier from the given committee and epoch
func newStakeVerifier(committee *shard.Committee, epoch *big.Int) (*stakeVerifier, error) {
r, err := votepower.Compute(committee, epoch)
if err != nil {
return nil, errors.Wrap(err, "compute staking vote-power")
}
return &stakeVerifier{
r: *r,
}, nil
}
// IsQuorumAchievedByMask returns whether the quorum is achieved with the provided mask
func (sv *stakeVerifier) IsQuorumAchievedByMask(mask *bls_cosi.Mask) bool {
if mask == nil {
return false
}
vp := sv.r.VotePowerByMask(mask)
return vp.GT(sv.threshold())
}
func (sv *stakeVerifier) threshold() numeric.Dec {
return twoThird
}
// uniformVerifier is the quorum verifier for the non-staking period. All nodes have uniform voting power.
type uniformVerifier struct {
pubKeyCnt int64
}
func newUniformVerifier(committee *shard.Committee) (*uniformVerifier, error) {
keys, err := committee.BLSPublicKeys()
if err != nil {
return nil, err
}
return &uniformVerifier{
pubKeyCnt: int64(len(keys)),
}, nil
}
// IsQuorumAchievedByMask returns whether the quorum is achieved with the provided mask,
// i.e. whether strictly more than (2/3 of the keys + 1) keys are enabled in the mask.
func (uv *uniformVerifier) IsQuorumAchievedByMask(mask *bls_cosi.Mask) bool {
got := int64(len(mask.Publics))
exp := uv.thresholdKeyCount()
// Theoretically, greater-or-equal would suffice, but the existing logic is stricter (no
// equality), so this conforms to the current implementation
// (engineImpl.VerifySeal, uniformVoteWeight.IsQuorumAchievedByMask)
return got > exp
}
func (uv *uniformVerifier) thresholdKeyCount() int64 {
return uv.pubKeyCnt*2/3 + 1
}
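
For reference, a minimal caller-side sketch of the new verifier. The wrapper function and package name below are assumptions for illustration only; NewVerifier and IsQuorumAchievedByMask are the APIs added in this file, and isStaking would typically come from chainConfig.IsStaking(epoch).

package quorumcheck // hypothetical package, for illustration only

import (
	"math/big"

	"github.com/harmony-one/harmony/consensus/quorum"
	bls_cosi "github.com/harmony-one/harmony/crypto/bls"
	"github.com/harmony-one/harmony/shard"
)

// hasQuorum reports whether the signers enabled in mask reach quorum for the
// given committee and epoch.
func hasQuorum(committee *shard.Committee, epoch *big.Int, isStaking bool, mask *bls_cosi.Mask) (bool, error) {
	v, err := quorum.NewVerifier(committee, epoch, isStaking)
	if err != nil {
		return false, err
	}
	return v.IsQuorumAchievedByMask(mask), nil
}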

@ -261,3 +261,20 @@ func NewRoster(shardID uint32) *Roster {
ShardID: shardID,
}
}
// VotePowerByMask returns the vote power for the given BLS mask. The result is a number between 0 and 1.
func (r *Roster) VotePowerByMask(mask *bls.Mask) numeric.Dec {
res := numeric.ZeroDec()
for key, index := range mask.PublicsIndex {
if enabled, err := mask.IndexEnabled(index); err != nil || !enabled {
continue
}
voter, ok := r.Voters[key]
if !ok {
continue
}
res = res.Add(voter.OverallPercent)
}
return res
}
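
A hypothetical helper showing how the roster result would typically be consumed; stakeVerifier above does exactly this against the package-level twoThird threshold.

// enoughVotePower reports whether the enabled, known signers in the mask hold
// strictly more than the given fraction of the total voting power.
func enoughVotePower(r *votepower.Roster, mask *bls.Mask, threshold numeric.Dec) bool {
	return r.VotePowerByMask(mask).GT(threshold)
}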

@ -25,12 +25,14 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/pkg/errors"
"github.com/harmony-one/harmony/block"
consensus_engine "github.com/harmony-one/harmony/consensus/engine"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/params"
"github.com/pkg/errors"
)
// BlockValidator is responsible for validating block headers, uncles and
@ -245,5 +247,7 @@ func (v *BlockValidator) ValidateCXReceiptsProof(cxp *types.CXReceiptsProof) err
}
// (4) verify blockHeader with seal
return v.engine.VerifyHeaderWithSignature(v.bc, cxp.Header, cxp.CommitSig, cxp.CommitBitmap, true)
var commitSig bls.SerializedSignature
copy(commitSig[:], cxp.CommitSig)
return v.engine.VerifyHeaderSignature(v.bc, cxp.Header, commitSig, cxp.CommitBitmap)
}

@ -157,7 +157,8 @@ func createBlockChain() *BlockChain {
database := rawdb.NewMemoryDatabase()
genesis := gspec.MustCommit(database)
_ = genesis
blockchain, _ := NewBlockChain(database, nil, gspec.Config, chain2.Engine, vm.Config{}, nil)
engine := chain2.NewEngine(0)
blockchain, _ := NewBlockChain(database, nil, gspec.Config, engine, vm.Config{}, nil)
return blockchain
}

@ -1,22 +1,47 @@
package hash
import (
"hash"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
var kec256Pool = sync.Pool{
New: func() interface{} {
return sha3.NewLegacyKeccak256()
},
}
// FromRLP hashes the RLP representation of the given object.
func FromRLP(x interface{}) (h common.Hash) {
hw := sha3.NewLegacyKeccak256()
hw := kec256Pool.Get().(hash.Hash)
defer func() {
hw.Reset()
kec256Pool.Put(hw)
}()
rlp.Encode(hw, x)
hw.Sum(h[:0])
return h
}
var sha256Pool = sync.Pool{
New: func() interface{} {
return sha3.New256()
},
}
// FromRLPNew256 hashes the RLP representation of the given object using New256
func FromRLPNew256(x interface{}) (h common.Hash) {
hw := sha3.New256()
hw := sha256Pool.Get().(hash.Hash)
defer func() {
hw.Reset()
sha256Pool.Put(hw)
}()
rlp.Encode(hw, x)
hw.Sum(h[:0])
return h
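
The pooling change is transparent to callers. A small usage sketch (the struct literal is arbitrary, chosen only for illustration):

package main

import (
	"fmt"

	"github.com/harmony-one/harmony/crypto/hash"
)

func main() {
	// FromRLP now borrows a keccak256 hasher from a sync.Pool instead of
	// allocating one per call; the signature and result are unchanged.
	v := struct {
		Number uint64
		Tag    string
	}{Number: 42, Tag: "harmony"}
	fmt.Println(hash.FromRLP(v).Hex())
}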

@ -30,9 +30,3 @@ type blockChain interface {
InsertChain(chain types.Blocks, verifyHeaders bool) (int, error)
WriteCommitSig(blockNum uint64, lastCommits []byte) error
}
// insertHelper is the interface help to verify and insert a block.
type insertHelper interface {
verifyAndInsertBlocks(blocks types.Blocks) (int, error)
verifyAndInsertBlock(block *types.Block) error
}

@ -6,20 +6,22 @@ import (
"math/big"
"sync"
"github.com/harmony-one/harmony/consensus/engine"
staking "github.com/harmony-one/harmony/staking/types"
"github.com/harmony-one/harmony/block"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/event"
"github.com/harmony-one/harmony/block"
"github.com/harmony-one/harmony/consensus/engine"
"github.com/harmony-one/harmony/consensus/reward"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/params"
"github.com/harmony-one/harmony/p2p/stream/common/streammanager"
syncproto "github.com/harmony-one/harmony/p2p/stream/protocols/sync"
sttypes "github.com/harmony-one/harmony/p2p/stream/types"
"github.com/harmony-one/harmony/shard"
"github.com/harmony-one/harmony/staking/slash"
staking "github.com/harmony-one/harmony/staking/types"
)
type testBlockChain struct {
@ -96,7 +98,7 @@ func (bc *testBlockChain) ReadValidatorList() ([]common.Address, error)
func (bc *testBlockChain) ReadCommitSig(blockNum uint64) ([]byte, error) { return nil, nil }
func (bc *testBlockChain) ReadBlockRewardAccumulator(uint64) (*big.Int, error) { return nil, nil }
func (bc *testBlockChain) ValidatorCandidates() []common.Address { return nil }
func (bc *testBlockChain) Engine() engine.Engine { return nil }
func (bc *testBlockChain) Engine() engine.Engine { return &dummyEngine{} }
func (bc *testBlockChain) ReadValidatorInformation(addr common.Address) (*staking.ValidatorWrapper, error) {
return nil, nil
}
@ -113,6 +115,33 @@ func (bc *testBlockChain) SuperCommitteeForNextEpoch(beacon engine.ChainReader,
return nil, nil
}
type dummyEngine struct{}
func (e *dummyEngine) VerifyHeader(engine.ChainReader, *block.Header, bool) error {
return nil
}
func (e *dummyEngine) VerifyHeaderSignature(engine.ChainReader, *block.Header, bls.SerializedSignature, []byte) error {
return nil
}
func (e *dummyEngine) VerifyHeaders(engine.ChainReader, []*block.Header, []bool) (chan<- struct{}, <-chan error) {
return nil, nil
}
func (e *dummyEngine) VerifySeal(engine.ChainReader, *block.Header) error { return nil }
func (e *dummyEngine) VerifyShardState(engine.ChainReader, engine.ChainReader, *block.Header) error {
return nil
}
func (e *dummyEngine) Beaconchain() engine.ChainReader { return nil }
func (e *dummyEngine) SetBeaconchain(engine.ChainReader) {}
func (e *dummyEngine) Finalize(
chain engine.ChainReader, header *block.Header,
state *state.DB, txs []*types.Transaction,
receipts []*types.Receipt, outcxs []*types.CXReceipt,
incxs []*types.CXReceiptsProof, stks staking.StakingTransactions,
doubleSigners slash.Records, sigsReady chan bool, viewID func() uint64,
) (*types.Block, reward.Reader, error) {
return nil, nil, nil
}
type testInsertHelper struct {
bc *testBlockChain
}

@ -17,7 +17,6 @@ type (
// insert the latest blocks to the beacon chain.
beaconHelper struct {
bc blockChain
ih insertHelper
blockC <-chan *types.Block
// TODO: refactor this hook to consensus module. We'd better put it in
// consensus module under a subscription.
@ -34,10 +33,9 @@ type (
}
)
func newBeaconHelper(bc blockChain, ih insertHelper, blockC <-chan *types.Block, insertHook func()) *beaconHelper {
func newBeaconHelper(bc blockChain, blockC <-chan *types.Block, insertHook func()) *beaconHelper {
return &beaconHelper{
bc: bc,
ih: ih,
blockC: blockC,
insertHook: insertHook,
lastMileCache: newBlocksByNumber(lastMileCap),

@ -2,23 +2,28 @@ package downloader
import (
"context"
"fmt"
"time"
"github.com/ethereum/go-ethereum/event"
"github.com/pkg/errors"
"github.com/rs/zerolog"
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/chain"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/p2p"
"github.com/harmony-one/harmony/p2p/stream/common/streammanager"
"github.com/harmony-one/harmony/p2p/stream/protocols/sync"
"github.com/rs/zerolog"
)
type (
// Downloader is responsible for sync task of one shard
Downloader struct {
bc blockChain
ih insertHelper
syncProtocol syncProtocol
bh *beaconHelper
@ -40,8 +45,6 @@ type (
func NewDownloader(host p2p.Host, bc *core.BlockChain, config Config) *Downloader {
config.fixValues()
ih := newInsertHelper(bc)
sp := sync.NewProtocol(sync.Config{
Chain: bc,
Host: host.GetP2PHost(),
@ -58,14 +61,13 @@ func NewDownloader(host p2p.Host, bc *core.BlockChain, config Config) *Downloade
var bh *beaconHelper
if config.BHConfig != nil && bc.ShardID() == 0 {
bh = newBeaconHelper(bc, ih, config.BHConfig.BlockC, config.BHConfig.InsertHook)
bh = newBeaconHelper(bc, config.BHConfig.BlockC, config.BHConfig.InsertHook)
}
ctx, cancel := context.WithCancel(context.Background())
return &Downloader{
bc: bc,
ih: ih,
syncProtocol: sp,
bh: bh,
@ -262,3 +264,53 @@ func (d *Downloader) finishSyncing() {
d.status.finishSyncing()
d.evtDownloadFinished.Send(struct{}{})
}
var emptySigVerifyErr *sigVerifyErr
type sigVerifyErr struct {
err error
}
func (e *sigVerifyErr) Error() string {
return fmt.Sprintf("[VerifyHeaderSignature] %v", e.err.Error())
}
func verifyAndInsertBlocks(bc blockChain, blocks types.Blocks) (int, error) {
for i, block := range blocks {
if err := verifyAndInsertBlock(bc, block, blocks[i+1:]...); err != nil {
return i, err
}
}
return len(blocks), nil
}
func verifyAndInsertBlock(bc blockChain, block *types.Block, nextBlocks ...*types.Block) error {
var (
sigBytes bls.SerializedSignature
bitmap []byte
err error
)
if len(nextBlocks) > 0 {
// get commit sig from the next block
next := nextBlocks[0]
sigBytes = next.Header().LastCommitSignature()
bitmap = next.Header().LastCommitBitmap()
} else {
// get commit sig from current block
sigBytes, bitmap, err = chain.ParseCommitSigAndBitmap(block.GetCurrentCommitSig())
if err != nil {
return errors.Wrap(err, "parse commitSigAndBitmap")
}
}
if err := bc.Engine().VerifyHeaderSignature(bc, block.Header(), sigBytes, bitmap); err != nil {
return &sigVerifyErr{err}
}
if err := bc.Engine().VerifyHeader(bc, block.Header(), true); err != nil {
return errors.Wrap(err, "[VerifyHeader]")
}
if _, err := bc.InsertChain(types.Blocks{block}, false); err != nil {
return errors.Wrap(err, "[InsertChain]")
}
return bc.WriteCommitSig(block.NumberU64(), block.GetCurrentCommitSig())
}
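
A condensed sketch, within the downloader package, of how these free functions replace the old insertHelper; the wrapper name is an assumption. n is the number of blocks inserted before the first failure, and errors.As distinguishes a bad commit signature (blame a single stream) from a generic insert failure (blame the whole batch).

// insertFetched inserts a fetched batch and reports whether the failure, if
// any, was a commit-signature verification error.
func insertFetched(bc blockChain, blocks types.Blocks) (n int, sigFailed bool, err error) {
	n, err = verifyAndInsertBlocks(bc, blocks)
	if err != nil {
		sigFailed = errors.As(err, &emptySigVerifyErr)
	}
	return n, sigFailed, err
}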

@ -16,7 +16,6 @@ func TestDownloader_Integration(t *testing.T) {
d := &Downloader{
bc: bc,
ih: &testInsertHelper{bc},
syncProtocol: sp,
downloadC: make(chan struct{}),
closeC: make(chan struct{}),

@ -1,220 +0,0 @@
package downloader
import (
"fmt"
"hash"
"math/big"
"sync"
"github.com/ethereum/go-ethereum/common"
lru "github.com/hashicorp/golang-lru"
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
bls_core "github.com/harmony-one/bls/ffi/go/bls"
"github.com/harmony-one/harmony/consensus/quorum"
"github.com/harmony-one/harmony/consensus/signature"
"github.com/harmony-one/harmony/core/types"
bls_cosi "github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/chain"
"github.com/harmony-one/harmony/multibls"
"github.com/harmony-one/harmony/shard"
)
// sigVerifyError is the error type of failing verify the signature of the current block.
// Since this is a sanity field and is not included the block hash, it needs extra verification.
// The error types is used to differentiate the error of signature verification VS insert error.
type sigVerifyError struct {
err error
}
func (err *sigVerifyError) Error() string {
return fmt.Sprintf("failed verify signature: %v", err.err.Error())
}
// insertHelperImpl helps to verify and insert blocks, along with some caching mechanism.
type insertHelperImpl struct {
bc blockChain
deciderCache *lru.Cache // Epoch -> quorum.Decider
shardStateCache *lru.Cache // Epoch -> *shard.State
verifiedSigCache *lru.Cache // verifiedSigKey -> struct{}{}
}
func newInsertHelper(bc blockChain) insertHelper {
deciderCache, _ := lru.New(5)
shardStateCache, _ := lru.New(5)
sigCache, _ := lru.New(20)
return &insertHelperImpl{
bc: bc,
deciderCache: deciderCache,
shardStateCache: shardStateCache,
verifiedSigCache: sigCache,
}
}
func (ch *insertHelperImpl) verifyAndInsertBlocks(blocks types.Blocks) (int, error) {
for i, block := range blocks {
if err := ch.verifyAndInsertBlock(block); err != nil {
return i, err
}
}
return len(blocks), nil
}
func (ch *insertHelperImpl) verifyAndInsertBlock(block *types.Block) error {
// verify the commit sig of current block
if err := ch.verifyBlockSignature(block); err != nil {
return &sigVerifyError{err}
}
ch.markBlockSigVerified(block, block.GetCurrentCommitSig())
// verify header. Skip verify the previous seal if we have already verified
verifySeal := !ch.isBlockLastSigVerified(block)
if err := ch.bc.Engine().VerifyHeader(ch.bc, block.Header(), verifySeal); err != nil {
return err
}
// Insert chain.
if _, err := ch.bc.InsertChain(types.Blocks{block}, false); err != nil {
return err
}
// Write commit sig data
return ch.bc.WriteCommitSig(block.NumberU64(), block.GetCurrentCommitSig())
}
func (ch *insertHelperImpl) verifyBlockSignature(block *types.Block) error {
// TODO: This is the duplicate logic to the implementation of verifySeal and consensus.
// Better refactor to the blockchain or engine structure
decider, err := ch.getDeciderByEpoch(block.Epoch())
if err != nil {
return err
}
sig, mask, err := decodeCommitSig(block.GetCurrentCommitSig(), decider.Participants())
if err != nil {
return err
}
if !decider.IsQuorumAchievedByMask(mask) {
return errors.New("quorum not achieved")
}
commitSigBytes := signature.ConstructCommitPayload(ch.bc, block.Epoch(), block.Hash(),
block.NumberU64(), block.Header().ViewID().Uint64())
if !sig.VerifyHash(mask.AggregatePublic, commitSigBytes) {
return errors.New("aggregate signature failed verification")
}
return nil
}
func (ch *insertHelperImpl) writeBlockSignature(block *types.Block) error {
return ch.bc.WriteCommitSig(block.NumberU64(), block.GetCurrentCommitSig())
}
func (ch *insertHelperImpl) getDeciderByEpoch(epoch *big.Int) (quorum.Decider, error) {
epochUint := epoch.Uint64()
if decider, ok := ch.deciderCache.Get(epochUint); ok && decider != nil {
return decider.(quorum.Decider), nil
}
decider, err := ch.readDeciderByEpoch(epoch)
if err != nil {
return nil, errors.Wrapf(err, "unable to read quorum of epoch %v", epoch.Uint64())
}
ch.deciderCache.Add(epochUint, decider)
return decider, nil
}
func (ch *insertHelperImpl) readDeciderByEpoch(epoch *big.Int) (quorum.Decider, error) {
isStaking := ch.bc.Config().IsStaking(epoch)
decider := ch.getNewDecider(isStaking)
ss, err := ch.getShardState(epoch)
if err != nil {
return nil, err
}
subComm, err := ss.FindCommitteeByID(ch.shardID())
if err != nil {
return nil, err
}
pubKeys, err := subComm.BLSPublicKeys()
if err != nil {
return nil, err
}
decider.UpdateParticipants(pubKeys)
if _, err := decider.SetVoters(subComm, epoch); err != nil {
return nil, err
}
return decider, nil
}
func (ch *insertHelperImpl) getNewDecider(isStaking bool) quorum.Decider {
if isStaking {
return quorum.NewDecider(quorum.SuperMajorityVote, ch.bc.ShardID())
} else {
return quorum.NewDecider(quorum.SuperMajorityStake, ch.bc.ShardID())
}
}
func (ch *insertHelperImpl) getShardState(epoch *big.Int) (*shard.State, error) {
if ss, ok := ch.shardStateCache.Get(epoch.Uint64()); ok && ss != nil {
return ss.(*shard.State), nil
}
ss, err := ch.bc.ReadShardState(epoch)
if err != nil {
return nil, err
}
ch.shardStateCache.Add(epoch.Uint64(), ss)
return ss, nil
}
func (ch *insertHelperImpl) markBlockSigVerified(block *types.Block, sigAndBitmap []byte) {
key := newVerifiedSigKey(block.Hash(), sigAndBitmap)
ch.verifiedSigCache.Add(key, struct{}{})
}
func (ch *insertHelperImpl) isBlockLastSigVerified(block *types.Block) bool {
lastSig := block.Header().LastCommitSignature()
lastBM := block.Header().LastCommitBitmap()
lastSigAndBM := append(lastSig[:], lastBM...)
key := newVerifiedSigKey(block.Hash(), lastSigAndBM)
_, ok := ch.verifiedSigCache.Get(key)
return ok
}
func (ch *insertHelperImpl) shardID() uint32 {
return ch.bc.ShardID()
}
func decodeCommitSig(commitBytes []byte, publicKeys multibls.PublicKeys) (*bls_core.Sign, *bls_cosi.Mask, error) {
if len(commitBytes) < bls_cosi.BLSSignatureSizeInBytes {
return nil, nil, fmt.Errorf("unexpected signature bytes size: %v / %v", len(commitBytes),
bls_cosi.BLSSignatureSizeInBytes)
}
return chain.ReadSignatureBitmapByPublicKeys(commitBytes, publicKeys)
}
type verifiedSigKey struct {
blockHash common.Hash
sbHash common.Hash // hash of block signature + bitmap
}
var hasherPool = sync.Pool{
New: func() interface{} {
return sha3.New256()
},
}
func newVerifiedSigKey(blockHash common.Hash, sigAndBitmap []byte) verifiedSigKey {
hasher := hasherPool.Get().(hash.Hash)
defer func() {
hasher.Reset()
hasherPool.Put(hasher)
}()
var sbHash common.Hash
hasher.Write(sigAndBitmap)
hasher.Sum(sbHash[0:])
return verifiedSigKey{
blockHash: blockHash,
sbHash: sbHash,
}
}

@ -1,19 +0,0 @@
package downloader
import (
"testing"
"github.com/ethereum/go-ethereum/common"
)
func BenchmarkNewVerifiedSigKey(b *testing.B) {
var bh common.Hash
commitSig := make([]byte, 100)
for i := 0; i != len(commitSig); i++ {
commitSig[i] = 0xf
}
for i := 0; i != b.N; i++ {
newVerifiedSigKey(bh, commitSig)
}
}

@ -28,7 +28,6 @@ func (d *Downloader) doLongRangeSync() (int, error) {
iter := &lrSyncIter{
bc: d.bc,
p: d.syncProtocol,
ih: d.ih,
d: d,
ctx: ctx,
config: d.config,
@ -54,7 +53,6 @@ func (d *Downloader) doLongRangeSync() (int, error) {
type lrSyncIter struct {
bc blockChain
p syncProtocol
ih insertHelper
d *Downloader
gbm *getBlocksManager // initialized when finished get block number
@ -206,7 +204,7 @@ func (lsi *lrSyncIter) processBlocks(results []*blockResult, targetBN uint64) {
blocks := blockResultsToBlocks(results)
for i, block := range blocks {
if err := lsi.ih.verifyAndInsertBlock(block); err != nil {
if err := verifyAndInsertBlock(lsi.bc, block); err != nil {
lsi.logger.Warn().Err(err).Uint64("target block", targetBN).
Uint64("block number", block.NumberU64()).
Msg("insert blocks failed in long range")

@ -17,7 +17,6 @@ func TestDownloader_doLongRangeSync(t *testing.T) {
d := &Downloader{
bc: bc,
ih: &testInsertHelper{bc},
syncProtocol: newTestSyncProtocol(targetBN, 32, nil),
config: Config{
Concurrency: 16,
@ -128,7 +127,6 @@ func TestLrSyncIter_FetchAndInsertBlocks(t *testing.T) {
lsi := &lrSyncIter{
bc: chain,
ih: &testInsertHelper{chain},
d: &Downloader{bc: chain},
p: protocol,
gbm: nil,
@ -161,7 +159,6 @@ func TestLrSyncIter_FetchAndInsertBlocks_ErrRequest(t *testing.T) {
lsi := &lrSyncIter{
bc: chain,
ih: &testInsertHelper{chain},
d: &Downloader{bc: chain},
p: protocol,
gbm: nil,
@ -194,7 +191,6 @@ func TestLrSyncIter_FetchAndInsertBlocks_ErrInsert(t *testing.T) {
lsi := &lrSyncIter{
bc: chain,
ih: &testInsertHelper{chain},
d: &Downloader{bc: chain},
p: protocol,
gbm: nil,
@ -227,7 +223,6 @@ func TestLrSyncIter_FetchAndInsertBlocks_RandomErr(t *testing.T) {
lsi := &lrSyncIter{
bc: chain,
ih: &testInsertHelper{chain},
d: &Downloader{bc: chain},
p: protocol,
gbm: nil,

@ -15,8 +15,6 @@ import (
"github.com/rs/zerolog"
)
var emptySigVerifyError *sigVerifyError
// doShortRangeSync does the short range sync.
// Compared with long range sync, short range sync is more focused on syncing to the latest block.
// It consist of 3 steps:
@ -59,7 +57,7 @@ func (d *Downloader) doShortRangeSync() (int, error) {
d.finishSyncing()
}()
blocks, err := sh.getBlocksByHashes(hashChain, whitelist)
blocks, stids, err := sh.getBlocksByHashes(hashChain, whitelist)
if err != nil {
if !errors.Is(err, context.Canceled) {
sh.removeStreams(whitelist) // Remote nodes cannot provide blocks with target hashes
@ -67,13 +65,17 @@ func (d *Downloader) doShortRangeSync() (int, error) {
return 0, errors.Wrap(err, "getBlocksByHashes")
}
n, err := d.ih.verifyAndInsertBlocks(blocks)
n, err := verifyAndInsertBlocks(d.bc, blocks)
numBlocksInsertedShortRangeHistogramVec.With(d.promLabels()).Observe(float64(n))
if err != nil {
if !errors.As(err, &emptySigVerifyError) {
if sh.blameAllStreams(blocks, n, err) {
sh.removeStreams(whitelist) // Data provided by remote nodes is corrupted
} else {
// It is the last block whose commit sig is wrong. Blame the provider of the last block.
criminal := stids[len(stids)-1]
sh.removeStreams([]sttypes.StreamID{criminal})
}
return n, errors.Wrap(err, "InsertChain")
return n, err
}
return len(blocks), nil
}
@ -116,7 +118,7 @@ func (sh *srHelper) getHashChain(curBN uint64) ([]common.Hash, []sttypes.StreamI
return hashChain, wl, nil
}
func (sh *srHelper) getBlocksByHashes(hashes []common.Hash, whitelist []sttypes.StreamID) ([]*types.Block, error) {
func (sh *srHelper) getBlocksByHashes(hashes []common.Hash, whitelist []sttypes.StreamID) ([]*types.Block, []sttypes.StreamID, error) {
ctx, cancel := context.WithCancel(sh.ctx)
m := newGetBlocksByHashManager(hashes, whitelist)
@ -169,11 +171,11 @@ func (sh *srHelper) getBlocksByHashes(hashes []common.Hash, whitelist []sttypes.
wg.Wait()
if gErr != nil {
return nil, gErr
return nil, nil, gErr
}
select {
case <-sh.ctx.Done():
return nil, sh.ctx.Err()
return nil, nil, sh.ctx.Err()
default:
}
@ -238,6 +240,14 @@ func (sh *srHelper) removeStreams(sts []sttypes.StreamID) {
}
}
// Only skip blaming all whitelisted streams when it is the last block whose signature verification failed.
func (sh *srHelper) blameAllStreams(blocks types.Blocks, errIndex int, err error) bool {
if errors.As(err, &emptySigVerifyErr) && errIndex == len(blocks)-1 {
return false
}
return true
}
func checkGetBlockByHashesResult(blocks []*types.Block, hashes []common.Hash) error {
if len(blocks) != len(hashes) {
return errors.New("unexpected number of getBlocksByHashes result")
@ -429,18 +439,20 @@ func (m *getBlocksByHashManager) handleResultError(hashes []common.Hash, stid st
}
}
func (m *getBlocksByHashManager) getResults() ([]*types.Block, error) {
func (m *getBlocksByHashManager) getResults() ([]*types.Block, []sttypes.StreamID, error) {
m.lock.Lock()
defer m.lock.Unlock()
blocks := make([]*types.Block, 0, len(m.hashes))
stids := make([]sttypes.StreamID, 0, len(m.hashes))
for _, hash := range m.hashes {
if m.results[hash].block == nil {
return nil, errors.New("SANITY: nil block found")
return nil, nil, errors.New("SANITY: nil block found")
}
blocks = append(blocks, m.results[hash].block)
stids = append(stids, m.results[hash].stid)
}
return blocks, nil
return blocks, stids, nil
}
func (m *getBlocksByHashManager) isDone() bool {
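
Putting the new error path together, a simplified sketch inside the downloader package; the helper name is an assumption, and the calls mirror the branch added to doShortRangeSync above.

// handleInsertErr removes only the stream that served the last block when its
// commit signature failed verification, and the whole whitelist otherwise.
func handleInsertErr(sh *srHelper, whitelist, stids []sttypes.StreamID,
	blocks types.Blocks, n int, err error) {
	if sh.blameAllStreams(blocks, n, err) {
		sh.removeStreams(whitelist) // data from remote nodes is corrupted
		return
	}
	sh.removeStreams([]sttypes.StreamID{stids[len(stids)-1]}) // bad commit sig on the last block
}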

@ -17,7 +17,6 @@ func TestDownloader_doShortRangeSync(t *testing.T) {
d := &Downloader{
bc: chain,
ih: &testInsertHelper{chain},
syncProtocol: newTestSyncProtocol(105, 32, nil),
config: Config{
Concurrency: 16,
@ -209,7 +208,7 @@ func TestSrHelper_GetBlocksByHashes(t *testing.T) {
ctx: context.Background(),
config: test.config,
}
blocks, err := sh.getBlocksByHashes(test.hashes, makeStreamIDs(5))
blocks, _, err := sh.getBlocksByHashes(test.hashes, makeStreamIDs(5))
if (err == nil) != (test.expErr == nil) {
t.Errorf("Test %v: unexpected error %v / %v", i, err, test.expErr)
}

@ -10,6 +10,7 @@ import (
"github.com/harmony-one/harmony/block"
headerV3 "github.com/harmony-one/harmony/block/v3"
"github.com/harmony-one/harmony/core/types"
bls_cosi "github.com/harmony-one/harmony/crypto/bls"
sttypes "github.com/harmony-one/harmony/p2p/stream/types"
)
@ -244,7 +245,11 @@ func makeTestBlocks(bns []uint64) []*types.Block {
func makeTestBlock(bn uint64) *types.Block {
testHeader := &block.Header{Header: headerV3.NewHeader()}
testHeader.SetNumber(big.NewInt(int64(bn)))
return types.NewBlockWithHeader(testHeader)
testHeader.SetLastCommitSignature(bls_cosi.SerializedSignature{})
testHeader.SetLastCommitBitmap(make([]byte, 10))
block := types.NewBlockWithHeader(testHeader)
block.SetCurrentCommitSig(make([]byte, 106))
return block
}
func assertError(got, expect error) error {

@ -5,11 +5,10 @@ import (
"math/big"
"sort"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
harmony_bls "github.com/harmony-one/harmony/crypto/bls"
"github.com/ethereum/go-ethereum/common"
lru "github.com/hashicorp/golang-lru"
"github.com/pkg/errors"
"github.com/harmony-one/harmony/block"
"github.com/harmony-one/harmony/consensus/engine"
"github.com/harmony-one/harmony/consensus/quorum"
@ -17,22 +16,36 @@ import (
"github.com/harmony-one/harmony/consensus/signature"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
bls_cosi "github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/multibls"
"github.com/harmony-one/harmony/shard"
"github.com/harmony-one/harmony/shard/committee"
"github.com/harmony-one/harmony/staking/availability"
"github.com/harmony-one/harmony/staking/slash"
staking "github.com/harmony-one/harmony/staking/types"
"github.com/pkg/errors"
)
const (
verifiedSigCache = 20
)
type engineImpl struct {
beacon engine.ChainReader
beacon engine.ChainReader
shardID uint32
// Caching field
verifiedSigCache *lru.Cache // verifiedSigKey -> struct{}{}
}
// Engine is an algorithm-agnostic consensus engine.
var Engine = &engineImpl{nil}
// NewEngine creates an Engine with the verified-signature cache initialized
func NewEngine(shardID uint32) *engineImpl {
sigCache, _ := lru.New(verifiedSigCache)
return &engineImpl{
beacon: nil,
shardID: shardID,
verifiedSigCache: sigCache,
}
}
func (e *engineImpl) Beaconchain() engine.ChainReader {
return e.beacon
@ -61,6 +74,9 @@ func (e *engineImpl) VerifyHeader(chain engine.ChainReader, header *block.Header
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications.
// WARN: Do not use VerifyHeaders for now. Currently a header verification can only
// succeed when the previous header has been written to the blockchain
// TODO: Revisit and correct this function when adding epochChain
func (e *engineImpl) VerifyHeaders(chain engine.ChainReader, headers []*block.Header, seals []bool) (chan<- struct{}, <-chan error) {
abort, results := make(chan struct{}), make(chan error, len(headers))
@ -79,14 +95,6 @@ func (e *engineImpl) VerifyHeaders(chain engine.ChainReader, headers []*block.He
return abort, results
}
// ReadPublicKeysFromLastBlock finds the public keys of last block's committee
func ReadPublicKeysFromLastBlock(
bc engine.ChainReader, header *block.Header,
) ([]harmony_bls.PublicKeyWrapper, error) {
parentHeader := bc.GetHeaderByHash(header.ParentHash())
return GetPublicKeys(bc, parentHeader, false)
}
// VerifyShardState implements Engine, checking the shardstate is valid at epoch transition
func (e *engineImpl) VerifyShardState(
bc engine.ChainReader, beacon engine.ChainReader, header *block.Header,
@ -134,75 +142,18 @@ func (e *engineImpl) VerifySeal(chain engine.ChainReader, header *block.Header)
if header == nil {
return errors.New("[VerifySeal] nil block header")
}
publicKeys, err := ReadPublicKeysFromLastBlock(chain, header)
if err != nil {
return errors.New("[VerifySeal] Cannot retrieve publickeys from last block")
}
sig := header.LastCommitSignature()
payload := append(sig[:], header.LastCommitBitmap()...)
aggSig, mask, err := ReadSignatureBitmapByPublicKeys(payload, publicKeys)
if err != nil {
return errors.New(
"[VerifySeal] Unable to deserialize the LastCommitSignature" +
" and LastCommitBitmap in Block Header",
)
}
parentHash := header.ParentHash()
parentHeader := chain.GetHeader(parentHash, header.Number().Uint64()-1)
if parentHeader == nil {
return errors.New(
"[VerifySeal] no parent header found",
)
return errors.New("[VerifySeal] no parent header found")
}
if chain.Config().IsStaking(parentHeader.Epoch()) {
slotList, err := chain.ReadShardState(parentHeader.Epoch())
if err != nil {
return errors.Wrapf(err, "cannot decoded shard state")
}
subComm, err := slotList.FindCommitteeByID(parentHeader.ShardID())
if err != nil {
return err
}
// TODO(audit): reuse a singleton decider and not recreate it for every single block
d := quorum.NewDecider(
quorum.SuperMajorityStake, subComm.ShardID,
)
d.SetMyPublicKeyProvider(func() (multibls.PublicKeys, error) {
return nil, nil
})
if _, err := d.SetVoters(subComm, slotList.Epoch); err != nil {
return err
}
if !d.IsQuorumAchievedByMask(mask) {
return errors.New(
"[VerifySeal] Not enough voting power in LastCommitSignature from Block Header",
)
}
} else {
parentQuorum, err := QuorumForBlock(chain, parentHeader, false)
if err != nil {
return errors.Wrapf(err,
"cannot calculate quorum for block %s", header.Number())
}
if count := utils.CountOneBits(mask.Bitmap); count < int64(parentQuorum) {
return errors.Errorf(
"[VerifySeal] need %d signature in LastCommitSignature have %d",
parentQuorum, count,
)
}
}
sig := header.LastCommitSignature()
bitmap := header.LastCommitBitmap()
lastCommitPayload := signature.ConstructCommitPayload(chain,
parentHeader.Epoch(), parentHeader.Hash(), parentHeader.Number().Uint64(), parentHeader.ViewID().Uint64())
if nodeconfig.GetDefaultConfig().GetNetworkType() == nodeconfig.Testnet && header.ShardID() == 0 && header.Number().Int64() == 5698545 {
// Testnet hack to workaround a bad block with invalid multi-signature in shard 0.
return nil
}
if !aggSig.VerifyHash(mask.AggregatePublic, lastCommitPayload) {
const msg = "[VerifySeal] Unable to verify aggregated signature from last block: %x"
return errors.Errorf(msg, payload)
if err := e.verifyHeaderSignatureCached(chain, parentHeader, sig, bitmap); err != nil {
return errors.Wrapf(err, "verify signature for parent %s", parentHash.String())
}
return nil
}
@ -438,127 +389,110 @@ func applySlashes(
return nil
}
// QuorumForBlock returns the quorum for the given block header.
func QuorumForBlock(
chain engine.ChainReader, h *block.Header, reCalculate bool,
) (quorum int, err error) {
ss := new(shard.State)
if reCalculate {
ss, _ = committee.WithStakingEnabled.Compute(h.Epoch(), chain)
} else {
ss, err = chain.ReadShardState(h.Epoch())
if err != nil {
return 0, errors.Wrapf(
err, "failed to read shard state of epoch %d", h.Epoch().Uint64(),
)
}
}
subComm, err := ss.FindCommitteeByID(h.ShardID())
if err != nil {
return 0, errors.Errorf("cannot find shard %d in shard state", h.ShardID())
}
return (len(subComm.Slots))*2/3 + 1, nil
}
// VerifyHeaderSignature verifies the signature of the given header.
// Similar to VerifyHeader, which only verifies block headers of one's own chain, this verification
// is used for verifying "incoming" block header against commit signature and bitmap sent from the other chain cross-shard via libp2p.
// i.e. this header verification api is more flexible since the caller specifies which commit signature and bitmap to use
// for verifying the block header, which is necessary for cross-shard block header verification. Example of such is cross-shard transaction.
func (e *engineImpl) VerifyHeaderWithSignature(chain engine.ChainReader, header *block.Header, commitSig []byte, commitBitmap []byte, reCalculate bool) error {
if chain.Config().IsStaking(header.Epoch()) {
// Never recalculate after staking is enabled
reCalculate = false
func (e *engineImpl) VerifyHeaderSignature(chain engine.ChainReader, header *block.Header, commitSig bls_cosi.SerializedSignature, commitBitmap []byte) error {
return e.verifyHeaderSignatureCached(chain, header, commitSig, commitBitmap)
}
func (e *engineImpl) verifyHeaderSignatureCached(chain engine.ChainReader, header *block.Header, commitSig bls_cosi.SerializedSignature, commitBitmap []byte) error {
key := newVerifiedSigKey(header.Hash(), commitSig, commitBitmap)
if _, ok := e.verifiedSigCache.Get(key); ok {
return nil
}
publicKeys, err := GetPublicKeys(chain, header, reCalculate)
if err != nil {
return errors.New("[VerifyHeaderWithSignature] Cannot get publickeys for block header")
if err := e.verifyHeaderSignature(chain, header, commitSig, commitBitmap); err != nil {
return err
}
e.verifiedSigCache.Add(key, struct{}{})
return nil
}
payload := append(commitSig[:], commitBitmap[:]...)
aggSig, mask, err := ReadSignatureBitmapByPublicKeys(payload, publicKeys)
func (e *engineImpl) verifyHeaderSignature(chain engine.ChainReader, header *block.Header, commitSig bls_cosi.SerializedSignature, commitBitmap []byte) error {
ss, err := e.getShardState(chain, header.Epoch(), header.ShardID())
if err != nil {
return errors.Wrapf(
err,
"[VerifyHeaderWithSignature] Unable to deserialize signatures",
)
return err
}
shardComm, err := ss.FindCommitteeByID(chain.ShardID())
if err != nil {
return err
}
pubKeys, err := shardComm.BLSPublicKeys()
if err != nil {
return err
}
aggSig, mask, err := decodeSigBitmap(commitSig, commitBitmap, pubKeys)
if err != nil {
return errors.Wrap(err, "deserialize signature and bitmap")
}
isStaking := chain.Config().IsStaking(header.Epoch())
qrVerifier, err := quorum.NewVerifier(shardComm, header.Epoch(), isStaking)
if err != nil {
return err
}
if e := header.Epoch(); chain.Config().IsStaking(e) {
slotList, err := chain.ReadShardState(e)
if err != nil {
return errors.Wrapf(err, "cannot read shard state")
}
subComm, err := slotList.FindCommitteeByID(header.ShardID())
if err != nil {
return err
}
// TODO(audit): reuse a singleton decider and not recreate it for every single block
d := quorum.NewDecider(quorum.SuperMajorityStake, subComm.ShardID)
d.SetMyPublicKeyProvider(func() (multibls.PublicKeys, error) {
return nil, nil
})
if _, err := d.SetVoters(subComm, e); err != nil {
return err
}
if !d.IsQuorumAchievedByMask(mask) {
return errors.New(
"[VerifySeal] Not enough voting power in commitSignature from Block Header",
)
}
} else {
quorumCount, err := QuorumForBlock(chain, header, reCalculate)
if err != nil {
return errors.Wrapf(err,
"cannot calculate quorum for block %s", header.Number())
}
if count := utils.CountOneBits(mask.Bitmap); count < int64(quorumCount) {
return errors.New(
"[VerifyHeaderWithSignature] Not enough signature in commitSignature from Block Header",
)
}
// Verify the signature and mask against the quorum verifier and public keys
if !qrVerifier.IsQuorumAchievedByMask(mask) {
return errors.New("not enough signature collected")
}
commitPayload := signature.ConstructCommitPayload(chain,
header.Epoch(), header.Hash(), header.Number().Uint64(), header.ViewID().Uint64())
if !aggSig.VerifyHash(mask.AggregatePublic, commitPayload) {
return errors.New("[VerifySeal] Unable to verify aggregated signature for block")
return errors.New("Unable to verify aggregated signature for block")
}
return nil
}
// GetPublicKeys finds the public keys of the committee that signed the block header
func GetPublicKeys(
chain engine.ChainReader, header *block.Header, reCalculate bool,
) ([]harmony_bls.PublicKeyWrapper, error) {
if header == nil {
return nil, errors.New("nil header provided")
}
shardState := new(shard.State)
var err error
if reCalculate {
shardState, _ = committee.WithStakingEnabled.Compute(header.Epoch(), chain)
func (e *engineImpl) getShardState(chain engine.ChainReader, epoch *big.Int, targetShardID uint32) (*shard.State, error) {
// (TODO) For now, when doing cross shard, we need to recalculate the shard state since we
// don't have the shard state of other shards
if e.needRecalculateStateShard(chain, epoch, targetShardID) {
shardState, err := committee.WithStakingEnabled.Compute(epoch, chain)
if err != nil {
return nil, errors.Wrapf(err, "compute shard state for epoch %v", epoch)
}
return shardState, nil
} else {
shardState, err = chain.ReadShardState(header.Epoch())
shardState, err := chain.ReadShardState(epoch)
if err != nil {
return nil, errors.Wrapf(
err, "failed to read shard state of epoch %d", header.Epoch().Uint64(),
)
return nil, errors.Wrapf(err, "read shard state for epoch %v", epoch)
}
return shardState, nil
}
}
subCommittee, err := shardState.FindCommitteeByID(header.ShardID())
if err != nil {
return nil, errors.Wrapf(
err,
"cannot find shard in the shard state at block %d shard %d",
header.Number(),
header.ShardID(),
)
// needRecalculateStateShard returns true only for non-staking epochs when targetShardID
// differs from the engine's own shard
func (e *engineImpl) needRecalculateStateShard(chain engine.ChainReader, epoch *big.Int, targetShardID uint32) bool {
if chain.Config().IsStaking(epoch) {
return false
}
return targetShardID != e.shardID
}
// Supports at most 512 validator nodes
const bitmapKeyBytes = 64
// verifiedSigKey is the key for caching header verification results
type verifiedSigKey struct {
blockHash common.Hash
signature bls_cosi.SerializedSignature
bitmap [bitmapKeyBytes]byte
}
func newVerifiedSigKey(blockHash common.Hash, sig bls_cosi.SerializedSignature, bitmap []byte) verifiedSigKey {
var keyBM [bitmapKeyBytes]byte
copy(keyBM[:], bitmap)
return verifiedSigKey{
blockHash: blockHash,
signature: sig,
bitmap: keyBM,
}
return subCommittee.BLSPublicKeys()
}
// GetLockPeriodInEpoch returns the delegation lock period for the given chain
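
Because verifiedSigKey holds only comparable, fixed-size fields (hash, serialized signature, padded bitmap array), it can be used directly as an LRU key. A tiny sketch (the helper is hypothetical) of the property the cache relies on:

// sameKey always returns true: keys built from the same header hash, signature
// and bitmap compare equal, so repeated verifications hit the cached entry.
func sameKey(h common.Hash, sig bls_cosi.SerializedSignature, bitmap []byte) bool {
	return newVerifiedSigKey(h, sig, bitmap) == newVerifiedSigKey(h, sig, bitmap)
}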

@ -11,25 +11,36 @@ import (
// ReadSignatureBitmapByPublicKeys read the payload of signature and bitmap based on public keys
func ReadSignatureBitmapByPublicKeys(recvPayload []byte, publicKeys []bls.PublicKeyWrapper) (*bls_core.Sign, *bls.Mask, error) {
if len(recvPayload) < bls.BLSSignatureSizeInBytes {
return nil, nil, errors.New("payload not have enough length")
sig, bitmap, err := ParseCommitSigAndBitmap(recvPayload)
if err != nil {
return nil, nil, err
}
return decodeSigBitmap(sig, bitmap, publicKeys)
}
// ParseCommitSigAndBitmap parses the commitSigAndBitmap payload into signature + bitmap
func ParseCommitSigAndBitmap(payload []byte) (bls.SerializedSignature, []byte, error) {
if len(payload) < bls.BLSSignatureSizeInBytes {
return bls.SerializedSignature{}, nil, errors.New("payload not have enough length")
}
payload := append(recvPayload[:0:0], recvPayload...)
//#### Read payload data
// 96 byte of multi-sig
offset := 0
multiSig := payload[offset : offset+bls.BLSSignatureSizeInBytes]
offset += bls.BLSSignatureSizeInBytes
// bitmap
bitmap := payload[offset:]
//#### END Read payload data
var (
sig bls.SerializedSignature
bitmap = make([]byte, len(payload)-bls.BLSSignatureSizeInBytes)
)
copy(sig[:], payload[:bls.BLSSignatureSizeInBytes])
copy(bitmap, payload[bls.BLSSignatureSizeInBytes:])
return sig, bitmap, nil
}
// decodeSigBitmap decodes and parses the signature and bitmap with the given public keys
func decodeSigBitmap(sigBytes bls.SerializedSignature, bitmap []byte, pubKeys []bls.PublicKeyWrapper) (*bls_core.Sign, *bls.Mask, error) {
aggSig := bls_core.Sign{}
err := aggSig.Deserialize(multiSig)
err := aggSig.Deserialize(sigBytes[:])
if err != nil {
return nil, nil, errors.New("unable to deserialize multi-signature from payload")
}
mask, err := bls.NewMask(publicKeys, nil)
mask, err := bls.NewMask(pubKeys, nil)
if err != nil {
utils.Logger().Warn().Err(err).Msg("onNewView unable to setup mask for prepared message")
return nil, nil, errors.New("unable to setup mask from payload")
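
For cross-shard callers the two helpers compose naturally. A hypothetical caller-side sketch (function and variable names are assumptions; ParseCommitSigAndBitmap and VerifyHeaderSignature are the APIs introduced in this commit):

package example

import (
	"github.com/harmony-one/harmony/block"
	consensus_engine "github.com/harmony-one/harmony/consensus/engine"
	"github.com/harmony-one/harmony/internal/chain"
)

// verifyIncomingHeader verifies an incoming cross-shard header given the raw
// commitSig||bitmap payload received over the network.
func verifyIncomingHeader(e consensus_engine.Engine, reader consensus_engine.ChainReader,
	hdr *block.Header, commitSigAndBitmap []byte) error {
	sig, bitmap, err := chain.ParseCommitSigAndBitmap(commitSigAndBitmap)
	if err != nil {
		return err
	}
	return e.VerifyHeaderSignature(reader, hdr, sig, bitmap)
}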

@ -13,6 +13,14 @@ import (
protobuf "github.com/golang/protobuf/proto"
"github.com/harmony-one/abool"
bls_core "github.com/harmony-one/bls/ffi/go/bls"
lru "github.com/hashicorp/golang-lru"
libp2p_peer "github.com/libp2p/go-libp2p-core/peer"
libp2p_pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/rcrowley/go-metrics"
"golang.org/x/sync/semaphore"
"github.com/harmony-one/harmony/api/proto"
msg_pb "github.com/harmony-one/harmony/api/proto/message"
proto_node "github.com/harmony-one/harmony/api/proto/node"
@ -38,14 +46,6 @@ import (
"github.com/harmony-one/harmony/staking/slash"
staking "github.com/harmony-one/harmony/staking/types"
"github.com/harmony-one/harmony/webhooks"
lru "github.com/hashicorp/golang-lru"
libp2p_peer "github.com/libp2p/go-libp2p-core/peer"
libp2p_pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sync/semaphore"
"github.com/rcrowley/go-metrics"
)
const (
@ -931,8 +931,10 @@ func New(
chainConfig := networkType.ChainConfig()
node.chainConfig = chainConfig
engine := chain.NewEngine(consensusObj.ShardID)
collection := shardchain.NewCollection(
chainDBFactory, &genesisInitializer{&node}, chain.Engine, &chainConfig,
chainDBFactory, &genesisInitializer{&node}, engine, &chainConfig,
)
for shardID, archival := range isArchival {
@ -971,21 +973,21 @@ func New(
txPoolConfig.Journal = fmt.Sprintf("%v/%v", node.NodeConfig.DBDir, txPoolConfig.Journal)
node.TxPool = core.NewTxPool(txPoolConfig, node.Blockchain().Config(), blockchain, node.TransactionErrorSink)
node.CxPool = core.NewCxPool(core.CxPoolSize)
node.Worker = worker.New(node.Blockchain().Config(), blockchain, chain.Engine)
node.Worker = worker.New(node.Blockchain().Config(), blockchain, engine)
node.deciderCache, _ = lru.New(16)
node.committeeCache, _ = lru.New(16)
if node.Blockchain().ShardID() != shard.BeaconChainShardID {
node.BeaconWorker = worker.New(
node.Beaconchain().Config(), beaconChain, chain.Engine,
node.Beaconchain().Config(), beaconChain, engine,
)
}
node.pendingCXReceipts = map[string]*types.CXReceiptsProof{}
node.proposedBlock = map[uint64]*types.Block{}
node.Consensus.VerifiedNewBlock = make(chan *types.Block, 1)
chain.Engine.SetBeaconchain(beaconChain)
engine.SetBeaconchain(beaconChain)
// the sequence number is the next block number to be added in consensus protocol, which is
// always one more than current chain header block
node.Consensus.SetBlockNum(blockchain.CurrentBlock().NumberU64() + 1)
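
The same per-shard wiring applies anywhere the old chain.Engine singleton was used. A minimal fragment, assuming db, cfg (a *params.ChainConfig) and shardID are already in scope:

eng := chain.NewEngine(shardID) // replaces the package-level chain.Engine singleton
bc, _ := core.NewBlockChain(db, nil, cfg, eng, vm.Config{}, nil)
wkr := worker.New(cfg, bc, eng)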

@ -38,17 +38,18 @@ func TestNewWorker(t *testing.T) {
Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
ShardID: 10,
}
engine = chain2.NewEngine(10)
)
genesis := gspec.MustCommit(database)
_ = genesis
chain, err := core.NewBlockChain(database, nil, gspec.Config, chain2.Engine, vm.Config{}, nil)
chain, err := core.NewBlockChain(database, nil, gspec.Config, engine, vm.Config{}, nil)
if err != nil {
t.Error(err)
}
// Create a new worker
worker := New(params.TestChainConfig, chain, chain2.Engine)
worker := New(params.TestChainConfig, chain, engine)
if worker.GetCurrentState().GetBalance(crypto.PubkeyToAddress(testBankKey.PublicKey)).Cmp(testBankFunds) != 0 {
t.Error("Worker state is not setup correctly")
@ -65,13 +66,14 @@ func TestCommitTransactions(t *testing.T) {
Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
ShardID: 0,
}
engine = chain2.NewEngine(0)
)
gspec.MustCommit(database)
chain, _ := core.NewBlockChain(database, nil, gspec.Config, chain2.Engine, vm.Config{}, nil)
chain, _ := core.NewBlockChain(database, nil, gspec.Config, engine, vm.Config{}, nil)
// Create a new worker
worker := New(params.TestChainConfig, chain, chain2.Engine)
worker := New(params.TestChainConfig, chain, engine)
// Generate a test tx
baseNonce := worker.GetCurrentState().GetNonce(crypto.PubkeyToAddress(testBankKey.PublicKey))

@ -108,7 +108,8 @@ func main() {
database := rawdb.NewMemoryDatabase()
genesis := gspec.MustCommit(database)
_ = genesis
bc, _ := core.NewBlockChain(database, nil, gspec.Config, chain.Engine, vm.Config{}, nil)
engine := chain.NewEngine(0)
bc, _ := core.NewBlockChain(database, nil, gspec.Config, engine, vm.Config{}, nil)
statedb, _ := state.New(common2.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
msg := createValidator()
statedb.AddBalance(msg.ValidatorAddress, new(big.Int).Mul(big.NewInt(5e18), big.NewInt(2000)))
