Merge branch 'dev' into feature/clear-stale-staking-data

feature/dev-engine_test · static · 1 year ago
commit 5a1bc7a226

7 changed files:
  1. Makefile (3)
  2. cmd/harmony/main.go (3)
  3. consensus/consensus_v2.go (70)
  4. consensus/quorum/quorum.go (2)
  5. core/blockchain_impl.go (2)
  6. core/state_processor.go (10)
  7. internal/utils/math.go (17)

@@ -180,6 +180,3 @@ debug_external: clean
 build_localnet_validator:
 	bash test/build-localnet-validator.sh
-tt:
-	go test -v -test.run OnDisconnectCheck ./p2p/security

@@ -788,6 +788,8 @@ func setupChain(hc harmonyconfig.HarmonyConfig, nodeConfig *nodeconfig.ConfigTyp
 }

 func setupConsensusAndNode(hc harmonyconfig.HarmonyConfig, nodeConfig *nodeconfig.ConfigType, registry *registry.Registry) *node.Node {
+	decider := quorum.NewDecider(quorum.SuperMajorityVote, uint32(hc.General.ShardID))
+
 	// Parse minPeers from harmonyconfig.HarmonyConfig
 	var minPeers int
 	var aggregateSig bool
@@ -821,7 +823,6 @@ func setupConsensusAndNode(hc harmonyconfig.HarmonyConfig, nodeConfig *nodeconfi
 	registry.SetCxPool(cxPool)

 	// Consensus object.
-	decider := quorum.NewDecider(quorum.SuperMajorityVote, nodeConfig.ShardID)
 	registry.SetIsBackup(isBackup(hc))
 	currentConsensus, err := consensus.New(
 		myHost, nodeConfig.ShardID, nodeConfig.ConsensusPriKey, registry, decider, minPeers, aggregateSig)
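
The two hunks above move the decider construction to the top of setupConsensusAndNode, since consensus.New now consumes it as an argument; note the shard ID source also changes from nodeConfig.ShardID to uint32(hc.General.ShardID). A minimal sketch of the ordering constraint this enforces, using hypothetical stand-in types rather than the real harmony packages:

package main

import "fmt"

// Hypothetical stand-ins for quorum.Decider and consensus.Consensus,
// just to illustrate the injection order; not the real harmony types.
type decider struct{ shardID uint32 }

func newDecider(shardID uint32) *decider { return &decider{shardID: shardID} }

type consensus struct{ d *decider }

// newConsensus mirrors the shape of consensus.New: the decider is an
// argument, so it must exist before this call.
func newConsensus(d *decider) (*consensus, error) { return &consensus{d: d}, nil }

func main() {
	d := newDecider(0) // built first, as the hunk above now does
	c, err := newConsensus(d)
	if err != nil {
		panic(err)
	}
	fmt.Printf("consensus wired to decider for shard %d\n", c.d.shardID)
}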

@@ -690,10 +690,15 @@ func (consensus *Consensus) commitBlock(blk *types.Block, committedMsg *FBFTMess
 // This function must be called with enabled leader rotation.
 func (consensus *Consensus) rotateLeader(epoch *big.Int) {
 	var (
 		bc     = consensus.Blockchain()
 		prev   = consensus.getLeaderPubKey()
 		leader = consensus.getLeaderPubKey()
+		curBlock  = bc.CurrentBlock()
+		curNumber = curBlock.NumberU64()
+		curEpoch  = curBlock.Epoch().Uint64()
 	)
+	const blocksCountAliveness = 10
 	utils.Logger().Info().Msgf("[Rotating leader] epoch: %v rotation:%v external rotation %v", epoch.Uint64(), bc.Config().IsLeaderRotationInternalValidators(epoch), bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch))
 	ss, err := bc.ReadShardState(epoch)
 	if err != nil {
@@ -741,18 +746,59 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) {
 	var (
 		wasFound bool
 		next     *bls.PublicKeyWrapper
+		offset   = 1
 	)
-	if bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch) {
-		wasFound, next = consensus.Decider.NthNextValidator(committee.Slots, leader, 1)
-	} else {
-		wasFound, next = consensus.Decider.NthNextHmy(shard.Schedule.InstanceForEpoch(epoch), leader, 1)
-	}
-	if !wasFound {
-		utils.Logger().Error().Msg("Failed to get next leader")
-		return
-	} else {
-		consensus.setLeaderPubKey(next)
+
+	for {
+		if bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch) {
+			wasFound, next = consensus.Decider.NthNextValidator(committee.Slots, leader, offset)
+		} else {
+			wasFound, next = consensus.Decider.NthNextHmy(shard.Schedule.InstanceForEpoch(epoch), leader, offset)
+		}
+		if !wasFound {
+			utils.Logger().Error().Msg("Failed to get next leader")
+			// Seems like nothing we can do here.
+			return
+		}
+		members := consensus.Decider.Participants()
+		mask := bls.NewMask(members)
+		skipped := 0
+		for i := 0; i < blocksCountAliveness; i++ {
+			header := bc.GetHeaderByNumber(curNumber - uint64(i))
+			if header == nil {
+				utils.Logger().Error().Msgf("Failed to get header by number %d", curNumber-uint64(i))
+				return
+			}
+			// if epoch is different, we should not check this block.
+			if header.Epoch().Uint64() != curEpoch {
+				break
+			}
+			// Populate the mask with the bitmap.
+			err = mask.SetMask(header.LastCommitBitmap())
+			if err != nil {
+				utils.Logger().Err(err).Msg("Failed to set mask")
+				return
+			}
+			ok, err := mask.KeyEnabled(next.Bytes)
+			if err != nil {
+				utils.Logger().Err(err).Msg("Failed to get key enabled")
+				return
+			}
+			if !ok {
+				skipped++
+			}
+		}
+		// no signature from the next leader at all, we should skip it.
+		if skipped >= blocksCountAliveness {
+			// Next leader is not signing blocks, we should skip it.
+			offset++
+			continue
+		}
+		consensus.setLeaderPubKey(next)
+		break
 	}
 	if consensus.isLeader() && !consensus.getLeaderPubKey().Object.IsEqual(prev.Object) {
 		// leader changed
 		go func() {
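
The hunk above turns rotateLeader's single Nth-next lookup into a loop: if the chosen candidate signed none of the last blocksCountAliveness (10) blocks of the current epoch, the offset is bumped and the next candidate is tried. A distilled, self-contained sketch of that rule, assuming a hypothetical signed callback in place of the real mask.SetMask/mask.KeyEnabled bitmap lookup:

package main

import "fmt"

const blocksCountAliveness = 10

// nextLiveLeader walks candidates in order and returns the first one that
// signed at least one of the last blocksCountAliveness blocks. signed is a
// stand-in for checking a candidate's key against a header's commit bitmap.
func nextLiveLeader(candidates []string, signed func(candidate string, blockOffset int) bool) (string, bool) {
	for _, cand := range candidates {
		skipped := 0
		for i := 0; i < blocksCountAliveness; i++ {
			if !signed(cand, i) {
				skipped++
			}
		}
		// No signature from this candidate at all: skip it, as the loop
		// above does by bumping offset and continuing.
		if skipped >= blocksCountAliveness {
			continue
		}
		return cand, true
	}
	return "", false
}

func main() {
	// "b" never signs; "c" signed the blocks at offsets 0 and 3.
	sigs := map[string]map[int]bool{
		"b": {},
		"c": {0: true, 3: true},
	}
	leader, ok := nextLiveLeader([]string{"b", "c"}, func(cand string, off int) bool {
		return sigs[cand][off]
	})
	fmt.Println(leader, ok) // c true: "b" is skipped as not alive
}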

@@ -77,7 +77,7 @@ type ParticipantTracker interface {
 	ParticipantsCount() int64
 	// NthNextValidator returns key for next validator. It assumes external validators and leader rotation.
 	NthNextValidator(slotList shard.SlotList, pubKey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper)
-	NthNextHmy(shardingconfig.Instance, *bls.PublicKeyWrapper, int) (bool, *bls.PublicKeyWrapper)
+	NthNextHmy(instance shardingconfig.Instance, pubkey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper)
 	NthNextHmyExt(shardingconfig.Instance, *bls.PublicKeyWrapper, int) (bool, *bls.PublicKeyWrapper)
 	FirstParticipant(shardingconfig.Instance) *bls.PublicKeyWrapper
 	UpdateParticipants(pubKeys, allowlist []bls.PublicKeyWrapper)
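
Only NthNextHmy's signature gains parameter names here, but its `next` parameter is what rotateLeader's growing offset feeds into. A toy sketch of Nth-next semantics over a plain ring of keys (the real implementations also filter by slot list or sharding instance):

package main

import "fmt"

// nthNext is a toy model of the Nth-next lookups in ParticipantTracker:
// find the current key in the ring, then step `next` places forward.
func nthNext(ring []string, cur string, next int) (bool, string) {
	for i, k := range ring {
		if k == cur {
			return true, ring[(i+next)%len(ring)]
		}
	}
	return false, "" // current key not found in the ring
}

func main() {
	ring := []string{"key0", "key1", "key2"}
	found, nxt := nthNext(ring, "key1", 2)
	fmt.Println(found, nxt) // true key0: the lookup wraps around the ring
}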

@@ -1683,8 +1683,6 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i
 	if len(chain) == 0 {
 		return 0, nil, nil, ErrEmptyChain
 	}
-	first := chain[0]
-	fmt.Println("insertChain", utils.GetPort(), first.ShardID(), first.Epoch().Uint64(), first.NumberU64())
 	// Do a sanity check that the provided chain is actually ordered and linked
 	for i := 1; i < len(chain); i++ {
 		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {

@@ -312,7 +312,15 @@ func ApplyTransaction(bc ChainContext, author *common.Address, gp *GasPool, stat
 	// Apply the transaction to the current state (included in the env)
 	result, err := ApplyMessage(vmenv, msg, gp)
 	if err != nil {
-		return nil, nil, nil, 0, errors.Wrapf(err, "apply failed from='%s' to='%s' balance='%s'", msg.From().Hex(), msg.To().Hex(), statedb.GetBalance(msg.From()).String())
+		to := ""
+		if m := msg.To(); m != nil {
+			to = m.Hex()
+		}
+		balance := ""
+		if a := statedb.GetBalance(msg.From()); a != nil {
+			balance = a.String()
+		}
+		return nil, nil, nil, 0, errors.Wrapf(err, "apply failed from='%s' to='%s' balance='%s'", msg.From().Hex(), to, balance)
 	}
 	// Update the state with pending changes
 	var root []byte
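
The guards above matter because msg.To() is nil for contract-creation transactions, so the old one-liner could dereference a nil pointer while merely formatting the error message. A self-contained sketch of the failure mode, with a hypothetical address type standing in for common.Address:

package main

import "fmt"

// addr is a hypothetical stand-in for common.Address.
type addr [20]byte

func (a *addr) Hex() string { return fmt.Sprintf("0x%x", a[:]) }

type message struct{ to *addr }

// To returns nil for contract creation, as go-ethereum-style messages do.
func (m message) To() *addr { return m.to }

func main() {
	create := message{to: nil} // contract creation: no recipient

	// create.To().Hex() would panic here; guard first, as the hunk does.
	to := ""
	if m := create.To(); m != nil {
		to = m.Hex()
	}
	fmt.Printf("apply failed to='%s'\n", to) // safe: empty recipient string
}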

@@ -0,0 +1,17 @@
+package utils
+
+import "golang.org/x/exp/constraints"
+
+func Min[T constraints.Ordered](a, b T) T {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func Max[T constraints.Ordered](a, b T) T {
+	if a > b {
+		return a
+	}
+	return b
+}
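
The new helpers accept any constraints.Ordered type (integers, floats, strings). A quick usage sketch, assuming the repo's module path github.com/harmony-one/harmony; note the package sits under internal/, so it is only importable from within the harmony module itself:

package main

import (
	"fmt"

	// Importable only from inside the harmony module, since the
	// package path contains internal/.
	"github.com/harmony-one/harmony/internal/utils"
)

func main() {
	fmt.Println(utils.Min(3, 7))     // 3
	fmt.Println(utils.Max(2.5, 1.0)) // 2.5
	fmt.Println(utils.Min("b", "a")) // a: strings compare lexicographically
}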