Clean up debug output and fix UpdatePublicKeys.

pull/4377/head
Authored by frozen 2 years ago; committed by Casey Gardiner
parent 2706ea0478
commit 05b423c3c4
  1. 10
      cmd/harmony/main.go
  2. 2
      consensus/consensus.go
  3. 4
      consensus/consensus_service.go
  4. 1
      consensus/consensus_v2.go
  5. 11
      consensus/leader.go
  6. 11
      consensus/view_change.go

@ -677,6 +677,16 @@ func createGlobalConfig(hc harmonyconfig.HarmonyConfig) (*nodeconfig.ConfigType,
}
func setupConsensusAndNode(hc harmonyconfig.HarmonyConfig, nodeConfig *nodeconfig.ConfigType, registry *registry.Registry) *node.Node {
// Consensus object.
// TODO: consensus object shouldn't start here
decider := quorum.NewDecider(quorum.SuperMajorityVote, uint32(hc.General.ShardID))
currentConsensus, err := consensus.New(
myHost, nodeConfig.ShardID, p2p.Peer{}, nodeConfig.ConsensusPriKey, decider)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "Error :%v \n", err)
os.Exit(1)
}
// Parse minPeers from harmonyconfig.HarmonyConfig
var minPeers int
var aggregateSig bool

@ -230,7 +230,7 @@ func (consensus *Consensus) BlockNum() uint64 {
// New create a new Consensus record
func New(
host p2p.Host, shard uint32, multiBLSPriKey multibls.PrivateKeys,
host p2p.Host, shard uint32, leader p2p.Peer, multiBLSPriKey multibls.PrivateKeys,
registry *registry.Registry,
Decider quorum.Decider, minPeers int, aggregateSig bool,
) (*Consensus, error) {

@ -74,6 +74,10 @@ func (consensus *Consensus) signAndMarshalConsensusMessage(message *msg_pb.Messa
// UpdatePublicKeys updates the PublicKeys for
// quorum on current subcommittee, protected by a mutex
func (consensus *Consensus) UpdatePublicKeys(pubKeys, allowlist []bls_cosi.PublicKeyWrapper) int64 {
if utils.GetPort() == 9000 {
//utils.Logger().Info().Msg("UpdatePublicKeys")
fmt.Println("UpdatePublicKeys", len(pubKeys), len(allowlist))
}
// TODO: use mutex for updating public keys pointer. No need to lock on all these logic.
consensus.pubKeyLock.Lock()
consensus.Decider.UpdateParticipants(pubKeys, allowlist)

@ -650,7 +650,6 @@ func (consensus *Consensus) tryCatchup() error {
consensus.getLogger().Error().Err(err).Msg("[TryCatchup] Failed to add block to chain")
return err
}
//fmt.Println("tryCatchup ", utils.GetPort(), blk.NumberU64())
select {
// TODO: Remove this when removing dns sync and stream sync is fully up
case consensus.VerifiedNewBlock <- blk:

@ -200,16 +200,12 @@ func (consensus *Consensus) onPrepare(recvMsg *FBFTMessage) {
func (consensus *Consensus) onCommit(recvMsg *FBFTMessage) {
// TODO HERE
//if recvMsg.ViewID == 10 {
// return
//}
if recvMsg.ViewID == 21 {
return
}
consensus.mutex.Lock()
defer consensus.mutex.Unlock()
//// Read - Start
if consensus.ShardID == 0 {
//fmt.Println("onCommit ", recvMsg.BlockNum)
}
if !consensus.isRightBlockNumAndViewID(recvMsg) {
return
}
@ -341,5 +337,4 @@ func (consensus *Consensus) onCommit(recvMsg *FBFTMessage) {
consensus.msgSender.StopRetry(msg_pb.MessageType_PREPARED)
}
//fmt.Println("onCommit99: ", utils.GetPort(), recvMsg.BlockNum)
}

@ -1,7 +1,6 @@
package consensus
import (
"fmt"
"math/big"
"sync"
"time"
@ -162,7 +161,6 @@ func (consensus *Consensus) getNextViewID() (uint64, time.Duration) {
Uint64("stuckBlockViewID", stuckBlockViewID).
Msg("[getNextViewID]")
fmt.Println("end getNextViewID: ", nextViewID, viewChangeDuration)
// duration is always the fixed view change duration for synchronous view change
return nextViewID, viewChangeDuration
}
@ -235,7 +233,6 @@ func (consensus *Consensus) getNextLeaderKey(viewID uint64) *bls.PublicKeyWrappe
lastLeaderPubKey,
gap)
}
fmt.Println("wasfoundNext", consensus.Blockchain.Config().IsAllowlistEpoch(epoch), wasFound, next.Bytes.Hex(), lastLeaderPubKey.Bytes.Hex())
if !wasFound {
consensus.getLogger().Warn().
Str("key", consensus.LeaderPubKey.Bytes.Hex()).
@ -257,7 +254,6 @@ func createTimeout() map[TimeoutType]*utils.Timeout {
// startViewChange start the view change process
func (consensus *Consensus) startViewChange() {
fmt.Printf("Message to send leader111: %d %s \n", utils.GetPort(), consensus.LeaderPubKey.Bytes.Hex())
if consensus.disableViewChange || consensus.IsBackup() {
return
}
@ -268,7 +264,6 @@ func (consensus *Consensus) startViewChange() {
consensus.consensusTimeout[timeoutBootstrap].Stop()
consensus.current.SetMode(ViewChanging)
nextViewID, duration := consensus.getNextViewID()
//fmt.Println("startViewChange", nextViewID)
consensus.SetViewChangingID(nextViewID)
// TODO: set the Leader PubKey to the next leader for view change
// this is dangerous as the leader change is not succeeded yet
@ -312,9 +307,7 @@ func (consensus *Consensus) startViewChange() {
if !consensus.IsValidatorInCommittee(key.Pub.Bytes) {
continue
}
// The leader has already changed at this point
msgToSend := consensus.constructViewChangeMessage(&key)
fmt.Println("Message to send leader222: ", consensus.LeaderPubKey.Bytes.Hex())
if err := consensus.msgSender.SendWithRetry(
consensus.BlockNum(),
msg_pb.MessageType_VIEWCHANGE,
@ -372,7 +365,6 @@ func (consensus *Consensus) startNewView(viewID uint64, newLeaderPriKey *bls.Pri
if reset {
consensus.ResetState()
}
fmt.Println("[startNewView]", newLeaderPriKey.Pub.Bytes.Hex())
consensus.SetLeaderPubKey(newLeaderPriKey.Pub)
return nil
@ -490,8 +482,6 @@ func (consensus *Consensus) onNewView(recvMsg *FBFTMessage) {
consensus.mutex.Lock()
defer consensus.mutex.Unlock()
fmt.Printf("[onNewView] received new view message from %+v\n", recvMsg)
consensus.getLogger().Info().
Uint64("viewID", recvMsg.ViewID).
Uint64("blockNum", recvMsg.BlockNum).
@ -580,7 +570,6 @@ func (consensus *Consensus) onNewView(recvMsg *FBFTMessage) {
// newView message verified success, override my state
consensus.SetViewIDs(recvMsg.ViewID)
consensus.pubKeyLock.Lock()
fmt.Println("[onNewView1221] new leader key cur:", consensus.LeaderPubKey.Bytes.Hex(), " new: ", senderKey.Bytes.Hex())
consensus.LeaderPubKey = senderKey
consensus.pubKeyLock.Unlock()
consensus.ResetViewChangeState()

Loading…
Cancel
Save