commit 63763c74ba (pull/535/head)
Author: Rongjian Lan
24 files changed (changed-line counts in parentheses):

1. api/proto/discovery/pingpong.go (4)
2. api/proto/discovery/pingpong_test.go (6)
3. api/service/discovery/discovery_test.go (2)
4. api/service/networkinfo/service_test.go (2)
5. cmd/client/txgen/main.go (2)
6. cmd/harmony/main.go (6)
7. consensus/consensus.go (26)
8. consensus/consensus_leader.go (14)
9. consensus/consensus_leader_msg_test.go (4)
10. consensus/consensus_leader_test.go (12)
11. consensus/consensus_test.go (10)
12. consensus/consensus_validator.go (6)
13. consensus/consensus_validator_test.go (24)
14. drand/drand.go (14)
15. drand/drand_leader.go (8)
16. drand/drand_validator.go (2)
17. internal/configs/node/config.go (4)
18. node/node.go (4)
19. node/node_handler.go (10)
20. node/node_handler_test.go (4)
21. node/node_test.go (24)
22. node/staking_test.go (2)
23. p2p/p2p.go (2)
24. specs/test/testplan.md (14)

api/proto/discovery/pingpong.go:

@@ -54,7 +54,7 @@ func NewPingMessage(peer p2p.Peer) *PingMessageType {
ping.Node.Port = peer.Port
ping.Node.PeerID = peer.PeerID
ping.Node.ValidatorID = peer.ValidatorID
-ping.Node.PubKey = peer.BlsPubKey.Serialize()
+ping.Node.PubKey = peer.ConsensusPubKey.Serialize()
ping.Node.Role = node.ValidatorRole
return ping
@@ -75,7 +75,7 @@ func NewPongMessage(peers []p2p.Peer, pubKeys []*bls.PublicKey, leaderKey *bls.P
n.Port = p.Port
n.ValidatorID = p.ValidatorID
n.PeerID = p.PeerID
-n.PubKey = p.BlsPubKey.Serialize()
+n.PubKey = p.ConsensusPubKey.Serialize()
if err != nil {
fmt.Printf("Error Marshal PubKey: %v", err)
continue

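The hunks above only touch the field name, but they sit on the wire boundary: NewPingMessage and NewPongMessage serialize the key, and the node handlers further down deserialize it. A minimal sketch of that round trip, assuming the herumi BLS binding that Harmony wraps (the import path and curve constant are assumptions, not taken from this commit):

    package main

    import (
        "fmt"

        "github.com/harmony-one/bls/ffi/go/bls" // assumed import path
    )

    func main() {
        // The binding requires a one-time curve initialization.
        if err := bls.Init(bls.BLS12_381); err != nil {
            panic(err)
        }

        var sec bls.SecretKey
        sec.SetByCSPRNG()
        pub := sec.GetPublicKey()

        // NewPingMessage puts the serialized key on the wire ...
        wire := pub.Serialize()

        // ... and the receiving side reconstructs it, as in
        // pingMessageHandler below.
        recovered := &bls.PublicKey{}
        if err := recovered.Deserialize(wire); err != nil {
            panic(err)
        }
        fmt.Println("round trip ok:", recovered.IsEqual(pub))
    }
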
api/proto/discovery/pingpong_test.go:

@@ -19,7 +19,7 @@ var (
IP: "127.0.0.1",
Port: "9999",
ValidatorID: -1,
-BlsPubKey: pubKey1,
+ConsensusPubKey: pubKey1,
}
e1 = "ping:Validator/1=>127.0.0.1:9999:-1/[120 1 130 197 30 202 78 236 84 249 5 230 132 208 242 242 246 63 100 123 96 11 211 228 4 56 64 94 57 133 3 226 254 222 231 160 178 81 252 205 40 28 45 2 90 74 207 15 68 86 138 68 143 176 221 161 108 105 133 6 64 121 92 25 134 255 9 209 156 209 119 187 13 160 23 147 240 24 196 152 100 20 163 51 118 45 100 26 179 227 184 166 147 113 50 139]"
e3 = "ping:Client/1=>127.0.0.1:9999:-1/[120 1 130 197 30 202 78 236 84 249 5 230 132 208 242 242 246 63 100 123 96 11 211 228 4 56 64 94 57 133 3 226 254 222 231 160 178 81 252 205 40 28 45 2 90 74 207 15 68 86 138 68 143 176 221 161 108 105 133 6 64 121 92 25 134 255 9 209 156 209 119 187 13 160 23 147 240 24 196 152 100 20 163 51 118 45 100 26 179 227 184 166 147 113 50 139]"
@@ -30,13 +30,13 @@ var (
{
IP: "127.0.0.1",
Port: "8888",
-BlsPubKey: pubKey1,
+ConsensusPubKey: pubKey1,
ValidatorID: -1,
},
{
IP: "127.0.0.1",
Port: "9999",
-BlsPubKey: pubKey2,
+ConsensusPubKey: pubKey2,
ValidatorID: -2,
},
}

api/service/discovery/discovery_test.go:

@@ -25,7 +25,7 @@ func TestDiscoveryService(t *testing.T) {
if peerPriKey == nil || peerPubKey == nil {
t.Fatal("generate key error")
}
-selfPeer := p2p.Peer{IP: "127.0.0.1", Port: "12345", ValidatorID: -1, BlsPubKey: peerPubKey}
+selfPeer := p2p.Peer{IP: "127.0.0.1", Port: "12345", ValidatorID: -1, ConsensusPubKey: peerPubKey}
host, err := p2pimpl.NewHost(&selfPeer, nodePriKey)
if err != nil {

api/service/networkinfo/service_test.go:

@@ -18,7 +18,7 @@ func TestService(t *testing.T) {
if peerPriKey == nil || peerPubKey == nil {
t.Fatal("generate key error")
}
-selfPeer := p2p.Peer{IP: "127.0.0.1", Port: "12345", ValidatorID: -1, BlsPubKey: peerPubKey}
+selfPeer := p2p.Peer{IP: "127.0.0.1", Port: "12345", ValidatorID: -1, ConsensusPubKey: peerPubKey}
host, err := p2pimpl.NewHost(&selfPeer, nodePriKey)
if err != nil {

cmd/client/txgen/main.go:

@@ -83,7 +83,7 @@ func main() {
panic(fmt.Errorf("generate key error"))
}
-selfPeer := p2p.Peer{IP: *ip, Port: *port, ValidatorID: -1, BlsPubKey: peerPubKey}
+selfPeer := p2p.Peer{IP: *ip, Port: *port, ValidatorID: -1, ConsensusPubKey: peerPubKey}
// Init with LibP2P enabled, FIXME: (leochen) right now we support only one shard
shardIDLeaderMap = make(map[uint32]p2p.Peer)

cmd/harmony/main.go:

@@ -127,8 +127,8 @@ func createGlobalConfig() *nodeconfig.ConfigType {
}
// Setup Bls keys
-nodeConfig.BlsPriKey, nodeConfig.BlsPubKey = utils.GenKey(*ip, *port)
-if nodeConfig.BlsPriKey == nil || nodeConfig.BlsPubKey == nil {
+nodeConfig.ConsensusPriKey, nodeConfig.ConsensusPubKey = utils.GenKey(*ip, *port)
+if nodeConfig.ConsensusPriKey == nil || nodeConfig.ConsensusPubKey == nil {
panic(fmt.Errorf("generate key error"))
}
@@ -142,7 +142,7 @@ func createGlobalConfig() *nodeconfig.ConfigType {
}
}
-nodeConfig.SelfPeer = p2p.Peer{IP: *ip, Port: *port, ValidatorID: -1, BlsPubKey: nodeConfig.BlsPubKey}
+nodeConfig.SelfPeer = p2p.Peer{IP: *ip, Port: *port, ValidatorID: -1, ConsensusPubKey: nodeConfig.ConsensusPubKey}
if *isLeader {
nodeConfig.StringRole = "leader"
nodeConfig.Leader = nodeConfig.SelfPeer

consensus/consensus.go:

@@ -192,14 +192,14 @@ func New(host p2p.Host, ShardID string, peers []p2p.Peer, leader p2p.Peer) *Cons
// Initialize cosign bitmap
allPublicKeys := make([]*bls.PublicKey, 0)
for _, validatorPeer := range peers {
-allPublicKeys = append(allPublicKeys, validatorPeer.BlsPubKey)
+allPublicKeys = append(allPublicKeys, validatorPeer.ConsensusPubKey)
}
-allPublicKeys = append(allPublicKeys, leader.BlsPubKey)
+allPublicKeys = append(allPublicKeys, leader.ConsensusPubKey)
consensus.PublicKeys = allPublicKeys
-prepareBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.BlsPubKey)
-commitBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.BlsPubKey)
+prepareBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.ConsensusPubKey)
+commitBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.ConsensusPubKey)
consensus.prepareBitmap = prepareBitmap
consensus.commitBitmap = commitBitmap
@@ -384,8 +384,8 @@ func (consensus *Consensus) ResetState() {
consensus.prepareSigs = map[uint32]*bls.Sign{}
consensus.commitSigs = map[uint32]*bls.Sign{}
-prepareBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.BlsPubKey)
-commitBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.BlsPubKey)
+prepareBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.ConsensusPubKey)
+commitBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.ConsensusPubKey)
consensus.prepareBitmap = prepareBitmap
consensus.commitBitmap = commitBitmap
@@ -421,7 +421,7 @@ func (consensus *Consensus) AddPeers(peers []*p2p.Peer) int {
}
consensus.validators.Store(utils.GetUniqueIDFromPeer(*peer), *peer)
consensus.pubKeyLock.Lock()
-consensus.PublicKeys = append(consensus.PublicKeys, peer.BlsPubKey)
+consensus.PublicKeys = append(consensus.PublicKeys, peer.ConsensusPubKey)
consensus.pubKeyLock.Unlock()
// utils.GetLogInstance().Debug("[SYNC]", "new peer added", peer)
}
@@ -458,7 +458,7 @@ func (consensus *Consensus) RemovePeers(peers []p2p.Peer) int {
for i, pp := range newList {
// Not Found the pubkey, if found pubkey, ignore it
-if reflect.DeepEqual(peer.BlsPubKey, pp) {
+if reflect.DeepEqual(peer.ConsensusPubKey, pp) {
// consensus.Log.Debug("RemovePeers", "i", i, "pp", pp, "peer.PubKey", peer.PubKey)
newList = append(newList[:i], newList[i+1:]...)
count2++
@@ -473,7 +473,7 @@ func (consensus *Consensus) RemovePeers(peers []p2p.Peer) int {
// Or the shard won't be able to reach consensus if public keys are mismatch
validators := consensus.GetValidatorPeers()
-pong := proto_discovery.NewPongMessage(validators, consensus.PublicKeys, consensus.leader.BlsPubKey)
+pong := proto_discovery.NewPongMessage(validators, consensus.PublicKeys, consensus.leader.ConsensusPubKey)
buffer := pong.ConstructPongMessage()
consensus.host.SendMessageToGroups([]p2p.GroupID{p2p.GroupIDBeacon}, host.ConstructP2pMessage(byte(17), buffer))
@@ -497,7 +497,7 @@ func (consensus *Consensus) DebugPrintValidators() {
count := 0
consensus.validators.Range(func(k, v interface{}) bool {
if p, ok := v.(p2p.Peer); ok {
-str2 := fmt.Sprintf("%s", p.BlsPubKey.Serialize())
+str2 := fmt.Sprintf("%s", p.ConsensusPubKey.Serialize())
utils.GetLogInstance().Debug("validator:", "IP", p.IP, "Port", p.Port, "VID", p.ValidatorID, "Key", str2)
count++
return true
@@ -653,11 +653,11 @@ func (consensus *Consensus) signAndMarshalConsensusMessage(message *msg_pb.Messa
// SetLeaderPubKey deserialize the public key of consensus leader
func (consensus *Consensus) SetLeaderPubKey(k []byte) error {
-consensus.leader.BlsPubKey = &bls.PublicKey{}
-return consensus.leader.BlsPubKey.Deserialize(k)
+consensus.leader.ConsensusPubKey = &bls.PublicKey{}
+return consensus.leader.ConsensusPubKey.Deserialize(k)
}
// GetLeaderPubKey returns the public key of consensus leader
func (consensus *Consensus) GetLeaderPubKey() *bls.PublicKey {
-return consensus.leader.BlsPubKey
+return consensus.leader.ConsensusPubKey
}

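Both New and ResetState rebuild the prepare and commit bitmaps from the same key set, so the rename has to land in every bls_cosi.NewMask call at once. A sketch of that shared setup, with the import paths and the Mask type name as assumptions (only the NewMask call and its discarded error are taken from the hunks above):

    import (
        "github.com/harmony-one/bls/ffi/go/bls"              // assumed path
        bls_cosi "github.com/harmony-one/harmony/crypto/bls" // assumed path
    )

    // buildBitmaps mirrors the setup in New and ResetState: the key set is
    // every validator's ConsensusPubKey plus the leader's, and both masks are
    // keyed on the leader's ConsensusPubKey.
    func buildBitmaps(allKeys []*bls.PublicKey, leaderKey *bls.PublicKey) (prepare, commit *bls_cosi.Mask, err error) {
        if prepare, err = bls_cosi.NewMask(allKeys, leaderKey); err != nil {
            return nil, nil, err
        }
        if commit, err = bls_cosi.NewMask(allKeys, leaderKey); err != nil {
            return nil, nil, err
        }
        return prepare, commit, nil
    }
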
consensus/consensus_leader.go:

@@ -66,7 +66,7 @@ func (consensus *Consensus) WaitForNewBlock(blockChannel chan *types.Block, stop
pRnd := [32]byte{}
copy(pRnd[:], pRndAndBitmap[:32])
bitmap := pRndAndBitmap[32:]
-vrfBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.BlsPubKey)
+vrfBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.ConsensusPubKey)
vrfBitmap.SetMask(bitmap)
// TODO: check validity of pRnd
@@ -166,7 +166,7 @@ func (consensus *Consensus) processPrepareMessage(message *msg_pb.Message) {
return
}
-if err := consensus.checkConsensusMessage(message, validatorPeer.BlsPubKey); err != nil {
+if err := consensus.checkConsensusMessage(message, validatorPeer.ConsensusPubKey); err != nil {
utils.GetLogInstance().Debug("Failed to check the validator message", "validatorID", validatorID)
return
}
@@ -191,14 +191,14 @@ func (consensus *Consensus) processPrepareMessage(message *msg_pb.Message) {
return
}
-if !sign.VerifyHash(validatorPeer.BlsPubKey, consensus.blockHash[:]) {
+if !sign.VerifyHash(validatorPeer.ConsensusPubKey, consensus.blockHash[:]) {
utils.GetLogInstance().Error("Received invalid BLS signature", "validatorID", validatorID)
return
}
utils.GetLogInstance().Debug("Received new prepare signature", "numReceivedSoFar", len(prepareSigs), "validatorID", validatorID, "PublicKeys", len(consensus.PublicKeys))
prepareSigs[validatorID] = &sign
-prepareBitmap.SetKey(validatorPeer.BlsPubKey, true) // Set the bitmap indicating that this validator signed.
+prepareBitmap.SetKey(validatorPeer.ConsensusPubKey, true) // Set the bitmap indicating that this validator signed.
targetState := PreparedDone
if len(prepareSigs) >= ((len(consensus.PublicKeys)*2)/3+1) && consensus.state < targetState {
@@ -237,7 +237,7 @@ func (consensus *Consensus) processCommitMessage(message *msg_pb.Message) {
return
}
-if err := consensus.checkConsensusMessage(message, validatorPeer.BlsPubKey); err != nil {
+if err := consensus.checkConsensusMessage(message, validatorPeer.ConsensusPubKey); err != nil {
utils.GetLogInstance().Debug("Failed to check the validator message", "validatorID", validatorID)
return
}
@@ -265,7 +265,7 @@ func (consensus *Consensus) processCommitMessage(message *msg_pb.Message) {
return
}
aggSig := bls_cosi.AggregateSig(consensus.GetPrepareSigsArray())
-if !sign.VerifyHash(validatorPeer.BlsPubKey, append(aggSig.Serialize(), consensus.prepareBitmap.Bitmap...)) {
+if !sign.VerifyHash(validatorPeer.ConsensusPubKey, append(aggSig.Serialize(), consensus.prepareBitmap.Bitmap...)) {
utils.GetLogInstance().Error("Received invalid BLS signature", "validatorID", validatorID)
return
}
@@ -273,7 +273,7 @@ func (consensus *Consensus) processCommitMessage(message *msg_pb.Message) {
utils.GetLogInstance().Debug("Received new commit message", "numReceivedSoFar", len(commitSigs), "validatorID", strconv.Itoa(int(validatorID)))
commitSigs[validatorID] = &sign
// Set the bitmap indicating that this validator signed.
-commitBitmap.SetKey(validatorPeer.BlsPubKey, true)
+commitBitmap.SetKey(validatorPeer.ConsensusPubKey, true)
targetState := CommittedDone
if len(commitSigs) >= ((len(consensus.PublicKeys)*2)/3+1) && consensus.state != targetState {

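processPrepareMessage and processCommitMessage gate their state transitions on the same quorum rule: at least (2n/3)+1 of the n known public keys must have signed. A runnable sketch of just that check:

    package main

    import "fmt"

    // hasQuorum reproduces the threshold expression from the hunks above:
    // len(sigs) >= (len(PublicKeys)*2)/3 + 1, i.e. strictly more than two
    // thirds of the known keys (integer division).
    func hasQuorum(numSigs, numKeys int) bool {
        return numSigs >= (numKeys*2)/3+1
    }

    func main() {
        fmt.Println(hasQuorum(2, 4)) // false: (4*2)/3+1 = 3 signatures needed
        fmt.Println(hasQuorum(3, 4)) // true
    }
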
consensus/consensus_leader_msg_test.go:

@@ -36,10 +36,10 @@ func TestConstructAnnounceMessage(test *testing.T) {
func TestConstructPreparedMessage(test *testing.T) {
leaderPriKey, leaderPubKey := utils.GenKey("127.0.0.1", "6000")
-leader := p2p.Peer{IP: "127.0.0.1", Port: "6000", BlsPubKey: leaderPubKey}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "6000", ConsensusPubKey: leaderPubKey}
validatorPriKey, validatorPubKey := utils.GenKey("127.0.0.1", "5555")
-validator := p2p.Peer{IP: "127.0.0.1", Port: "5555", BlsPubKey: validatorPubKey}
+validator := p2p.Peer{IP: "127.0.0.1", Port: "5555", ConsensusPubKey: validatorPubKey}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
if err != nil {

consensus/consensus_leader_test.go:

@@ -30,7 +30,7 @@ func TestProcessMessageLeaderPrepare(test *testing.T) {
defer ctrl.Finish()
leader := p2p.Peer{IP: ip, Port: "7777"}
-_, leader.BlsPubKey = utils.GenKey(leader.IP, leader.Port)
+_, leader.ConsensusPubKey = utils.GenKey(leader.IP, leader.Port)
validators := make([]p2p.Peer, 3)
hosts := make([]p2p.Host, 3)
@@ -38,7 +38,7 @@ func TestProcessMessageLeaderPrepare(test *testing.T) {
for i := 0; i < 3; i++ {
port := fmt.Sprintf("%d", 7788+i)
validators[i] = p2p.Peer{IP: ip, Port: port, ValidatorID: i + 1}
-_, validators[i].BlsPubKey = utils.GenKey(validators[i].IP, validators[i].Port)
+_, validators[i].ConsensusPubKey = utils.GenKey(validators[i].IP, validators[i].Port)
}
m := mock_host.NewMockHost(ctrl)
@@ -76,7 +76,7 @@ func TestProcessMessageLeaderPrepareInvalidSignature(test *testing.T) {
defer ctrl.Finish()
leader := p2p.Peer{IP: ip, Port: "7777"}
-_, leader.BlsPubKey = utils.GenKey(leader.IP, leader.Port)
+_, leader.ConsensusPubKey = utils.GenKey(leader.IP, leader.Port)
validators := make([]p2p.Peer, 3)
hosts := make([]p2p.Host, 3)
@@ -84,7 +84,7 @@ func TestProcessMessageLeaderPrepareInvalidSignature(test *testing.T) {
for i := 0; i < 3; i++ {
port := fmt.Sprintf("%d", 7788+i)
validators[i] = p2p.Peer{IP: ip, Port: port, ValidatorID: i + 1}
-_, validators[i].BlsPubKey = utils.GenKey(validators[i].IP, validators[i].Port)
+_, validators[i].ConsensusPubKey = utils.GenKey(validators[i].IP, validators[i].Port)
}
m := mock_host.NewMockHost(ctrl)
@@ -130,7 +130,7 @@ func TestProcessMessageLeaderCommit(test *testing.T) {
defer ctrl.Finish()
leader := p2p.Peer{IP: ip, Port: "8889"}
-_, leader.BlsPubKey = utils.GenKey(leader.IP, leader.Port)
+_, leader.ConsensusPubKey = utils.GenKey(leader.IP, leader.Port)
validators := make([]p2p.Peer, 3)
hosts := make([]p2p.Host, 3)
@@ -138,7 +138,7 @@ func TestProcessMessageLeaderCommit(test *testing.T) {
for i := 0; i < 3; i++ {
port := fmt.Sprintf("%d", 8788+i)
validators[i] = p2p.Peer{IP: ip, Port: port, ValidatorID: i + 1}
-_, validators[i].BlsPubKey = utils.GenKey(validators[i].IP, validators[i].Port)
+_, validators[i].ConsensusPubKey = utils.GenKey(validators[i].IP, validators[i].Port)
}
m := mock_host.NewMockHost(ctrl)

consensus/consensus_test.go:

@@ -39,16 +39,16 @@ func TestRemovePeers(t *testing.T) {
_, pk4 := utils.GenKey("4", "4")
_, pk5 := utils.GenKey("5", "5")
-p1 := p2p.Peer{IP: "127.0.0.1", Port: "19901", BlsPubKey: pk1}
-p2 := p2p.Peer{IP: "127.0.0.1", Port: "19902", BlsPubKey: pk2}
-p3 := p2p.Peer{IP: "127.0.0.1", Port: "19903", BlsPubKey: pk3}
-p4 := p2p.Peer{IP: "127.0.0.1", Port: "19904", BlsPubKey: pk4}
+p1 := p2p.Peer{IP: "127.0.0.1", Port: "19901", ConsensusPubKey: pk1}
+p2 := p2p.Peer{IP: "127.0.0.1", Port: "19902", ConsensusPubKey: pk2}
+p3 := p2p.Peer{IP: "127.0.0.1", Port: "19903", ConsensusPubKey: pk3}
+p4 := p2p.Peer{IP: "127.0.0.1", Port: "19904", ConsensusPubKey: pk4}
peers := []p2p.Peer{p1, p2, p3, p4}
peerRemove := []p2p.Peer{p1, p2}
-leader := p2p.Peer{IP: "127.0.0.1", Port: "9000", BlsPubKey: pk5}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "9000", ConsensusPubKey: pk5}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
if err != nil {

consensus/consensus_validator.go:

@@ -91,7 +91,7 @@ func (consensus *Consensus) processAnnounceMessage(message *msg_pb.Message) {
copy(consensus.blockHash[:], blockHash[:])
consensus.block = block
-if err := consensus.checkConsensusMessage(message, consensus.leader.BlsPubKey); err != nil {
+if err := consensus.checkConsensusMessage(message, consensus.leader.ConsensusPubKey); err != nil {
utils.GetLogInstance().Debug("Failed to check the leader message")
if err == consensus_engine.ErrConsensusIDNotMatch {
utils.GetLogInstance().Debug("sending bft block to state syncing")
@@ -152,7 +152,7 @@ func (consensus *Consensus) processPreparedMessage(message *msg_pb.Message) {
// Update readyByConsensus for attack.
attack.GetInstance().UpdateConsensusReady(consensusID)
-if err := consensus.checkConsensusMessage(message, consensus.leader.BlsPubKey); err != nil {
+if err := consensus.checkConsensusMessage(message, consensus.leader.ConsensusPubKey); err != nil {
utils.GetLogInstance().Debug("processPreparedMessage error", "error", err)
return
}
@@ -213,7 +213,7 @@ func (consensus *Consensus) processCommittedMessage(message *msg_pb.Message) {
// Update readyByConsensus for attack.
attack.GetInstance().UpdateConsensusReady(consensusID)
-if err := consensus.checkConsensusMessage(message, consensus.leader.BlsPubKey); err != nil {
+if err := consensus.checkConsensusMessage(message, consensus.leader.ConsensusPubKey); err != nil {
utils.GetLogInstance().Debug("processCommittedMessage error", "error", err)
return
}

consensus/consensus_validator_test.go:

@@ -53,14 +53,14 @@ func TestProcessMessageValidatorAnnounce(test *testing.T) {
defer ctrl.Finish()
leader := p2p.Peer{IP: "127.0.0.1", Port: "9982"}
-_, leader.BlsPubKey = utils.GenKey(leader.IP, leader.Port)
+_, leader.ConsensusPubKey = utils.GenKey(leader.IP, leader.Port)
validator1 := p2p.Peer{IP: "127.0.0.1", Port: "9984", ValidatorID: 1}
-_, validator1.BlsPubKey = utils.GenKey(validator1.IP, validator1.Port)
+_, validator1.ConsensusPubKey = utils.GenKey(validator1.IP, validator1.Port)
validator2 := p2p.Peer{IP: "127.0.0.1", Port: "9986", ValidatorID: 2}
-_, validator2.BlsPubKey = utils.GenKey(validator2.IP, validator2.Port)
+_, validator2.ConsensusPubKey = utils.GenKey(validator2.IP, validator2.Port)
validator3 := p2p.Peer{IP: "127.0.0.1", Port: "9988", ValidatorID: 3}
-_, validator3.BlsPubKey = utils.GenKey(validator3.IP, validator3.Port)
+_, validator3.ConsensusPubKey = utils.GenKey(validator3.IP, validator3.Port)
m := mock_host.NewMockHost(ctrl)
// Asserts that the first and only call to Bar() is passed 99.
@@ -107,14 +107,14 @@ func TestProcessMessageValidatorPrepared(test *testing.T) {
defer ctrl.Finish()
leader := p2p.Peer{IP: "127.0.0.1", Port: "7782"}
-_, leader.BlsPubKey = utils.GenKey(leader.IP, leader.Port)
+_, leader.ConsensusPubKey = utils.GenKey(leader.IP, leader.Port)
validator1 := p2p.Peer{IP: "127.0.0.1", Port: "7784", ValidatorID: 1}
-_, validator1.BlsPubKey = utils.GenKey(validator1.IP, validator1.Port)
+_, validator1.ConsensusPubKey = utils.GenKey(validator1.IP, validator1.Port)
validator2 := p2p.Peer{IP: "127.0.0.1", Port: "7786", ValidatorID: 2}
-_, validator2.BlsPubKey = utils.GenKey(validator2.IP, validator2.Port)
+_, validator2.ConsensusPubKey = utils.GenKey(validator2.IP, validator2.Port)
validator3 := p2p.Peer{IP: "127.0.0.1", Port: "7788", ValidatorID: 3}
-_, validator3.BlsPubKey = utils.GenKey(validator3.IP, validator3.Port)
+_, validator3.ConsensusPubKey = utils.GenKey(validator3.IP, validator3.Port)
m := mock_host.NewMockHost(ctrl)
// Asserts that the first and only call to Bar() is passed 99.
@@ -175,14 +175,14 @@ func TestProcessMessageValidatorCommitted(test *testing.T) {
defer ctrl.Finish()
leader := p2p.Peer{IP: "127.0.0.1", Port: "7782"}
-_, leader.BlsPubKey = utils.GenKey(leader.IP, leader.Port)
+_, leader.ConsensusPubKey = utils.GenKey(leader.IP, leader.Port)
validator1 := p2p.Peer{IP: "127.0.0.1", Port: "7784", ValidatorID: 1}
-_, validator1.BlsPubKey = utils.GenKey(validator1.IP, validator1.Port)
+_, validator1.ConsensusPubKey = utils.GenKey(validator1.IP, validator1.Port)
validator2 := p2p.Peer{IP: "127.0.0.1", Port: "7786", ValidatorID: 2}
-_, validator2.BlsPubKey = utils.GenKey(validator2.IP, validator2.Port)
+_, validator2.ConsensusPubKey = utils.GenKey(validator2.IP, validator2.Port)
validator3 := p2p.Peer{IP: "127.0.0.1", Port: "7788", ValidatorID: 3}
-_, validator3.BlsPubKey = utils.GenKey(validator3.IP, validator3.Port)
+_, validator3.ConsensusPubKey = utils.GenKey(validator3.IP, validator3.Port)
m := mock_host.NewMockHost(ctrl)
// Asserts that the first and only call to Bar() is passed 99.

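All of the consensus tests above share the same gomock scaffolding around the renamed peers; condensed, the pattern is (every call here appears verbatim in the hunks, only the grouping is mine):

    ctrl := gomock.NewController(test)
    defer ctrl.Finish()

    leader := p2p.Peer{IP: "127.0.0.1", Port: "9982"}
    _, leader.ConsensusPubKey = utils.GenKey(leader.IP, leader.Port)

    m := mock_host.NewMockHost(ctrl)
    // Expectations are set on m; ctrl.Finish() verifies them at test exit.
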
drand/drand.go:

@@ -91,13 +91,13 @@ func New(host p2p.Host, ShardID string, peers []p2p.Peer, leader p2p.Peer, confi
// Initialize cosign bitmap
allPublicKeys := make([]*bls.PublicKey, 0)
for _, validatorPeer := range peers {
-allPublicKeys = append(allPublicKeys, validatorPeer.BlsPubKey)
+allPublicKeys = append(allPublicKeys, validatorPeer.ConsensusPubKey)
}
-allPublicKeys = append(allPublicKeys, leader.BlsPubKey)
+allPublicKeys = append(allPublicKeys, leader.ConsensusPubKey)
dRand.PublicKeys = allPublicKeys
-bitmap, _ := bls_cosi.NewMask(dRand.PublicKeys, dRand.leader.BlsPubKey)
+bitmap, _ := bls_cosi.NewMask(dRand.PublicKeys, dRand.leader.ConsensusPubKey)
dRand.bitmap = bitmap
dRand.pRand = nil
@@ -139,7 +139,7 @@ func (dRand *DRand) AddPeers(peers []*p2p.Peer) int {
if !ok {
dRand.validators.Store(utils.GetUniqueIDFromPeer(*peer), *peer)
dRand.pubKeyLock.Lock()
-dRand.PublicKeys = append(dRand.PublicKeys, peer.BlsPubKey)
+dRand.PublicKeys = append(dRand.PublicKeys, peer.ConsensusPubKey)
dRand.pubKeyLock.Unlock()
utils.GetLogInstance().Debug("[DRAND]", "AddPeers", *peer)
}
@@ -238,7 +238,7 @@ func (dRand *DRand) getValidatorPeerByID(validatorID uint32) *p2p.Peer {
func (dRand *DRand) ResetState() {
dRand.vrfs = &map[uint32][]byte{}
-bitmap, _ := bls_cosi.NewMask(dRand.PublicKeys, dRand.leader.BlsPubKey)
+bitmap, _ := bls_cosi.NewMask(dRand.PublicKeys, dRand.leader.ConsensusPubKey)
dRand.bitmap = bitmap
dRand.pRand = nil
dRand.rand = nil
@@ -246,8 +246,8 @@ func (dRand *DRand) ResetState() {
// SetLeaderPubKey deserialize the public key of drand leader
func (dRand *DRand) SetLeaderPubKey(k []byte) error {
-dRand.leader.BlsPubKey = &bls.PublicKey{}
-return dRand.leader.BlsPubKey.Deserialize(k)
+dRand.leader.ConsensusPubKey = &bls.PublicKey{}
+return dRand.leader.ConsensusPubKey.Deserialize(k)
}
// UpdatePublicKeys updates the PublicKeys variable, protected by a mutex

drand/drand_leader.go:

@@ -74,7 +74,7 @@ func (dRand *DRand) init(epochBlock *types.Block) {
(*dRand.vrfs)[dRand.nodeID] = append(rand[:], proof...)
-utils.GetLogInstance().Info("[DRG] sent init", "msg", msgToSend, "leader.PubKey", dRand.leader.BlsPubKey)
+utils.GetLogInstance().Info("[DRG] sent init", "msg", msgToSend, "leader.PubKey", dRand.leader.ConsensusPubKey)
dRand.host.SendMessageToGroups([]p2p.GroupID{p2p.GroupIDBeacon}, host.ConstructP2pMessage(byte(17), msgToSend))
}
@@ -114,9 +114,9 @@ func (dRand *DRand) processCommitMessage(message drand_proto.Message) {
}
// Verify message signature
-err := verifyMessageSig(validatorPeer.BlsPubKey, message)
+err := verifyMessageSig(validatorPeer.ConsensusPubKey, message)
if err != nil {
-utils.GetLogInstance().Warn("[DRAND] failed to verify the message signature", "Error", err, "PubKey", validatorPeer.BlsPubKey)
+utils.GetLogInstance().Warn("[DRAND] failed to verify the message signature", "Error", err, "PubKey", validatorPeer.ConsensusPubKey)
return
}
@@ -136,7 +136,7 @@ func (dRand *DRand) processCommitMessage(message drand_proto.Message) {
utils.GetLogInstance().Debug("Received new VRF commit", "numReceivedSoFar", len((*vrfs)), "validatorID", validatorID, "PublicKeys", len(dRand.PublicKeys))
(*vrfs)[validatorID] = message.Payload
-dRand.bitmap.SetKey(validatorPeer.BlsPubKey, true) // Set the bitmap indicating that this validator signed.
+dRand.bitmap.SetKey(validatorPeer.ConsensusPubKey, true) // Set the bitmap indicating that this validator signed.
if len((*vrfs)) >= ((len(dRand.PublicKeys))/3 + 1) {
// Construct pRand and initiate consensus on it

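Note the lower threshold here: the DRand leader proceeds once n/3 + 1 VRF shares arrive, not the (2n/3)+1 quorum used for consensus signatures. A one-function sketch of the check from the hunk above:

    // enoughVRFShares reproduces the DRand condition:
    // len(*vrfs) >= len(PublicKeys)/3 + 1 (integer division).
    func enoughVRFShares(numShares, numKeys int) bool {
        return numShares >= numKeys/3+1
    }
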
drand/drand_validator.go:

@@ -37,7 +37,7 @@ func (dRand *DRand) processInitMessage(message drand_proto.Message) {
blockHash := message.BlockHash
// Verify message signature
-err := verifyMessageSig(dRand.leader.BlsPubKey, message)
+err := verifyMessageSig(dRand.leader.ConsensusPubKey, message)
if err != nil {
utils.GetLogInstance().Warn("[DRG] Failed to verify the message signature", "Error", err)
return

internal/configs/node/config.go:

@@ -71,8 +71,8 @@ type ConfigType struct {
Host p2p.Host
StakingPriKey *ecdsa.PrivateKey
P2pPriKey p2p_crypto.PrivKey
-BlsPriKey *bls.SecretKey
-BlsPubKey *bls.PublicKey
+ConsensusPriKey *bls.SecretKey
+ConsensusPubKey *bls.PublicKey
MainDB *ethdb.LDBDatabase
BeaconDB *ethdb.LDBDatabase

node/node.go:

@@ -244,7 +244,7 @@ func New(host p2p.Host, consensusObj *consensus.Consensus, db ethdb.Database) *N
node.BeaconBlockChannel = make(chan *types.Block)
node.TxPool = core.NewTxPool(core.DefaultTxPoolConfig, params.TestChainConfig, chain)
-node.Worker = worker.New(params.TestChainConfig, chain, node.Consensus, pki.GetAddressFromPublicKey(node.SelfPeer.BlsPubKey), node.Consensus.ShardID)
+node.Worker = worker.New(params.TestChainConfig, chain, node.Consensus, pki.GetAddressFromPublicKey(node.SelfPeer.ConsensusPubKey), node.Consensus.ShardID)
utils.GetLogInstance().Debug("Created Genesis Block", "blockHash", chain.GetBlockByNumber(0).Hash().Hex())
node.Consensus.ConsensusBlock = make(chan *consensus.BFTBlockInfo)
@@ -470,5 +470,5 @@ func (node *Node) AddBeaconChainDatabase(db ethdb.Database) {
os.Exit(1)
}
node.beaconChain = chain
-node.BeaconWorker = worker.New(params.TestChainConfig, chain, node.Consensus, pki.GetAddressFromPublicKey(node.SelfPeer.BlsPubKey), node.Consensus.ShardID)
+node.BeaconWorker = worker.New(params.TestChainConfig, chain, node.Consensus, pki.GetAddressFromPublicKey(node.SelfPeer.ConsensusPubKey), node.Consensus.ShardID)
}

node/node_handler.go:

@@ -268,7 +268,7 @@ func (node *Node) BroadcastNewBlock(newBlock *types.Block) {
// VerifyNewBlock is called by consensus participants to verify the block (account model) they are running consensus on
func (node *Node) VerifyNewBlock(newBlock *types.Block) bool {
-err := node.blockchain.ValidateNewBlock(newBlock, pki.GetAddressFromPublicKey(node.SelfPeer.BlsPubKey))
+err := node.blockchain.ValidateNewBlock(newBlock, pki.GetAddressFromPublicKey(node.SelfPeer.ConsensusPubKey))
if err != nil {
utils.GetLogInstance().Debug("Failed verifying new block", "Error", err, "tx", newBlock.Transactions()[0])
return false
@@ -343,8 +343,8 @@ func (node *Node) pingMessageHandler(msgPayload []byte, sender string) int {
peer.PeerID = ping.Node.PeerID
peer.ValidatorID = ping.Node.ValidatorID
-peer.BlsPubKey = &bls.PublicKey{}
-err = peer.BlsPubKey.Deserialize(ping.Node.PubKey[:])
+peer.ConsensusPubKey = &bls.PublicKey{}
+err = peer.ConsensusPubKey.Deserialize(ping.Node.PubKey[:])
if err != nil {
utils.GetLogInstance().Error("UnmarshalBinary Failed", "error", err)
return -1
@@ -450,8 +450,8 @@ func (node *Node) pongMessageHandler(msgPayload []byte) int {
peer.ValidatorID = p.ValidatorID
peer.PeerID = p.PeerID
-peer.BlsPubKey = &bls.PublicKey{}
-err = peer.BlsPubKey.Deserialize(p.PubKey[:])
+peer.ConsensusPubKey = &bls.PublicKey{}
+err = peer.ConsensusPubKey.Deserialize(p.PubKey[:])
if err != nil {
utils.GetLogInstance().Error("UnmarshalBinary Failed", "error", err)
continue

node/node_handler_test.go:

@@ -11,7 +11,7 @@ import (
func TestAddNewBlock(t *testing.T) {
_, pubKey := utils.GenKey("1", "2")
-leader := p2p.Peer{IP: "127.0.0.1", Port: "9882", BlsPubKey: pubKey}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "9882", ConsensusPubKey: pubKey}
validator := p2p.Peer{IP: "127.0.0.1", Port: "9885"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
@@ -34,7 +34,7 @@ func TestAddNewBlock(t *testing.T) {
func TestVerifyNewBlock(t *testing.T) {
_, pubKey := utils.GenKey("1", "2")
-leader := p2p.Peer{IP: "127.0.0.1", Port: "8882", BlsPubKey: pubKey}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "8882", ConsensusPubKey: pubKey}
validator := p2p.Peer{IP: "127.0.0.1", Port: "8885"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)

node/node_test.go:

@@ -19,7 +19,7 @@ import (
func TestNewNode(t *testing.T) {
_, pubKey := utils.GenKey("1", "2")
-leader := p2p.Peer{IP: "127.0.0.1", Port: "8882", BlsPubKey: pubKey}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "8882", ConsensusPubKey: pubKey}
validator := p2p.Peer{IP: "127.0.0.1", Port: "8885"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
@@ -43,7 +43,7 @@ func TestNewNode(t *testing.T) {
func TestGetSyncingPeers(t *testing.T) {
_, pubKey := utils.GenKey("1", "2")
-leader := p2p.Peer{IP: "127.0.0.1", Port: "8882", BlsPubKey: pubKey}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "8882", ConsensusPubKey: pubKey}
validator := p2p.Peer{IP: "127.0.0.1", Port: "8885"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
@@ -75,18 +75,18 @@ func TestAddPeers(t *testing.T) {
&p2p.Peer{
IP: "127.0.0.1",
Port: "8888",
-BlsPubKey: pubKey1,
+ConsensusPubKey: pubKey1,
ValidatorID: 1,
},
&p2p.Peer{
IP: "127.0.0.1",
Port: "9999",
-BlsPubKey: pubKey2,
+ConsensusPubKey: pubKey2,
ValidatorID: 2,
},
}
_, pubKey := utils.GenKey("1", "2")
-leader := p2p.Peer{IP: "127.0.0.1", Port: "8982", BlsPubKey: pubKey}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "8982", ConsensusPubKey: pubKey}
validator := p2p.Peer{IP: "127.0.0.1", Port: "8985"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
@@ -118,20 +118,20 @@ func TestAddBeaconPeer(t *testing.T) {
&p2p.Peer{
IP: "127.0.0.1",
Port: "8888",
-BlsPubKey: pubKey1,
+ConsensusPubKey: pubKey1,
ValidatorID: 1,
PeerID: "1234",
},
&p2p.Peer{
IP: "127.0.0.1",
Port: "9999",
-BlsPubKey: pubKey2,
+ConsensusPubKey: pubKey2,
ValidatorID: 2,
PeerID: "4567",
},
}
_, pubKey := utils.GenKey("1", "2")
-leader := p2p.Peer{IP: "127.0.0.1", Port: "8982", BlsPubKey: pubKey}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "8982", ConsensusPubKey: pubKey}
validator := p2p.Peer{IP: "127.0.0.1", Port: "8985"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
@@ -163,7 +163,7 @@ func sendPingMessage(node *Node, leader p2p.Peer) {
p1 := p2p.Peer{
IP: "127.0.0.1",
Port: "9999",
-BlsPubKey: pubKey1,
+ConsensusPubKey: pubKey1,
}
ping1 := proto_discovery.NewPingMessage(p1)
@@ -176,12 +176,12 @@ func sendPongMessage(node *Node, leader p2p.Peer) {
p1 := p2p.Peer{
IP: "127.0.0.1",
Port: "9998",
-BlsPubKey: pubKey1,
+ConsensusPubKey: pubKey1,
}
p2 := p2p.Peer{
IP: "127.0.0.1",
Port: "9999",
-BlsPubKey: pubKey2,
+ConsensusPubKey: pubKey2,
}
pubKeys := []*bls.PublicKey{pubKey1, pubKey2}
@@ -200,7 +200,7 @@ func exitServer() {
func TestPingPongHandler(t *testing.T) {
_, pubKey := utils.GenKey("127.0.0.1", "8881")
-leader := p2p.Peer{IP: "127.0.0.1", Port: "8881", BlsPubKey: pubKey}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "8881", ConsensusPubKey: pubKey}
// validator := p2p.Peer{IP: "127.0.0.1", Port: "9991"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)

node/staking_test.go:

@@ -21,7 +21,7 @@ var (
func TestUpdateStakingList(t *testing.T) {
_, pubKey := utils.GenKey("1", "2")
-leader := p2p.Peer{IP: "127.0.0.1", Port: "9882", BlsPubKey: pubKey}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "9882", ConsensusPubKey: pubKey}
validator := p2p.Peer{IP: "127.0.0.1", Port: "9885"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)

p2p/p2p.go:

@@ -16,7 +16,7 @@ type StreamHandler func(Stream)
type Peer struct {
IP string // IP address of the peer
Port string // Port number of the peer
-BlsPubKey *bls.PublicKey // Public key of the peer, used for consensus signing
+ConsensusPubKey *bls.PublicKey // Public key of the peer, used for consensus signing
ValidatorID int // -1 is the default value, means not assigned any validator ID in the shard
Addrs []ma.Multiaddr // MultiAddress of the peer
PeerID libp2p_peer.ID // PeerID, the pubkey for communication

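With the struct field renamed, every construction site in this commit reduces to the same two-line pattern (taken from the tests above):

    _, pubKey := utils.GenKey("127.0.0.1", "9000")
    leader := p2p.Peer{IP: "127.0.0.1", Port: "9000", ConsensusPubKey: pubKey}
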
specs/test/testplan.md:

@@ -57,9 +57,9 @@ It should cover the basic function to pass, to fail, and error conditions.
### state syncing
* test case # : SS1
-* description : beacon chain node ss
-* test procedure : one new beacon node join in the beacon chain after beacon chain reach a few consensuses
-* passing criteria : the new node can join in the consensus after state syncing
+* description : node state sync basic
+* test procedure : node joins network and is able to sync to latest block
+* passing criteria : blockheight of node is equal to that of the shards blockchain and node has joined consensus.
* dependency
* note
* automated? N
@@ -72,6 +72,14 @@ It should cover the basic function to pass, to fail, and error conditions.
* note
* automated? N
---
+* test case # : SS1
+* description : beacon chain node ss
+* test procedure : one new beacon node join in the beacon chain after beacon chain reach a few consensuses
+* passing criteria : the new node can join in the consensus after state syncing
+* dependency
+* note
+* automated? N
+---
### consensus
