pull/535/head
Rongjian Lan 6 years ago
commit 63763c74ba
24 changed files (changed lines per file in parentheses):

1. api/proto/discovery/pingpong.go (4)
2. api/proto/discovery/pingpong_test.go (6)
3. api/service/discovery/discovery_test.go (2)
4. api/service/networkinfo/service_test.go (2)
5. cmd/client/txgen/main.go (2)
6. cmd/harmony/main.go (6)
7. consensus/consensus.go (26)
8. consensus/consensus_leader.go (14)
9. consensus/consensus_leader_msg_test.go (4)
10. consensus/consensus_leader_test.go (12)
11. consensus/consensus_test.go (10)
12. consensus/consensus_validator.go (6)
13. consensus/consensus_validator_test.go (24)
14. drand/drand.go (14)
15. drand/drand_leader.go (8)
16. drand/drand_validator.go (2)
17. internal/configs/node/config.go (4)
18. node/node.go (4)
19. node/node_handler.go (10)
20. node/node_handler_test.go (4)
21. node/node_test.go (24)
22. node/staking_test.go (2)
23. p2p/p2p.go (2)
24. specs/test/testplan.md (14)

@@ -54,7 +54,7 @@ func NewPingMessage(peer p2p.Peer) *PingMessageType {
ping.Node.Port = peer.Port
ping.Node.PeerID = peer.PeerID
ping.Node.ValidatorID = peer.ValidatorID
-ping.Node.PubKey = peer.BlsPubKey.Serialize()
+ping.Node.PubKey = peer.ConsensusPubKey.Serialize()
ping.Node.Role = node.ValidatorRole
return ping
@@ -75,7 +75,7 @@ func NewPongMessage(peers []p2p.Peer, pubKeys []*bls.PublicKey, leaderKey *bls.P
n.Port = p.Port
n.ValidatorID = p.ValidatorID
n.PeerID = p.PeerID
-n.PubKey = p.BlsPubKey.Serialize()
+n.PubKey = p.ConsensusPubKey.Serialize()
if err != nil {
fmt.Printf("Error Marshal PubKey: %v", err)
continue

@@ -19,7 +19,7 @@ var (
IP: "127.0.0.1",
Port: "9999",
ValidatorID: -1,
-BlsPubKey: pubKey1,
+ConsensusPubKey: pubKey1,
}
e1 = "ping:Validator/1=>127.0.0.1:9999:-1/[120 1 130 197 30 202 78 236 84 249 5 230 132 208 242 242 246 63 100 123 96 11 211 228 4 56 64 94 57 133 3 226 254 222 231 160 178 81 252 205 40 28 45 2 90 74 207 15 68 86 138 68 143 176 221 161 108 105 133 6 64 121 92 25 134 255 9 209 156 209 119 187 13 160 23 147 240 24 196 152 100 20 163 51 118 45 100 26 179 227 184 166 147 113 50 139]"
e3 = "ping:Client/1=>127.0.0.1:9999:-1/[120 1 130 197 30 202 78 236 84 249 5 230 132 208 242 242 246 63 100 123 96 11 211 228 4 56 64 94 57 133 3 226 254 222 231 160 178 81 252 205 40 28 45 2 90 74 207 15 68 86 138 68 143 176 221 161 108 105 133 6 64 121 92 25 134 255 9 209 156 209 119 187 13 160 23 147 240 24 196 152 100 20 163 51 118 45 100 26 179 227 184 166 147 113 50 139]"
@@ -30,13 +30,13 @@ var (
{
IP: "127.0.0.1",
Port: "8888",
-BlsPubKey: pubKey1,
+ConsensusPubKey: pubKey1,
ValidatorID: -1,
},
{
IP: "127.0.0.1",
Port: "9999",
-BlsPubKey: pubKey2,
+ConsensusPubKey: pubKey2,
ValidatorID: -2,
},
}

@@ -25,7 +25,7 @@ func TestDiscoveryService(t *testing.T) {
if peerPriKey == nil || peerPubKey == nil {
t.Fatal("generate key error")
}
-selfPeer := p2p.Peer{IP: "127.0.0.1", Port: "12345", ValidatorID: -1, BlsPubKey: peerPubKey}
+selfPeer := p2p.Peer{IP: "127.0.0.1", Port: "12345", ValidatorID: -1, ConsensusPubKey: peerPubKey}
host, err := p2pimpl.NewHost(&selfPeer, nodePriKey)
if err != nil {

@@ -18,7 +18,7 @@ func TestService(t *testing.T) {
if peerPriKey == nil || peerPubKey == nil {
t.Fatal("generate key error")
}
-selfPeer := p2p.Peer{IP: "127.0.0.1", Port: "12345", ValidatorID: -1, BlsPubKey: peerPubKey}
+selfPeer := p2p.Peer{IP: "127.0.0.1", Port: "12345", ValidatorID: -1, ConsensusPubKey: peerPubKey}
host, err := p2pimpl.NewHost(&selfPeer, nodePriKey)
if err != nil {

@@ -83,7 +83,7 @@ func main() {
panic(fmt.Errorf("generate key error"))
}
-selfPeer := p2p.Peer{IP: *ip, Port: *port, ValidatorID: -1, BlsPubKey: peerPubKey}
+selfPeer := p2p.Peer{IP: *ip, Port: *port, ValidatorID: -1, ConsensusPubKey: peerPubKey}
// Init with LibP2P enabled, FIXME: (leochen) right now we support only one shard
shardIDLeaderMap = make(map[uint32]p2p.Peer)

@@ -127,8 +127,8 @@ func createGlobalConfig() *nodeconfig.ConfigType {
}
// Setup Bls keys
-nodeConfig.BlsPriKey, nodeConfig.BlsPubKey = utils.GenKey(*ip, *port)
-if nodeConfig.BlsPriKey == nil || nodeConfig.BlsPubKey == nil {
+nodeConfig.ConsensusPriKey, nodeConfig.ConsensusPubKey = utils.GenKey(*ip, *port)
+if nodeConfig.ConsensusPriKey == nil || nodeConfig.ConsensusPubKey == nil {
panic(fmt.Errorf("generate key error"))
}
@@ -142,7 +142,7 @@ func createGlobalConfig() *nodeconfig.ConfigType {
}
}
-nodeConfig.SelfPeer = p2p.Peer{IP: *ip, Port: *port, ValidatorID: -1, BlsPubKey: nodeConfig.BlsPubKey}
+nodeConfig.SelfPeer = p2p.Peer{IP: *ip, Port: *port, ValidatorID: -1, ConsensusPubKey: nodeConfig.ConsensusPubKey}
if *isLeader {
nodeConfig.StringRole = "leader"
nodeConfig.Leader = nodeConfig.SelfPeer

@@ -192,14 +192,14 @@ func New(host p2p.Host, ShardID string, peers []p2p.Peer, leader p2p.Peer) *Cons
// Initialize cosign bitmap
allPublicKeys := make([]*bls.PublicKey, 0)
for _, validatorPeer := range peers {
-allPublicKeys = append(allPublicKeys, validatorPeer.BlsPubKey)
+allPublicKeys = append(allPublicKeys, validatorPeer.ConsensusPubKey)
}
-allPublicKeys = append(allPublicKeys, leader.BlsPubKey)
+allPublicKeys = append(allPublicKeys, leader.ConsensusPubKey)
consensus.PublicKeys = allPublicKeys
-prepareBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.BlsPubKey)
-commitBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.BlsPubKey)
+prepareBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.ConsensusPubKey)
+commitBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.ConsensusPubKey)
consensus.prepareBitmap = prepareBitmap
consensus.commitBitmap = commitBitmap
@@ -384,8 +384,8 @@ func (consensus *Consensus) ResetState() {
consensus.prepareSigs = map[uint32]*bls.Sign{}
consensus.commitSigs = map[uint32]*bls.Sign{}
-prepareBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.BlsPubKey)
-commitBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.BlsPubKey)
+prepareBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.ConsensusPubKey)
+commitBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.ConsensusPubKey)
consensus.prepareBitmap = prepareBitmap
consensus.commitBitmap = commitBitmap
@@ -421,7 +421,7 @@ func (consensus *Consensus) AddPeers(peers []*p2p.Peer) int {
}
consensus.validators.Store(utils.GetUniqueIDFromPeer(*peer), *peer)
consensus.pubKeyLock.Lock()
-consensus.PublicKeys = append(consensus.PublicKeys, peer.BlsPubKey)
+consensus.PublicKeys = append(consensus.PublicKeys, peer.ConsensusPubKey)
consensus.pubKeyLock.Unlock()
// utils.GetLogInstance().Debug("[SYNC]", "new peer added", peer)
}
@@ -458,7 +458,7 @@ func (consensus *Consensus) RemovePeers(peers []p2p.Peer) int {
for i, pp := range newList {
// Not Found the pubkey, if found pubkey, ignore it
-if reflect.DeepEqual(peer.BlsPubKey, pp) {
+if reflect.DeepEqual(peer.ConsensusPubKey, pp) {
// consensus.Log.Debug("RemovePeers", "i", i, "pp", pp, "peer.PubKey", peer.PubKey)
newList = append(newList[:i], newList[i+1:]...)
count2++
@@ -473,7 +473,7 @@ func (consensus *Consensus) RemovePeers(peers []p2p.Peer) int {
// Or the shard won't be able to reach consensus if public keys are mismatch
validators := consensus.GetValidatorPeers()
-pong := proto_discovery.NewPongMessage(validators, consensus.PublicKeys, consensus.leader.BlsPubKey)
+pong := proto_discovery.NewPongMessage(validators, consensus.PublicKeys, consensus.leader.ConsensusPubKey)
buffer := pong.ConstructPongMessage()
consensus.host.SendMessageToGroups([]p2p.GroupID{p2p.GroupIDBeacon}, host.ConstructP2pMessage(byte(17), buffer))
@@ -497,7 +497,7 @@ func (consensus *Consensus) DebugPrintValidators() {
count := 0
consensus.validators.Range(func(k, v interface{}) bool {
if p, ok := v.(p2p.Peer); ok {
-str2 := fmt.Sprintf("%s", p.BlsPubKey.Serialize())
+str2 := fmt.Sprintf("%s", p.ConsensusPubKey.Serialize())
utils.GetLogInstance().Debug("validator:", "IP", p.IP, "Port", p.Port, "VID", p.ValidatorID, "Key", str2)
count++
return true
@@ -653,11 +653,11 @@ func (consensus *Consensus) signAndMarshalConsensusMessage(message *msg_pb.Messa
// SetLeaderPubKey deserialize the public key of consensus leader
func (consensus *Consensus) SetLeaderPubKey(k []byte) error {
-consensus.leader.BlsPubKey = &bls.PublicKey{}
-return consensus.leader.BlsPubKey.Deserialize(k)
+consensus.leader.ConsensusPubKey = &bls.PublicKey{}
+return consensus.leader.ConsensusPubKey.Deserialize(k)
}
// GetLeaderPubKey returns the public key of consensus leader
func (consensus *Consensus) GetLeaderPubKey() *bls.PublicKey {
-return consensus.leader.BlsPubKey
+return consensus.leader.ConsensusPubKey
}

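The consensus.go hunks above are a pure rename: the BLS key material and its wire format are untouched, so SetLeaderPubKey/GetLeaderPubKey still just deserialize and return raw bytes. A minimal, hedged sketch of that round-trip (not part of the commit; the herumi BLS binding import path and curve initialization are assumptions):

```go
package main

import (
	"fmt"

	"github.com/harmony-one/bls/ffi/go/bls" // assumed import path for the herumi BLS binding
)

func main() {
	// Curve choice is an assumption; Harmony performs BLS initialization in its utils package.
	if err := bls.Init(bls.BLS12_381); err != nil {
		panic(err)
	}

	var sec bls.SecretKey
	sec.SetByCSPRNG()
	pub := sec.GetPublicKey()

	// Same round-trip SetLeaderPubKey performs on the renamed ConsensusPubKey field:
	// serialize to raw bytes, then deserialize into a fresh PublicKey.
	restored := &bls.PublicKey{}
	if err := restored.Deserialize(pub.Serialize()); err != nil {
		panic(err)
	}
	fmt.Println("round-trip ok:", restored.IsEqual(pub))
}
```
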
@@ -66,7 +66,7 @@ func (consensus *Consensus) WaitForNewBlock(blockChannel chan *types.Block, stop
pRnd := [32]byte{}
copy(pRnd[:], pRndAndBitmap[:32])
bitmap := pRndAndBitmap[32:]
-vrfBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.BlsPubKey)
+vrfBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.ConsensusPubKey)
vrfBitmap.SetMask(bitmap)
// TODO: check validity of pRnd
@@ -166,7 +166,7 @@ func (consensus *Consensus) processPrepareMessage(message *msg_pb.Message) {
return
}
-if err := consensus.checkConsensusMessage(message, validatorPeer.BlsPubKey); err != nil {
+if err := consensus.checkConsensusMessage(message, validatorPeer.ConsensusPubKey); err != nil {
utils.GetLogInstance().Debug("Failed to check the validator message", "validatorID", validatorID)
return
}
@@ -191,14 +191,14 @@ func (consensus *Consensus) processPrepareMessage(message *msg_pb.Message) {
return
}
-if !sign.VerifyHash(validatorPeer.BlsPubKey, consensus.blockHash[:]) {
+if !sign.VerifyHash(validatorPeer.ConsensusPubKey, consensus.blockHash[:]) {
utils.GetLogInstance().Error("Received invalid BLS signature", "validatorID", validatorID)
return
}
utils.GetLogInstance().Debug("Received new prepare signature", "numReceivedSoFar", len(prepareSigs), "validatorID", validatorID, "PublicKeys", len(consensus.PublicKeys))
prepareSigs[validatorID] = &sign
-prepareBitmap.SetKey(validatorPeer.BlsPubKey, true) // Set the bitmap indicating that this validator signed.
+prepareBitmap.SetKey(validatorPeer.ConsensusPubKey, true) // Set the bitmap indicating that this validator signed.
targetState := PreparedDone
if len(prepareSigs) >= ((len(consensus.PublicKeys)*2)/3+1) && consensus.state < targetState {
@@ -237,7 +237,7 @@ func (consensus *Consensus) processCommitMessage(message *msg_pb.Message) {
return
}
-if err := consensus.checkConsensusMessage(message, validatorPeer.BlsPubKey); err != nil {
+if err := consensus.checkConsensusMessage(message, validatorPeer.ConsensusPubKey); err != nil {
utils.GetLogInstance().Debug("Failed to check the validator message", "validatorID", validatorID)
return
}
@@ -265,7 +265,7 @@ func (consensus *Consensus) processCommitMessage(message *msg_pb.Message) {
return
}
aggSig := bls_cosi.AggregateSig(consensus.GetPrepareSigsArray())
-if !sign.VerifyHash(validatorPeer.BlsPubKey, append(aggSig.Serialize(), consensus.prepareBitmap.Bitmap...)) {
+if !sign.VerifyHash(validatorPeer.ConsensusPubKey, append(aggSig.Serialize(), consensus.prepareBitmap.Bitmap...)) {
utils.GetLogInstance().Error("Received invalid BLS signature", "validatorID", validatorID)
return
}
@@ -273,7 +273,7 @@ func (consensus *Consensus) processCommitMessage(message *msg_pb.Message) {
utils.GetLogInstance().Debug("Received new commit message", "numReceivedSoFar", len(commitSigs), "validatorID", strconv.Itoa(int(validatorID)))
commitSigs[validatorID] = &sign
// Set the bitmap indicating that this validator signed.
-commitBitmap.SetKey(validatorPeer.BlsPubKey, true)
+commitBitmap.SetKey(validatorPeer.ConsensusPubKey, true)
targetState := CommittedDone
if len(commitSigs) >= ((len(consensus.PublicKeys)*2)/3+1) && consensus.state != targetState {

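The context lines in the prepare/commit handlers above keep the quorum condition `len(sigs) >= (len(PublicKeys)*2)/3 + 1` unchanged; only the key field feeding the signature bitmap is renamed. A quick illustrative check of that integer arithmetic (not part of the diff):

```go
package main

import "fmt"

// Prints the signature quorum implied by the check in processPrepareMessage and
// processCommitMessage: len(sigs) >= (len(keys)*2)/3 + 1.
func main() {
	for _, n := range []int{1, 3, 4, 7, 10} {
		fmt.Printf("%2d public keys -> quorum of %d signatures\n", n, (n*2)/3+1)
	}
}
```
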
@@ -36,10 +36,10 @@ func TestConstructAnnounceMessage(test *testing.T) {
func TestConstructPreparedMessage(test *testing.T) {
leaderPriKey, leaderPubKey := utils.GenKey("127.0.0.1", "6000")
-leader := p2p.Peer{IP: "127.0.0.1", Port: "6000", BlsPubKey: leaderPubKey}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "6000", ConsensusPubKey: leaderPubKey}
validatorPriKey, validatorPubKey := utils.GenKey("127.0.0.1", "5555")
-validator := p2p.Peer{IP: "127.0.0.1", Port: "5555", BlsPubKey: validatorPubKey}
+validator := p2p.Peer{IP: "127.0.0.1", Port: "5555", ConsensusPubKey: validatorPubKey}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
if err != nil {

@@ -30,7 +30,7 @@ func TestProcessMessageLeaderPrepare(test *testing.T) {
defer ctrl.Finish()
leader := p2p.Peer{IP: ip, Port: "7777"}
-_, leader.BlsPubKey = utils.GenKey(leader.IP, leader.Port)
+_, leader.ConsensusPubKey = utils.GenKey(leader.IP, leader.Port)
validators := make([]p2p.Peer, 3)
hosts := make([]p2p.Host, 3)
@@ -38,7 +38,7 @@ func TestProcessMessageLeaderPrepare(test *testing.T) {
for i := 0; i < 3; i++ {
port := fmt.Sprintf("%d", 7788+i)
validators[i] = p2p.Peer{IP: ip, Port: port, ValidatorID: i + 1}
-_, validators[i].BlsPubKey = utils.GenKey(validators[i].IP, validators[i].Port)
+_, validators[i].ConsensusPubKey = utils.GenKey(validators[i].IP, validators[i].Port)
}
m := mock_host.NewMockHost(ctrl)
@@ -76,7 +76,7 @@ func TestProcessMessageLeaderPrepareInvalidSignature(test *testing.T) {
defer ctrl.Finish()
leader := p2p.Peer{IP: ip, Port: "7777"}
-_, leader.BlsPubKey = utils.GenKey(leader.IP, leader.Port)
+_, leader.ConsensusPubKey = utils.GenKey(leader.IP, leader.Port)
validators := make([]p2p.Peer, 3)
hosts := make([]p2p.Host, 3)
@@ -84,7 +84,7 @@ func TestProcessMessageLeaderPrepareInvalidSignature(test *testing.T) {
for i := 0; i < 3; i++ {
port := fmt.Sprintf("%d", 7788+i)
validators[i] = p2p.Peer{IP: ip, Port: port, ValidatorID: i + 1}
-_, validators[i].BlsPubKey = utils.GenKey(validators[i].IP, validators[i].Port)
+_, validators[i].ConsensusPubKey = utils.GenKey(validators[i].IP, validators[i].Port)
}
m := mock_host.NewMockHost(ctrl)
@@ -130,7 +130,7 @@ func TestProcessMessageLeaderCommit(test *testing.T) {
defer ctrl.Finish()
leader := p2p.Peer{IP: ip, Port: "8889"}
-_, leader.BlsPubKey = utils.GenKey(leader.IP, leader.Port)
+_, leader.ConsensusPubKey = utils.GenKey(leader.IP, leader.Port)
validators := make([]p2p.Peer, 3)
hosts := make([]p2p.Host, 3)
@@ -138,7 +138,7 @@ func TestProcessMessageLeaderCommit(test *testing.T) {
for i := 0; i < 3; i++ {
port := fmt.Sprintf("%d", 8788+i)
validators[i] = p2p.Peer{IP: ip, Port: port, ValidatorID: i + 1}
-_, validators[i].BlsPubKey = utils.GenKey(validators[i].IP, validators[i].Port)
+_, validators[i].ConsensusPubKey = utils.GenKey(validators[i].IP, validators[i].Port)
}
m := mock_host.NewMockHost(ctrl)

@@ -39,16 +39,16 @@ func TestRemovePeers(t *testing.T) {
_, pk4 := utils.GenKey("4", "4")
_, pk5 := utils.GenKey("5", "5")
-p1 := p2p.Peer{IP: "127.0.0.1", Port: "19901", BlsPubKey: pk1}
-p2 := p2p.Peer{IP: "127.0.0.1", Port: "19902", BlsPubKey: pk2}
-p3 := p2p.Peer{IP: "127.0.0.1", Port: "19903", BlsPubKey: pk3}
-p4 := p2p.Peer{IP: "127.0.0.1", Port: "19904", BlsPubKey: pk4}
+p1 := p2p.Peer{IP: "127.0.0.1", Port: "19901", ConsensusPubKey: pk1}
+p2 := p2p.Peer{IP: "127.0.0.1", Port: "19902", ConsensusPubKey: pk2}
+p3 := p2p.Peer{IP: "127.0.0.1", Port: "19903", ConsensusPubKey: pk3}
+p4 := p2p.Peer{IP: "127.0.0.1", Port: "19904", ConsensusPubKey: pk4}
peers := []p2p.Peer{p1, p2, p3, p4}
peerRemove := []p2p.Peer{p1, p2}
-leader := p2p.Peer{IP: "127.0.0.1", Port: "9000", BlsPubKey: pk5}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "9000", ConsensusPubKey: pk5}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
if err != nil {

@@ -91,7 +91,7 @@ func (consensus *Consensus) processAnnounceMessage(message *msg_pb.Message) {
copy(consensus.blockHash[:], blockHash[:])
consensus.block = block
-if err := consensus.checkConsensusMessage(message, consensus.leader.BlsPubKey); err != nil {
+if err := consensus.checkConsensusMessage(message, consensus.leader.ConsensusPubKey); err != nil {
utils.GetLogInstance().Debug("Failed to check the leader message")
if err == consensus_engine.ErrConsensusIDNotMatch {
utils.GetLogInstance().Debug("sending bft block to state syncing")
@@ -152,7 +152,7 @@ func (consensus *Consensus) processPreparedMessage(message *msg_pb.Message) {
// Update readyByConsensus for attack.
attack.GetInstance().UpdateConsensusReady(consensusID)
-if err := consensus.checkConsensusMessage(message, consensus.leader.BlsPubKey); err != nil {
+if err := consensus.checkConsensusMessage(message, consensus.leader.ConsensusPubKey); err != nil {
utils.GetLogInstance().Debug("processPreparedMessage error", "error", err)
return
}
@@ -213,7 +213,7 @@ func (consensus *Consensus) processCommittedMessage(message *msg_pb.Message) {
// Update readyByConsensus for attack.
attack.GetInstance().UpdateConsensusReady(consensusID)
-if err := consensus.checkConsensusMessage(message, consensus.leader.BlsPubKey); err != nil {
+if err := consensus.checkConsensusMessage(message, consensus.leader.ConsensusPubKey); err != nil {
utils.GetLogInstance().Debug("processCommittedMessage error", "error", err)
return
}

@@ -53,14 +53,14 @@ func TestProcessMessageValidatorAnnounce(test *testing.T) {
defer ctrl.Finish()
leader := p2p.Peer{IP: "127.0.0.1", Port: "9982"}
-_, leader.BlsPubKey = utils.GenKey(leader.IP, leader.Port)
+_, leader.ConsensusPubKey = utils.GenKey(leader.IP, leader.Port)
validator1 := p2p.Peer{IP: "127.0.0.1", Port: "9984", ValidatorID: 1}
-_, validator1.BlsPubKey = utils.GenKey(validator1.IP, validator1.Port)
+_, validator1.ConsensusPubKey = utils.GenKey(validator1.IP, validator1.Port)
validator2 := p2p.Peer{IP: "127.0.0.1", Port: "9986", ValidatorID: 2}
-_, validator2.BlsPubKey = utils.GenKey(validator2.IP, validator2.Port)
+_, validator2.ConsensusPubKey = utils.GenKey(validator2.IP, validator2.Port)
validator3 := p2p.Peer{IP: "127.0.0.1", Port: "9988", ValidatorID: 3}
-_, validator3.BlsPubKey = utils.GenKey(validator3.IP, validator3.Port)
+_, validator3.ConsensusPubKey = utils.GenKey(validator3.IP, validator3.Port)
m := mock_host.NewMockHost(ctrl)
// Asserts that the first and only call to Bar() is passed 99.
@@ -107,14 +107,14 @@ func TestProcessMessageValidatorPrepared(test *testing.T) {
defer ctrl.Finish()
leader := p2p.Peer{IP: "127.0.0.1", Port: "7782"}
-_, leader.BlsPubKey = utils.GenKey(leader.IP, leader.Port)
+_, leader.ConsensusPubKey = utils.GenKey(leader.IP, leader.Port)
validator1 := p2p.Peer{IP: "127.0.0.1", Port: "7784", ValidatorID: 1}
-_, validator1.BlsPubKey = utils.GenKey(validator1.IP, validator1.Port)
+_, validator1.ConsensusPubKey = utils.GenKey(validator1.IP, validator1.Port)
validator2 := p2p.Peer{IP: "127.0.0.1", Port: "7786", ValidatorID: 2}
-_, validator2.BlsPubKey = utils.GenKey(validator2.IP, validator2.Port)
+_, validator2.ConsensusPubKey = utils.GenKey(validator2.IP, validator2.Port)
validator3 := p2p.Peer{IP: "127.0.0.1", Port: "7788", ValidatorID: 3}
-_, validator3.BlsPubKey = utils.GenKey(validator3.IP, validator3.Port)
+_, validator3.ConsensusPubKey = utils.GenKey(validator3.IP, validator3.Port)
m := mock_host.NewMockHost(ctrl)
// Asserts that the first and only call to Bar() is passed 99.
@@ -175,14 +175,14 @@ func TestProcessMessageValidatorCommitted(test *testing.T) {
defer ctrl.Finish()
leader := p2p.Peer{IP: "127.0.0.1", Port: "7782"}
-_, leader.BlsPubKey = utils.GenKey(leader.IP, leader.Port)
+_, leader.ConsensusPubKey = utils.GenKey(leader.IP, leader.Port)
validator1 := p2p.Peer{IP: "127.0.0.1", Port: "7784", ValidatorID: 1}
-_, validator1.BlsPubKey = utils.GenKey(validator1.IP, validator1.Port)
+_, validator1.ConsensusPubKey = utils.GenKey(validator1.IP, validator1.Port)
validator2 := p2p.Peer{IP: "127.0.0.1", Port: "7786", ValidatorID: 2}
-_, validator2.BlsPubKey = utils.GenKey(validator2.IP, validator2.Port)
+_, validator2.ConsensusPubKey = utils.GenKey(validator2.IP, validator2.Port)
validator3 := p2p.Peer{IP: "127.0.0.1", Port: "7788", ValidatorID: 3}
-_, validator3.BlsPubKey = utils.GenKey(validator3.IP, validator3.Port)
+_, validator3.ConsensusPubKey = utils.GenKey(validator3.IP, validator3.Port)
m := mock_host.NewMockHost(ctrl)
// Asserts that the first and only call to Bar() is passed 99.

@@ -91,13 +91,13 @@ func New(host p2p.Host, ShardID string, peers []p2p.Peer, leader p2p.Peer, confi
// Initialize cosign bitmap
allPublicKeys := make([]*bls.PublicKey, 0)
for _, validatorPeer := range peers {
-allPublicKeys = append(allPublicKeys, validatorPeer.BlsPubKey)
+allPublicKeys = append(allPublicKeys, validatorPeer.ConsensusPubKey)
}
-allPublicKeys = append(allPublicKeys, leader.BlsPubKey)
+allPublicKeys = append(allPublicKeys, leader.ConsensusPubKey)
dRand.PublicKeys = allPublicKeys
-bitmap, _ := bls_cosi.NewMask(dRand.PublicKeys, dRand.leader.BlsPubKey)
+bitmap, _ := bls_cosi.NewMask(dRand.PublicKeys, dRand.leader.ConsensusPubKey)
dRand.bitmap = bitmap
dRand.pRand = nil
@@ -139,7 +139,7 @@ func (dRand *DRand) AddPeers(peers []*p2p.Peer) int {
if !ok {
dRand.validators.Store(utils.GetUniqueIDFromPeer(*peer), *peer)
dRand.pubKeyLock.Lock()
-dRand.PublicKeys = append(dRand.PublicKeys, peer.BlsPubKey)
+dRand.PublicKeys = append(dRand.PublicKeys, peer.ConsensusPubKey)
dRand.pubKeyLock.Unlock()
utils.GetLogInstance().Debug("[DRAND]", "AddPeers", *peer)
}
@@ -238,7 +238,7 @@ func (dRand *DRand) getValidatorPeerByID(validatorID uint32) *p2p.Peer {
func (dRand *DRand) ResetState() {
dRand.vrfs = &map[uint32][]byte{}
-bitmap, _ := bls_cosi.NewMask(dRand.PublicKeys, dRand.leader.BlsPubKey)
+bitmap, _ := bls_cosi.NewMask(dRand.PublicKeys, dRand.leader.ConsensusPubKey)
dRand.bitmap = bitmap
dRand.pRand = nil
dRand.rand = nil
@@ -246,8 +246,8 @@ func (dRand *DRand) ResetState() {
// SetLeaderPubKey deserialize the public key of drand leader
func (dRand *DRand) SetLeaderPubKey(k []byte) error {
-dRand.leader.BlsPubKey = &bls.PublicKey{}
-return dRand.leader.BlsPubKey.Deserialize(k)
+dRand.leader.ConsensusPubKey = &bls.PublicKey{}
+return dRand.leader.ConsensusPubKey.Deserialize(k)
}
// UpdatePublicKeys updates the PublicKeys variable, protected by a mutex

@@ -74,7 +74,7 @@ func (dRand *DRand) init(epochBlock *types.Block) {
(*dRand.vrfs)[dRand.nodeID] = append(rand[:], proof...)
-utils.GetLogInstance().Info("[DRG] sent init", "msg", msgToSend, "leader.PubKey", dRand.leader.BlsPubKey)
+utils.GetLogInstance().Info("[DRG] sent init", "msg", msgToSend, "leader.PubKey", dRand.leader.ConsensusPubKey)
dRand.host.SendMessageToGroups([]p2p.GroupID{p2p.GroupIDBeacon}, host.ConstructP2pMessage(byte(17), msgToSend))
}
@@ -114,9 +114,9 @@ func (dRand *DRand) processCommitMessage(message drand_proto.Message) {
}
// Verify message signature
-err := verifyMessageSig(validatorPeer.BlsPubKey, message)
+err := verifyMessageSig(validatorPeer.ConsensusPubKey, message)
if err != nil {
-utils.GetLogInstance().Warn("[DRAND] failed to verify the message signature", "Error", err, "PubKey", validatorPeer.BlsPubKey)
+utils.GetLogInstance().Warn("[DRAND] failed to verify the message signature", "Error", err, "PubKey", validatorPeer.ConsensusPubKey)
return
}
@@ -136,7 +136,7 @@ func (dRand *DRand) processCommitMessage(message drand_proto.Message) {
utils.GetLogInstance().Debug("Received new VRF commit", "numReceivedSoFar", len((*vrfs)), "validatorID", validatorID, "PublicKeys", len(dRand.PublicKeys))
(*vrfs)[validatorID] = message.Payload
-dRand.bitmap.SetKey(validatorPeer.BlsPubKey, true) // Set the bitmap indicating that this validator signed.
+dRand.bitmap.SetKey(validatorPeer.ConsensusPubKey, true) // Set the bitmap indicating that this validator signed.
if len((*vrfs)) >= ((len(dRand.PublicKeys))/3 + 1) {
// Construct pRand and initiate consensus on it

@@ -37,7 +37,7 @@ func (dRand *DRand) processInitMessage(message drand_proto.Message) {
blockHash := message.BlockHash
// Verify message signature
-err := verifyMessageSig(dRand.leader.BlsPubKey, message)
+err := verifyMessageSig(dRand.leader.ConsensusPubKey, message)
if err != nil {
utils.GetLogInstance().Warn("[DRG] Failed to verify the message signature", "Error", err)
return

@@ -71,8 +71,8 @@ type ConfigType struct {
Host p2p.Host
StakingPriKey *ecdsa.PrivateKey
P2pPriKey p2p_crypto.PrivKey
-BlsPriKey *bls.SecretKey
-BlsPubKey *bls.PublicKey
+ConsensusPriKey *bls.SecretKey
+ConsensusPubKey *bls.PublicKey
MainDB *ethdb.LDBDatabase
BeaconDB *ethdb.LDBDatabase

@@ -244,7 +244,7 @@ func New(host p2p.Host, consensusObj *consensus.Consensus, db ethdb.Database) *N
node.BeaconBlockChannel = make(chan *types.Block)
node.TxPool = core.NewTxPool(core.DefaultTxPoolConfig, params.TestChainConfig, chain)
-node.Worker = worker.New(params.TestChainConfig, chain, node.Consensus, pki.GetAddressFromPublicKey(node.SelfPeer.BlsPubKey), node.Consensus.ShardID)
+node.Worker = worker.New(params.TestChainConfig, chain, node.Consensus, pki.GetAddressFromPublicKey(node.SelfPeer.ConsensusPubKey), node.Consensus.ShardID)
utils.GetLogInstance().Debug("Created Genesis Block", "blockHash", chain.GetBlockByNumber(0).Hash().Hex())
node.Consensus.ConsensusBlock = make(chan *consensus.BFTBlockInfo)
@@ -470,5 +470,5 @@ func (node *Node) AddBeaconChainDatabase(db ethdb.Database) {
os.Exit(1)
}
node.beaconChain = chain
-node.BeaconWorker = worker.New(params.TestChainConfig, chain, node.Consensus, pki.GetAddressFromPublicKey(node.SelfPeer.BlsPubKey), node.Consensus.ShardID)
+node.BeaconWorker = worker.New(params.TestChainConfig, chain, node.Consensus, pki.GetAddressFromPublicKey(node.SelfPeer.ConsensusPubKey), node.Consensus.ShardID)
}

@@ -268,7 +268,7 @@ func (node *Node) BroadcastNewBlock(newBlock *types.Block) {
// VerifyNewBlock is called by consensus participants to verify the block (account model) they are running consensus on
func (node *Node) VerifyNewBlock(newBlock *types.Block) bool {
-err := node.blockchain.ValidateNewBlock(newBlock, pki.GetAddressFromPublicKey(node.SelfPeer.BlsPubKey))
+err := node.blockchain.ValidateNewBlock(newBlock, pki.GetAddressFromPublicKey(node.SelfPeer.ConsensusPubKey))
if err != nil {
utils.GetLogInstance().Debug("Failed verifying new block", "Error", err, "tx", newBlock.Transactions()[0])
return false
@@ -343,8 +343,8 @@ func (node *Node) pingMessageHandler(msgPayload []byte, sender string) int {
peer.PeerID = ping.Node.PeerID
peer.ValidatorID = ping.Node.ValidatorID
-peer.BlsPubKey = &bls.PublicKey{}
-err = peer.BlsPubKey.Deserialize(ping.Node.PubKey[:])
+peer.ConsensusPubKey = &bls.PublicKey{}
+err = peer.ConsensusPubKey.Deserialize(ping.Node.PubKey[:])
if err != nil {
utils.GetLogInstance().Error("UnmarshalBinary Failed", "error", err)
return -1
@@ -450,8 +450,8 @@ func (node *Node) pongMessageHandler(msgPayload []byte) int {
peer.ValidatorID = p.ValidatorID
peer.PeerID = p.PeerID
-peer.BlsPubKey = &bls.PublicKey{}
-err = peer.BlsPubKey.Deserialize(p.PubKey[:])
+peer.ConsensusPubKey = &bls.PublicKey{}
+err = peer.ConsensusPubKey.Deserialize(p.PubKey[:])
if err != nil {
utils.GetLogInstance().Error("UnmarshalBinary Failed", "error", err)
continue

@@ -11,7 +11,7 @@ import (
func TestAddNewBlock(t *testing.T) {
_, pubKey := utils.GenKey("1", "2")
-leader := p2p.Peer{IP: "127.0.0.1", Port: "9882", BlsPubKey: pubKey}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "9882", ConsensusPubKey: pubKey}
validator := p2p.Peer{IP: "127.0.0.1", Port: "9885"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
@@ -34,7 +34,7 @@ func TestAddNewBlock(t *testing.T) {
func TestVerifyNewBlock(t *testing.T) {
_, pubKey := utils.GenKey("1", "2")
-leader := p2p.Peer{IP: "127.0.0.1", Port: "8882", BlsPubKey: pubKey}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "8882", ConsensusPubKey: pubKey}
validator := p2p.Peer{IP: "127.0.0.1", Port: "8885"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)

@@ -19,7 +19,7 @@ import (
func TestNewNode(t *testing.T) {
_, pubKey := utils.GenKey("1", "2")
-leader := p2p.Peer{IP: "127.0.0.1", Port: "8882", BlsPubKey: pubKey}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "8882", ConsensusPubKey: pubKey}
validator := p2p.Peer{IP: "127.0.0.1", Port: "8885"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
@@ -43,7 +43,7 @@ func TestNewNode(t *testing.T) {
func TestGetSyncingPeers(t *testing.T) {
_, pubKey := utils.GenKey("1", "2")
-leader := p2p.Peer{IP: "127.0.0.1", Port: "8882", BlsPubKey: pubKey}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "8882", ConsensusPubKey: pubKey}
validator := p2p.Peer{IP: "127.0.0.1", Port: "8885"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
@@ -75,18 +75,18 @@ func TestAddPeers(t *testing.T) {
&p2p.Peer{
IP: "127.0.0.1",
Port: "8888",
-BlsPubKey: pubKey1,
+ConsensusPubKey: pubKey1,
ValidatorID: 1,
},
&p2p.Peer{
IP: "127.0.0.1",
Port: "9999",
-BlsPubKey: pubKey2,
+ConsensusPubKey: pubKey2,
ValidatorID: 2,
},
}
_, pubKey := utils.GenKey("1", "2")
-leader := p2p.Peer{IP: "127.0.0.1", Port: "8982", BlsPubKey: pubKey}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "8982", ConsensusPubKey: pubKey}
validator := p2p.Peer{IP: "127.0.0.1", Port: "8985"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
@@ -118,20 +118,20 @@ func TestAddBeaconPeer(t *testing.T) {
&p2p.Peer{
IP: "127.0.0.1",
Port: "8888",
-BlsPubKey: pubKey1,
+ConsensusPubKey: pubKey1,
ValidatorID: 1,
PeerID: "1234",
},
&p2p.Peer{
IP: "127.0.0.1",
Port: "9999",
-BlsPubKey: pubKey2,
+ConsensusPubKey: pubKey2,
ValidatorID: 2,
PeerID: "4567",
},
}
_, pubKey := utils.GenKey("1", "2")
-leader := p2p.Peer{IP: "127.0.0.1", Port: "8982", BlsPubKey: pubKey}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "8982", ConsensusPubKey: pubKey}
validator := p2p.Peer{IP: "127.0.0.1", Port: "8985"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
@@ -163,7 +163,7 @@ func sendPingMessage(node *Node, leader p2p.Peer) {
p1 := p2p.Peer{
IP: "127.0.0.1",
Port: "9999",
-BlsPubKey: pubKey1,
+ConsensusPubKey: pubKey1,
}
ping1 := proto_discovery.NewPingMessage(p1)
@@ -176,12 +176,12 @@ func sendPongMessage(node *Node, leader p2p.Peer) {
p1 := p2p.Peer{
IP: "127.0.0.1",
Port: "9998",
-BlsPubKey: pubKey1,
+ConsensusPubKey: pubKey1,
}
p2 := p2p.Peer{
IP: "127.0.0.1",
Port: "9999",
-BlsPubKey: pubKey2,
+ConsensusPubKey: pubKey2,
}
pubKeys := []*bls.PublicKey{pubKey1, pubKey2}
@@ -200,7 +200,7 @@ func exitServer() {
func TestPingPongHandler(t *testing.T) {
_, pubKey := utils.GenKey("127.0.0.1", "8881")
-leader := p2p.Peer{IP: "127.0.0.1", Port: "8881", BlsPubKey: pubKey}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "8881", ConsensusPubKey: pubKey}
// validator := p2p.Peer{IP: "127.0.0.1", Port: "9991"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)

@@ -21,7 +21,7 @@ var (
func TestUpdateStakingList(t *testing.T) {
_, pubKey := utils.GenKey("1", "2")
-leader := p2p.Peer{IP: "127.0.0.1", Port: "9882", BlsPubKey: pubKey}
+leader := p2p.Peer{IP: "127.0.0.1", Port: "9882", ConsensusPubKey: pubKey}
validator := p2p.Peer{IP: "127.0.0.1", Port: "9885"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)

@@ -16,7 +16,7 @@ type StreamHandler func(Stream)
type Peer struct {
IP string // IP address of the peer
Port string // Port number of the peer
-BlsPubKey *bls.PublicKey // Public key of the peer, used for consensus signing
+ConsensusPubKey *bls.PublicKey // Public key of the peer, used for consensus signing
ValidatorID int // -1 is the default value, means not assigned any validator ID in the shard
Addrs []ma.Multiaddr // MultiAddress of the peer
PeerID libp2p_peer.ID // PeerID, the pubkey for communication

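For reference, a minimal sketch of constructing a Peer with the renamed field, mirroring the literals used throughout the updated tests; it is not part of the commit, and the import paths and curve initialization are assumptions based on the repository layout of this era:

```go
package main

import (
	"fmt"

	"github.com/harmony-one/bls/ffi/go/bls" // assumed BLS binding path
	"github.com/harmony-one/harmony/p2p"    // assumed package path for the Peer struct above
)

func main() {
	if err := bls.Init(bls.BLS12_381); err != nil { // assumed curve
		panic(err)
	}
	var sec bls.SecretKey
	sec.SetByCSPRNG()

	leader := p2p.Peer{
		IP:              "127.0.0.1",
		Port:            "9000",
		ValidatorID:     -1,                 // default: no validator ID assigned yet
		ConsensusPubKey: sec.GetPublicKey(), // this field was BlsPubKey before the commit
	}
	fmt.Println(leader.IP, leader.Port, leader.ValidatorID)
}
```
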
@@ -57,9 +57,9 @@ It should cover the basic function to pass, to fail, and error conditions.
### state syncing
* test case # : SS1
-* description : beacon chain node ss
-* test procedure : one new beacon node join in the beacon chain after beacon chain reach a few consensuses
-* passing criteria : the new node can join in the consensus after state syncing
+* description : node state sync basic
+* test procedure : node joins network and is able to sync to latest block
+* passing criteria : blockheight of node is equal to that of the shards blockchain and node has joined consensus.
* dependency
* note
* automated? N
@@ -72,6 +72,14 @@ It should cover the basic function to pass, to fail, and error conditions.
* note
* automated? N
---
+* test case # : SS1
+* description : beacon chain node ss
+* test procedure : one new beacon node join in the beacon chain after beacon chain reach a few consensuses
+* passing criteria : the new node can join in the consensus after state syncing
+* dependency
+* note
+* automated? N
+---
### consensus
