Merge pull request #1563 from LeoHChen/merge_s3_to_master_0910

Merge s3 to master 0910
Refs: pull/1564/head, v1-20190910.0
Authored by Leo Chen, committed by GitHub
commit 211438923f
27 files changed:

 1. api/proto/message/server.go (2 changes)
 2. api/service/syncing/syncing.go (6 changes)
 3. cmd/client/txgen/main.go (59 changes)
 4. cmd/harmony/main.go (33 changes)
 5. consensus/consensus_v2.go (246 changes)
 6. consensus/consensus_viewchange_msg.go (8 changes)
 7. consensus/view_change.go (110 changes)
 8. core/blockchain.go (2 changes)
 9. core/rawdb/accessors_chain.go (3 changes)
10. core/rawdb/accessors_indexes.go (2 changes)
11. core/rawdb/accessors_metadata.go (2 changes)
12. drand/drand_leader.go (6 changes)
13. internal/attack/attack.go (12 changes)
14. internal/configs/sharding/mainnet.go (4 changes)
15. internal/configs/sharding/shardingconfig_test.go (6 changes)
16. internal/genesis/foundational.go (28 changes)
17. internal/genesis/genesis.go (2 changes)
18. internal/hmyapi/blockchain.go (9 changes)
19. internal/hmyapi/private_account.go (7 changes)
20. internal/hmyapi/util.go (10 changes)
21. internal/memprofiling/lib.go (17 changes)
22. internal/profiler/profiler.go (17 changes)
23. internal/shardchain/shardchains.go (21 changes)
24. internal/utils/utils.go (18 changes)
25. node/node.go (2 changes)
26. node/node_handler.go (4 changes)
27. scripts/api_test.sh (700 changes)
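Almost every Go hunk in this merge moves logging onto the chained utils.Logger() builder: the old variadic utils.GetLogInstance(...) calls and the per-consensus consensus.getLogger() helper are replaced, and []byte hash fields move from Bytes to Hex. The hunks below appear in the same order as the file list above. The following sketch only illustrates the before/after style using rs/zerolog directly, on the assumption that utils.Logger() wraps a zerolog.Logger (the chained Str/Uint64/Hex/Msg calls suggest this); the field values are stand-ins, not identifiers from the diff.

package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// Stand-in for what utils.Logger() is assumed to return: a zerolog.Logger.
	logger := zerolog.New(os.Stdout).With().Timestamp().Logger()

	blockNum := uint64(42)
	blockHash := []byte{0xde, 0xad, 0xbe, 0xef}

	// Old style removed by this merge (shape only):
	//   utils.GetLogInstance().Info("Sent Announce Message", "blockNum", blockNum)
	// New style added by this merge: typed field builders terminated by Msg.
	logger.Info().
		Uint64("blockNum", blockNum).
		Hex("blockHash", blockHash).
		Msg("[Announce] Sent Announce Message!!")
}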

@@ -48,7 +48,7 @@ func (s *Server) Process(ctx context.Context, message *Message) (*Response, erro
 }
 address := crypto.PubkeyToAddress(key.PublicKey)
-utils.Logger().Info().Int64("amount", amount).Bytes("address", address[:]).Msg("Enter")
+utils.Logger().Info().Int64("amount", amount).Hex("address", address[:]).Msg("Enter")
 if err := s.CreateTransactionForEnterMethod(amount, priKey); err != nil {
 return nil, ErrEnterMethod
 }
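The single change above (and several later ones) swaps the zerolog Bytes field for Hex when the value is a hash or an address. A short sketch of the difference, assuming the rs/zerolog encoder; the address bytes are made up:

package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	logger := zerolog.New(os.Stdout)
	address := []byte{0x4a, 0xee, 0x18, 0xf8}

	// Bytes writes the raw bytes as a string, escaping what it must;
	// for hashes and addresses that is mostly unreadable noise.
	logger.Info().Bytes("address", address).Msg("raw bytes")

	// Hex writes the familiar hex encoding, e.g. "address":"4aee18f8".
	logger.Info().Hex("address", address).Msg("hex encoded")
}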

@@ -507,8 +507,8 @@ func (ss *StateSync) getMaxConsensusBlockFromParentHash(parentHash common.Hash)
 maxFirstID, maxCount := GetHowManyMaxConsensus(candidateBlocks)
 hash := candidateBlocks[maxFirstID].Hash()
 utils.Logger().Debug().
-Bytes("parentHash", parentHash[:]).
-Bytes("hash", hash[:]).
+Hex("parentHash", parentHash[:]).
+Hex("hash", hash[:]).
 Int("maxCount", maxCount).
 Msg("[SYNC] Find block with matching parenthash")
 return candidateBlocks[maxFirstID]
@@ -675,7 +675,7 @@ func (ss *StateSync) RegisterNodeInfo() int {
 err := peerConfig.registerToBroadcast(ss.selfPeerHash[:], ss.selfip, ss.selfport)
 if err != nil {
 logger.Debug().
-Bytes("selfPeerHash", ss.selfPeerHash[:]).
+Hex("selfPeerHash", ss.selfPeerHash[:]).
 Msg("[SYNC] register failed to peer")
 return
 }

@@ -151,7 +151,9 @@ func main() {
 MaxNumTxsPerBatch: *numTxns,
 }
 shardID := *shardIDFlag
-utils.GetLogInstance().Debug("Cross Shard Ratio Is Set But not used", "cx ratio", *crossShardRatio)
+utils.Logger().Debug().
+Int("cx ratio", *crossShardRatio).
+Msg("Cross Shard Ratio Is Set But not used")
 // TODO(Richard): refactor this chuck to a single method
 // Setup a logger to stdout and log file.
@@ -166,20 +168,30 @@ func main() {
 txGen.RunServices()
 start := time.Now()
 totalTime := float64(*duration)
-utils.GetLogInstance().Debug("Total Duration", "totalTime", totalTime, "RunForever", isDurationForever(totalTime))
+utils.Logger().Debug().
+Float64("totalTime", totalTime).
+Bool("RunForever", isDurationForever(totalTime)).
+Msg("Total Duration")
 ticker := time.NewTicker(checkFrequency * time.Second)
 txGen.DoSyncWithoutConsensus()
 syncLoop:
 for {
 t := time.Now()
 if totalTime > 0 && t.Sub(start).Seconds() >= totalTime {
-utils.GetLogInstance().Debug("Generator timer ended in syncLoop.", "duration", (int(t.Sub(start))), "startTime", start, "totalTime", totalTime)
+utils.Logger().Debug().
+Int("duration", (int(t.Sub(start)))).
+Time("startTime", start).
+Float64("totalTime", totalTime).
+Msg("Generator timer ended in syncLoop.")
 break syncLoop
 }
 select {
 case <-ticker.C:
 if txGen.State.String() == "NodeReadyForConsensus" {
-utils.GetLogInstance().Debug("Generator is now in Sync.", "txgen node", txGen.SelfPeer, "Node State", txGen.State.String())
+utils.Logger().Debug().
+Str("txgen node", txGen.SelfPeer.String()).
+Str("Node State", txGen.State.String()).
+Msg("Generator is now in Sync.")
 ticker.Stop()
 break syncLoop
 }
@@ -188,14 +200,24 @@ syncLoop:
 readySignal := make(chan uint32)
 // This func is used to update the client's blockchain when new blocks are received from the leaders
 updateBlocksFunc := func(blocks []*types.Block) {
-utils.GetLogInstance().Info("[Txgen] Received new block", "block num", blocks[0].NumberU64())
+utils.Logger().Info().
+Uint64("block num", blocks[0].NumberU64()).
+Msg("[Txgen] Received new block")
 for _, block := range blocks {
 shardID := block.ShardID()
 if txGen.Consensus.ShardID == shardID {
-utils.GetLogInstance().Info("Got block from leader", "txNum", len(block.Transactions()), "shardID", shardID, "preHash", block.ParentHash().Hex(), "currentBlock", txGen.Blockchain().CurrentBlock().NumberU64(), "incoming block", block.NumberU64())
+utils.Logger().Info().
+Int("txNum", len(block.Transactions())).
+Uint32("shardID", shardID).
+Str("preHash", block.ParentHash().Hex()).
+Uint64("currentBlock", txGen.Blockchain().CurrentBlock().NumberU64()).
+Uint64("incoming block", block.NumberU64()).
+Msg("Got block from leader")
 if block.NumberU64()-txGen.Blockchain().CurrentBlock().NumberU64() == 1 {
 if err := txGen.AddNewBlock(block); err != nil {
-utils.GetLogInstance().Error("Error when adding new block", "error", err)
+utils.Logger().Error().
+Err(err).
+Msg("Error when adding new block")
 }
 stateMutex.Lock()
 if err := txGen.Worker.UpdateCurrent(block.Coinbase()); err != nil {
@@ -221,9 +243,16 @@ syncLoop:
 pushLoop:
 for {
 t := time.Now()
-utils.GetLogInstance().Debug("Current running time", "running time", t.Sub(start).Seconds(), "totaltime", totalTime)
+utils.Logger().Debug().
+Float64("running time", t.Sub(start).Seconds()).
+Float64("totalTime", totalTime).
+Msg("Current running time")
 if !isDurationForever(totalTime) && t.Sub(start).Seconds() >= totalTime {
-utils.GetLogInstance().Debug("Generator timer ended.", "duration", (int(t.Sub(start))), "startTime", start, "totalTime", totalTime)
+utils.Logger().Debug().
+Int("duration", (int(t.Sub(start)))).
+Time("startTime", start).
+Float64("totalTime", totalTime).
+Msg("Generator timer ended.")
 break pushLoop
 }
 if shardID != 0 {
@@ -231,7 +260,7 @@ pushLoop:
 if otherHeight >= 1 {
 go func() {
 readySignal <- uint32(shardID)
-utils.GetLogInstance().Debug("Same blockchain height so readySignal generated")
+utils.Logger().Debug().Msg("Same blockchain height so readySignal generated")
 time.Sleep(3 * time.Second) // wait for nodes to be ready
 }()
 }
@@ -242,13 +271,15 @@ pushLoop:
 lock := sync.Mutex{}
 txs, err := GenerateSimulatedTransactionsAccount(uint32(shardID), txGen, setting)
 if err != nil {
-utils.GetLogInstance().Debug("Error in Generating Txns", "Err", err)
+utils.Logger().Debug().
+Err(err).
+Msg("Error in Generating Txns")
 }
 lock.Lock()
 SendTxsToShard(txGen, txs, uint32(shardID))
 lock.Unlock()
 case <-time.After(10 * time.Second):
-utils.GetLogInstance().Warn("No new block is received so far")
+utils.Logger().Warn().Msg("No new block is received so far")
 }
 }
 }
@@ -264,7 +295,9 @@ func SendTxsToShard(clientNode *node.Node, txs types.Transactions, shardID uint3
 err = clientNode.GetHost().SendMessageToGroups([]p2p.GroupID{clientGroup}, p2p_host.ConstructP2pMessage(byte(0), msg))
 }
 if err != nil {
-utils.GetLogInstance().Debug("Error in Sending Txns", "Err", err)
+utils.Logger().Debug().
+Err(err).
+Msg("Error in Sending Txns")
 }
 }

@@ -322,8 +322,9 @@ func setupConsensusAndNode(nodeConfig *nodeconfig.ConfigType) *node.Node {
 }
 // TODO: add staking support
 // currentNode.StakingAccount = myAccount
-utils.GetLogInstance().Info("node account set",
-"address", common.MustAddressToBech32(currentNode.StakingAccount.Address))
+utils.Logger().Info().
+Str("address", common.MustAddressToBech32(currentNode.StakingAccount.Address)).
+Msg("node account set")
 // TODO: refactor the creation of blockchain out of node.New()
 currentConsensus.ChainReader = currentNode.Blockchain()
@@ -373,7 +374,9 @@ func setupConsensusAndNode(nodeConfig *nodeconfig.ConfigType) *node.Node {
 height := currentNode.Blockchain().CurrentBlock().NumberU64()
 currentConsensus.SetViewID(height)
-utils.GetLogInstance().Info("Init Blockchain", "height", height)
+utils.Logger().Info().
+Uint64("height", height).
+Msg("Init Blockchain")
 // Assign closure functions to the consensus object
 currentConsensus.BlockVerifier = currentNode.VerifyNewBlock
@@ -435,7 +438,10 @@ func main() {
 }
 if *shardID >= 0 {
-utils.GetLogInstance().Info("ShardID Override", "original", initialAccount.ShardID, "override", *shardID)
+utils.Logger().Info().
+Uint32("original", initialAccount.ShardID).
+Int("override", *shardID).
+Msg("ShardID Override")
 initialAccount.ShardID = uint32(*shardID)
 }
@@ -451,15 +457,16 @@ func main() {
 if *isExplorer {
 startMsg = "==== New Explorer Node ===="
 }
-utils.GetLogInstance().Info(startMsg,
-"BlsPubKey", hex.EncodeToString(nodeConfig.ConsensusPubKey.Serialize()),
-"ShardID", nodeConfig.ShardID,
-"ShardGroupID", nodeConfig.GetShardGroupID(),
-"BeaconGroupID", nodeConfig.GetBeaconGroupID(),
-"ClientGroupID", nodeConfig.GetClientGroupID(),
-"Role", currentNode.NodeConfig.Role(),
-"multiaddress", fmt.Sprintf("/ip4/%s/tcp/%s/p2p/%s",
-*ip, *port, nodeConfig.Host.GetID().Pretty()))
+
+utils.Logger().Info().
+Str("BlsPubKey", hex.EncodeToString(nodeConfig.ConsensusPubKey.Serialize())).
+Uint32("ShardID", nodeConfig.ShardID).
+Str("ShardGroupID", nodeConfig.GetShardGroupID().String()).
+Str("BeaconGroupID", nodeConfig.GetBeaconGroupID().String()).
+Str("ClientGroupID", nodeConfig.GetClientGroupID().String()).
+Str("Role", currentNode.NodeConfig.Role().String()).
+Str("multiaddress", fmt.Sprintf("/ip4/%s/tcp/%s/p2p/%s", *ip, *port, nodeConfig.Host.GetID().Pretty())).
+Msg(startMsg)
 if *enableMemProfiling {
 memprofiling.GetMemProfiling().Start()
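The consensus hunks that follow apply the same migration inside consensus_v2.go: the per-consensus consensus.getLogger() helper is replaced by the package-level utils.Logger(), while the two call sites that need per-message context keep building a sub-logger with With()...Logger(). A minimal sketch of that sub-logger pattern, again assuming a zerolog-backed logger; the key value is a placeholder:

package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	base := zerolog.New(os.Stdout).With().Timestamp().Logger()

	// With() opens a child context; Logger() freezes it into a new logger
	// that attaches validatorPubKey to every event it emits.
	logger := base.With().Str("validatorPubKey", "0xabc123").Logger()

	logger.Debug().Msg("[OnPrepare] Received Additional Prepare Message")
	logger.Error().Msg("[OnCommit] Invalid validator")
}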

@@ -50,7 +50,7 @@ func (consensus *Consensus) handleMessageUpdate(payload []byte) {
 if msg.Type == msg_pb.MessageType_VIEWCHANGE || msg.Type == msg_pb.MessageType_NEWVIEW {
 if msg.GetViewchange() != nil && msg.GetViewchange().ShardId != consensus.ShardID {
-consensus.getLogger().Warn().
+utils.Logger().Warn().
 Uint32("myShardId", consensus.ShardID).
 Uint32("receivedShardId", msg.GetViewchange().ShardId).
 Msg("Received view change message from different shard")
@@ -58,7 +58,7 @@ func (consensus *Consensus) handleMessageUpdate(payload []byte) {
 }
 } else {
 if msg.GetConsensus() != nil && msg.GetConsensus().ShardId != consensus.ShardID {
-consensus.getLogger().Warn().
+utils.Logger().Warn().
 Uint32("myShardId", consensus.ShardID).
 Uint32("receivedShardId", msg.GetConsensus().ShardId).
 Msg("Received consensus message from different shard")
@@ -92,16 +92,12 @@ func (consensus *Consensus) announce(block *types.Block) {
 // prepare message and broadcast to validators
 encodedBlock, err := rlp.EncodeToBytes(block)
 if err != nil {
-consensus.getLogger().Debug().
-Err(err).
-Msg("[Announce] Failed encoding block")
+utils.Logger().Debug().Msg("[Announce] Failed encoding block")
 return
 }
 encodedBlockHeader, err := rlp.EncodeToBytes(block.Header())
 if err != nil {
-consensus.getLogger().Debug().
-Err(err).
-Msg("[Announce] Failed encoding block header")
+utils.Logger().Debug().Msg("[Announce] Failed encoding block header")
 return
 }
@@ -115,12 +111,12 @@ func (consensus *Consensus) announce(block *types.Block) {
 _ = protobuf.Unmarshal(msgPayload, msg)
 pbftMsg, err := ParsePbftMessage(msg)
 if err != nil {
-consensus.getLogger().Warn().Err(err).Msg("[Announce] Unable to parse pbft message")
+utils.Logger().Warn().Err(err).Msg("[Announce] Unable to parse pbft message")
 return
 }
 consensus.PbftLog.AddMessage(pbftMsg)
-consensus.getLogger().Debug().
+utils.Logger().Debug().
 Str("MsgBlockHash", pbftMsg.BlockHash.Hex()).
 Uint64("MsgViewID", pbftMsg.ViewID).
 Uint64("MsgBlockNum", pbftMsg.BlockNum).
@@ -130,24 +126,24 @@ func (consensus *Consensus) announce(block *types.Block) {
 // Leader sign the block hash itself
 consensus.prepareSigs[consensus.PubKey.SerializeToHexStr()] = consensus.priKey.SignHash(consensus.blockHash[:])
 if err := consensus.prepareBitmap.SetKey(consensus.PubKey, true); err != nil {
-consensus.getLogger().Warn().Err(err).Msg("[Announce] Leader prepareBitmap SetKey failed")
+utils.Logger().Warn().Err(err).Msg("[Announce] Leader prepareBitmap SetKey failed")
 return
 }
 // Construct broadcast p2p message
 if err := consensus.msgSender.SendWithRetry(consensus.blockNum, msg_pb.MessageType_ANNOUNCE, []p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend)); err != nil {
-consensus.getLogger().Warn().
+utils.Logger().Warn().
 Str("groupID", string(p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID)))).
 Msg("[Announce] Cannot send announce message")
 } else {
-consensus.getLogger().Info().
+utils.Logger().Info().
 Str("blockHash", block.Hash().Hex()).
 Uint64("blockNum", block.NumberU64()).
 Msg("[Announce] Sent Announce Message!!")
 }
-consensus.getLogger().Debug().
+utils.Logger().Debug().
 Str("From", consensus.phase.String()).
 Str("To", Prepare.String()).
 Msg("[Announce] Switching phase")
@@ -155,31 +151,31 @@ func (consensus *Consensus) announce(block *types.Block) {
 }
 func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) {
-consensus.getLogger().Debug().Msg("[OnAnnounce] Receive announce message")
+utils.Logger().Debug().Msg("[OnAnnounce] Receive announce message")
 if consensus.IsLeader() && consensus.mode.Mode() == Normal {
 return
 }
 senderKey, err := consensus.verifySenderKey(msg)
 if err != nil {
-consensus.getLogger().Error().Err(err).Msg("[OnAnnounce] VerifySenderKey failed")
+utils.Logger().Error().Err(err).Msg("[OnAnnounce] VerifySenderKey failed")
 return
 }
 if !senderKey.IsEqual(consensus.LeaderPubKey) && consensus.mode.Mode() == Normal && !consensus.ignoreViewIDCheck {
-consensus.getLogger().Warn().
+utils.Logger().Warn().
 Str("senderKey", senderKey.SerializeToHexStr()).
 Str("leaderKey", consensus.LeaderPubKey.SerializeToHexStr()).
 Msg("[OnAnnounce] SenderKey does not match leader PubKey")
 return
 }
 if err = verifyMessageSig(senderKey, msg); err != nil {
-consensus.getLogger().Error().Err(err).Msg("[OnAnnounce] Failed to verify leader signature")
+utils.Logger().Error().Err(err).Msg("[OnAnnounce] Failed to verify leader signature")
 return
 }
 recvMsg, err := ParsePbftMessage(msg)
 if err != nil {
-consensus.getLogger().Error().
+utils.Logger().Error().
 Err(err).
 Uint64("MsgBlockNum", recvMsg.BlockNum).
 Msg("[OnAnnounce] Unparseable leader message")
@@ -191,7 +187,7 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) {
 header := new(block.Header)
 err = rlp.DecodeBytes(encodedHeader, header)
 if err != nil {
-consensus.getLogger().Warn().
+utils.Logger().Warn().
 Err(err).
 Uint64("MsgBlockNum", recvMsg.BlockNum).
 Msg("[OnAnnounce] Unparseable block header data")
@@ -199,7 +195,7 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) {
 }
 if recvMsg.BlockNum < consensus.blockNum || recvMsg.BlockNum != header.Number().Uint64() {
-consensus.getLogger().Debug().
+utils.Logger().Debug().
 Uint64("MsgBlockNum", recvMsg.BlockNum).
 Uint64("blockNum", consensus.blockNum).
 Uint64("hdrBlockNum", header.Number().Uint64()).
@@ -208,7 +204,7 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) {
 }
 if consensus.mode.Mode() == Normal {
 if err = chain.Engine.VerifyHeader(consensus.ChainReader, header, true); err != nil {
-consensus.getLogger().Warn().
+utils.Logger().Warn().
 Err(err).
 Str("inChain", consensus.ChainReader.CurrentHeader().Number().String()).
 Str("MsgBlockNum", header.Number().String()).
@@ -237,18 +233,18 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) {
 logMsgs := consensus.PbftLog.GetMessagesByTypeSeqView(msg_pb.MessageType_ANNOUNCE, recvMsg.BlockNum, recvMsg.ViewID)
 if len(logMsgs) > 0 {
 if logMsgs[0].BlockHash != recvMsg.BlockHash {
-consensus.getLogger().Debug().
+utils.Logger().Debug().
 Str("leaderKey", consensus.LeaderPubKey.SerializeToHexStr()).
 Msg("[OnAnnounce] Leader is malicious")
 consensus.startViewChange(consensus.viewID + 1)
 }
-consensus.getLogger().Debug().
+utils.Logger().Debug().
 Str("leaderKey", consensus.LeaderPubKey.SerializeToHexStr()).
 Msg("[OnAnnounce] Announce message received again")
 //return
 }
-consensus.getLogger().Debug().
+utils.Logger().Debug().
 Uint64("MsgViewID", recvMsg.ViewID).
 Uint64("MsgBlockNum", recvMsg.BlockNum).
 Msg("[OnAnnounce] Announce message Added")
@@ -261,13 +257,13 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) {
 // we have already added message and block, skip check viewID and send prepare message if is in ViewChanging mode
 if consensus.mode.Mode() == ViewChanging {
-consensus.getLogger().Debug().Msg("[OnAnnounce] Still in ViewChanging Mode, Exiting !!")
+utils.Logger().Debug().Msg("[OnAnnounce] Still in ViewChanging Mode, Exiting !!")
 return
 }
 if consensus.checkViewID(recvMsg) != nil {
 if consensus.mode.Mode() == Normal {
-consensus.getLogger().Debug().
+utils.Logger().Debug().
 Uint64("MsgViewID", recvMsg.ViewID).
 Uint64("MsgBlockNum", recvMsg.BlockNum).
 Msg("[OnAnnounce] ViewID check failed")
@@ -286,13 +282,13 @@ func (consensus *Consensus) prepare() {
 // TODO: this will not return immediatey, may block
 if err := consensus.msgSender.SendWithoutRetry([]p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend)); err != nil {
-consensus.getLogger().Warn().Err(err).Msg("[OnAnnounce] Cannot send prepare message")
+utils.Logger().Warn().Err(err).Msg("[OnAnnounce] Cannot send prepare message")
 } else {
-consensus.getLogger().Info().
+utils.Logger().Info().
 Str("blockHash", hex.EncodeToString(consensus.blockHash[:])).
 Msg("[OnAnnounce] Sent Prepare Message!!")
 }
-consensus.getLogger().Debug().
+utils.Logger().Debug().
 Str("From", consensus.phase.String()).
 Str("To", Prepare.String()).
 Msg("[Announce] Switching Phase")
@@ -307,22 +303,22 @@ func (consensus *Consensus) onPrepare(msg *msg_pb.Message) {
 senderKey, err := consensus.verifySenderKey(msg)
 if err != nil {
-consensus.getLogger().Error().Err(err).Msg("[OnPrepare] VerifySenderKey failed")
+utils.Logger().Error().Err(err).Msg("[OnPrepare] VerifySenderKey failed")
 return
 }
 if err = verifyMessageSig(senderKey, msg); err != nil {
-consensus.getLogger().Error().Err(err).Msg("[OnPrepare] Failed to verify sender's signature")
+utils.Logger().Error().Err(err).Msg("[OnPrepare] Failed to verify sender's signature")
 return
 }
 recvMsg, err := ParsePbftMessage(msg)
 if err != nil {
-consensus.getLogger().Error().Err(err).Msg("[OnPrepare] Unparseable validator message")
+utils.Logger().Error().Err(err).Msg("[OnPrepare] Unparseable validator message")
 return
 }
 if recvMsg.ViewID != consensus.viewID || recvMsg.BlockNum != consensus.blockNum {
-consensus.getLogger().Debug().
+utils.Logger().Debug().
 Uint64("MsgViewID", recvMsg.ViewID).
 Uint64("MsgBlockNum", recvMsg.BlockNum).
 Uint64("blockNum", consensus.blockNum).
@@ -331,7 +327,7 @@ func (consensus *Consensus) onPrepare(msg *msg_pb.Message) {
 }
 if !consensus.PbftLog.HasMatchingViewAnnounce(consensus.blockNum, consensus.viewID, recvMsg.BlockHash) {
-consensus.getLogger().Debug().
+utils.Logger().Debug().
 Uint64("MsgViewID", recvMsg.ViewID).
 Uint64("MsgBlockNum", recvMsg.BlockNum).
 Uint64("blockNum", consensus.blockNum).
@@ -347,7 +343,7 @@ func (consensus *Consensus) onPrepare(msg *msg_pb.Message) {
 consensus.mutex.Lock()
 defer consensus.mutex.Unlock()
-logger := consensus.getLogger().With().Str("validatorPubKey", validatorPubKey).Logger()
+logger := utils.Logger().With().Str("validatorPubKey", validatorPubKey).Logger()
 if len(prepareSigs) >= consensus.Quorum() {
 // already have enough signatures
 logger.Debug().Msg("[OnPrepare] Received Additional Prepare Message")
@@ -364,11 +360,11 @@ func (consensus *Consensus) onPrepare(msg *msg_pb.Message) {
 var sign bls.Sign
 err = sign.Deserialize(prepareSig)
 if err != nil {
-consensus.getLogger().Error().Err(err).Msg("[OnPrepare] Failed to deserialize bls signature")
+utils.Logger().Error().Err(err).Msg("[OnPrepare] Failed to deserialize bls signature")
 return
 }
 if !sign.VerifyHash(recvMsg.SenderPubkey, consensus.blockHash[:]) {
-consensus.getLogger().Error().Msg("[OnPrepare] Received invalid BLS signature")
+utils.Logger().Error().Msg("[OnPrepare] Received invalid BLS signature")
 return
 }
@@ -377,7 +373,7 @@ func (consensus *Consensus) onPrepare(msg *msg_pb.Message) {
 prepareSigs[validatorPubKey] = &sign
 // Set the bitmap indicating that this validator signed.
 if err := prepareBitmap.SetKey(recvMsg.SenderPubkey, true); err != nil {
-consensus.getLogger().Warn().Err(err).Msg("[OnPrepare] prepareBitmap.SetKey failed")
+utils.Logger().Warn().Err(err).Msg("[OnPrepare] prepareBitmap.SetKey failed")
 return
 }
@@ -393,7 +389,7 @@ func (consensus *Consensus) onPrepare(msg *msg_pb.Message) {
 _ = protobuf.Unmarshal(msgPayload, msg)
 pbftMsg, err := ParsePbftMessage(msg)
 if err != nil {
-consensus.getLogger().Warn().Err(err).Msg("[OnPrepare] Unable to parse pbft message")
+utils.Logger().Warn().Err(err).Msg("[OnPrepare] Unable to parse pbft message")
 return
 }
 consensus.PbftLog.AddMessage(pbftMsg)
@@ -404,22 +400,22 @@ func (consensus *Consensus) onPrepare(msg *msg_pb.Message) {
 commitPayload := append(blockNumHash, consensus.blockHash[:]...)
 consensus.commitSigs[consensus.PubKey.SerializeToHexStr()] = consensus.priKey.SignHash(commitPayload)
 if err := consensus.commitBitmap.SetKey(consensus.PubKey, true); err != nil {
-consensus.getLogger().Debug().Msg("[OnPrepare] Leader commit bitmap set failed")
+utils.Logger().Debug().Msg("[OnPrepare] Leader commit bitmap set failed")
 return
 }
 if err := consensus.msgSender.SendWithRetry(consensus.blockNum, msg_pb.MessageType_PREPARED, []p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend)); err != nil {
-consensus.getLogger().Warn().Msg("[OnPrepare] Cannot send prepared message")
+utils.Logger().Warn().Msg("[OnPrepare] Cannot send prepared message")
 } else {
-consensus.getLogger().Debug().
-Bytes("blockHash", consensus.blockHash[:]).
+utils.Logger().Debug().
+Hex("blockHash", consensus.blockHash[:]).
 Uint64("blockNum", consensus.blockNum).
 Msg("[OnPrepare] Sent Prepared Message!!")
 }
 consensus.msgSender.StopRetry(msg_pb.MessageType_ANNOUNCE)
 consensus.msgSender.StopRetry(msg_pb.MessageType_COMMITTED) // Stop retry committed msg of last consensus
-consensus.getLogger().Debug().
+utils.Logger().Debug().
 Str("From", consensus.phase.String()).
 Str("To", Commit.String()).
 Msg("[OnPrepare] Switching phase")
@@ -429,37 +425,37 @@ func (consensus *Consensus) onPrepare(msg *msg_pb.Message) {
 }
 func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
-consensus.getLogger().Debug().Msg("[OnPrepared] Received Prepared message")
+utils.Logger().Debug().Msg("[OnPrepared] Received Prepared message")
 if consensus.IsLeader() && consensus.mode.Mode() == Normal {
 return
 }
 senderKey, err := consensus.verifySenderKey(msg)
 if err != nil {
-consensus.getLogger().Debug().Err(err).Msg("[OnPrepared] VerifySenderKey failed")
+utils.Logger().Debug().Err(err).Msg("[OnPrepared] VerifySenderKey failed")
 return
 }
 if !senderKey.IsEqual(consensus.LeaderPubKey) && consensus.mode.Mode() == Normal && !consensus.ignoreViewIDCheck {
-consensus.getLogger().Warn().Msg("[OnPrepared] SenderKey not match leader PubKey")
+utils.Logger().Warn().Msg("[OnPrepared] SenderKey not match leader PubKey")
 return
 }
 if err := verifyMessageSig(senderKey, msg); err != nil {
-consensus.getLogger().Debug().Err(err).Msg("[OnPrepared] Failed to verify sender's signature")
+utils.Logger().Debug().Err(err).Msg("[OnPrepared] Failed to verify sender's signature")
 return
 }
 recvMsg, err := ParsePbftMessage(msg)
 if err != nil {
-consensus.getLogger().Debug().Err(err).Msg("[OnPrepared] Unparseable validator message")
+utils.Logger().Debug().Err(err).Msg("[OnPrepared] Unparseable validator message")
 return
 }
-consensus.getLogger().Info().
+utils.Logger().Info().
 Uint64("MsgBlockNum", recvMsg.BlockNum).
 Uint64("MsgViewID", recvMsg.ViewID).
 Msg("[OnPrepared] Received prepared message")
 if recvMsg.BlockNum < consensus.blockNum {
-consensus.getLogger().Debug().Uint64("MsgBlockNum", recvMsg.BlockNum).Msg("Old Block Received, ignoring!!")
+utils.Logger().Debug().Uint64("MsgBlockNum", recvMsg.BlockNum).Msg("Old Block Received, ignoring!!")
 return
 }
@@ -467,11 +463,11 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
 blockHash := recvMsg.BlockHash
 aggSig, mask, err := consensus.ReadSignatureBitmapPayload(recvMsg.Payload, 0)
 if err != nil {
-consensus.getLogger().Error().Err(err).Msg("ReadSignatureBitmapPayload failed!!")
+utils.Logger().Error().Err(err).Msg("ReadSignatureBitmapPayload failed!!")
 return
 }
 if count := utils.CountOneBits(mask.Bitmap); count < consensus.Quorum() {
-consensus.getLogger().Debug().
+utils.Logger().Debug().
 Int("Need", consensus.Quorum()).
 Int("Got", count).
 Msg("Not enough signatures in the Prepared msg")
@@ -480,7 +476,7 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
 if !aggSig.VerifyHash(mask.AggregatePublic, blockHash[:]) {
 myBlockHash := common.Hash{}
 myBlockHash.SetBytes(consensus.blockHash[:])
-consensus.getLogger().Warn().
+utils.Logger().Warn().
 Uint64("MsgBlockNum", recvMsg.BlockNum).
 Uint64("MsgViewID", recvMsg.ViewID).
 Msg("[OnPrepared] failed to verify multi signature for prepare phase")
@@ -492,30 +488,30 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
 var blockObj types.Block
 err = rlp.DecodeBytes(block, &blockObj)
 if err != nil {
-consensus.getLogger().Warn().
+utils.Logger().Warn().
 Err(err).
 Uint64("MsgBlockNum", recvMsg.BlockNum).
 Msg("[OnPrepared] Unparseable block header data")
 return
 }
 if blockObj.NumberU64() != recvMsg.BlockNum || recvMsg.BlockNum < consensus.blockNum {
-consensus.getLogger().Warn().
+utils.Logger().Warn().
 Uint64("MsgBlockNum", recvMsg.BlockNum).
 Uint64("blockNum", blockObj.NumberU64()).
 Msg("[OnPrepared] BlockNum not match")
 return
 }
 if blockObj.Header().Hash() != recvMsg.BlockHash {
-consensus.getLogger().Warn().
+utils.Logger().Warn().
 Uint64("MsgBlockNum", recvMsg.BlockNum).
-Bytes("MsgBlockHash", recvMsg.BlockHash[:]).
+Hex("MsgBlockHash", recvMsg.BlockHash[:]).
 Str("blockObjHash", blockObj.Header().Hash().Hex()).
 Msg("[OnPrepared] BlockHash not match")
 return
 }
 if consensus.mode.Mode() == Normal {
 if err := chain.Engine.VerifyHeader(consensus.ChainReader, blockObj.Header(), true); err != nil {
-consensus.getLogger().Warn().
+utils.Logger().Error().
 Err(err).
 Str("inChain", consensus.ChainReader.CurrentHeader().Number().String()).
 Str("MsgBlockNum", blockObj.Header().Number().String()).
@@ -525,7 +521,7 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
 if consensus.BlockVerifier == nil {
 // do nothing
 } else if err := consensus.BlockVerifier(&blockObj); err != nil {
-consensus.getLogger().Error().Err(err).Msg("[OnPrepared] Block verification failed")
+utils.Logger().Error().Err(err).Msg("[OnPrepared] Block verification failed")
 return
 }
 }
@@ -533,10 +529,10 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
 consensus.PbftLog.AddBlock(&blockObj)
 recvMsg.Block = []byte{} // save memory space
 consensus.PbftLog.AddMessage(recvMsg)
-consensus.getLogger().Debug().
+utils.Logger().Debug().
 Uint64("MsgViewID", recvMsg.ViewID).
 Uint64("MsgBlockNum", recvMsg.BlockNum).
-Bytes("blockHash", recvMsg.BlockHash[:]).
+Hex("blockHash", recvMsg.BlockHash[:]).
 Msg("[OnPrepared] Prepared message and block added")
 consensus.mutex.Lock()
@@ -544,13 +540,13 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
 consensus.tryCatchup()
 if consensus.mode.Mode() == ViewChanging {
-consensus.getLogger().Debug().Msg("[OnPrepared] Still in ViewChanging mode, Exiting!!")
+utils.Logger().Debug().Msg("[OnPrepared] Still in ViewChanging mode, Exiting!!")
 return
 }
 if consensus.checkViewID(recvMsg) != nil {
 if consensus.mode.Mode() == Normal {
-consensus.getLogger().Debug().
+utils.Logger().Debug().
 Uint64("MsgViewID", recvMsg.ViewID).
 Uint64("MsgBlockNum", recvMsg.BlockNum).
 Msg("[OnPrepared] ViewID check failed")
@@ -558,7 +554,7 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
 return
 }
 if recvMsg.BlockNum > consensus.blockNum {
-consensus.getLogger().Debug().
+utils.Logger().Debug().
 Uint64("MsgBlockNum", recvMsg.BlockNum).
 Uint64("blockNum", consensus.blockNum).
 Msg("[OnPrepared] Future Block Received, ignoring!!")
@@ -593,15 +589,15 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
 }
 if err := consensus.msgSender.SendWithoutRetry([]p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend)); err != nil {
-consensus.getLogger().Warn().Msg("[OnPrepared] Cannot send commit message!!")
+utils.Logger().Warn().Msg("[OnPrepared] Cannot send commit message!!")
 } else {
-consensus.getLogger().Info().
+utils.Logger().Info().
 Uint64("blockNum", consensus.blockNum).
-Bytes("blockHash", consensus.blockHash[:]).
+Hex("blockHash", consensus.blockHash[:]).
 Msg("[OnPrepared] Sent Commit Message!!")
 }
-consensus.getLogger().Debug().
+utils.Logger().Debug().
 Str("From", consensus.phase.String()).
 Str("To", Commit.String()).
 Msg("[OnPrepared] Switching phase")
@@ -618,22 +614,22 @@ func (consensus *Consensus) onCommit(msg *msg_pb.Message) {
 senderKey, err := consensus.verifySenderKey(msg)
 if err != nil {
-consensus.getLogger().Debug().Err(err).Msg("[OnCommit] VerifySenderKey Failed")
+utils.Logger().Debug().Err(err).Msg("[OnCommit] VerifySenderKey Failed")
 return
 }
 if err = verifyMessageSig(senderKey, msg); err != nil {
-consensus.getLogger().Debug().Err(err).Msg("[OnCommit] Failed to verify sender's signature")
+utils.Logger().Debug().Err(err).Msg("[OnCommit] Failed to verify sender's signature")
 return
 }
 recvMsg, err := ParsePbftMessage(msg)
 if err != nil {
-consensus.getLogger().Debug().Err(err).Msg("[OnCommit] Parse pbft message failed")
+utils.Logger().Debug().Err(err).Msg("[OnCommit] Parse pbft message failed")
 return
 }
 if recvMsg.ViewID != consensus.viewID || recvMsg.BlockNum != consensus.blockNum {
-consensus.getLogger().Debug().
+utils.Logger().Debug().
 Uint64("MsgViewID", recvMsg.ViewID).
 Uint64("MsgBlockNum", recvMsg.BlockNum).
 Uint64("blockNum", consensus.blockNum).
@@ -643,8 +639,8 @@ func (consensus *Consensus) onCommit(msg *msg_pb.Message) {
 }
 if !consensus.PbftLog.HasMatchingAnnounce(consensus.blockNum, recvMsg.BlockHash) {
-consensus.getLogger().Debug().
-Bytes("MsgBlockHash", recvMsg.BlockHash[:]).
+utils.Logger().Debug().
+Hex("MsgBlockHash", recvMsg.BlockHash[:]).
 Uint64("MsgBlockNum", recvMsg.BlockNum).
 Uint64("blockNum", consensus.blockNum).
 Msg("[OnCommit] Cannot find matching blockhash")
@@ -652,8 +648,8 @@ func (consensus *Consensus) onCommit(msg *msg_pb.Message) {
 }
 if !consensus.PbftLog.HasMatchingPrepared(consensus.blockNum, recvMsg.BlockHash) {
-consensus.getLogger().Debug().
-Bytes("blockHash", recvMsg.BlockHash[:]).
+utils.Logger().Debug().
+Hex("blockHash", recvMsg.BlockHash[:]).
 Uint64("blockNum", consensus.blockNum).
 Msg("[OnCommit] Cannot find matching prepared message")
 return
@@ -666,7 +662,7 @@ func (consensus *Consensus) onCommit(msg *msg_pb.Message) {
 consensus.mutex.Lock()
 defer consensus.mutex.Unlock()
-logger := consensus.getLogger().With().Str("validatorPubKey", validatorPubKey).Logger()
+logger := utils.Logger().With().Str("validatorPubKey", validatorPubKey).Logger()
 if !consensus.IsValidatorInCommittee(recvMsg.SenderPubkey) {
 logger.Error().Msg("[OnCommit] Invalid validator")
 return
@@ -705,7 +701,7 @@ func (consensus *Consensus) onCommit(msg *msg_pb.Message) {
 commitSigs[validatorPubKey] = &sign
 // Set the bitmap indicating that this validator signed.
 if err := commitBitmap.SetKey(recvMsg.SenderPubkey, true); err != nil {
-consensus.getLogger().Warn().Err(err).Msg("[OnCommit] commitBitmap.SetKey failed")
+utils.Logger().Warn().Err(err).Msg("[OnCommit] commitBitmap.SetKey failed")
 return
 }
@@ -732,7 +728,7 @@ func (consensus *Consensus) onCommit(msg *msg_pb.Message) {
 }
 func (consensus *Consensus) finalizeCommits() {
-consensus.getLogger().Info().Int("NumCommits", len(consensus.commitSigs)).Msg("[Finalizing] Finalizing Block")
+utils.Logger().Info().Int("NumCommits", len(consensus.commitSigs)).Msg("[Finalizing] Finalizing Block")
 beforeCatchupNum := consensus.blockNum
 //beforeCatchupViewID := consensus.viewID
@@ -747,7 +743,7 @@ func (consensus *Consensus) finalizeCommits() {
 _ = protobuf.Unmarshal(msgPayload, msg)
 pbftMsg, err := ParsePbftMessage(msg)
 if err != nil {
-consensus.getLogger().Warn().Err(err).Msg("[FinalizeCommits] Unable to parse pbft message")
+utils.Logger().Warn().Err(err).Msg("[FinalizeCommits] Unable to parse pbft message")
 return
 }
 consensus.PbftLog.AddMessage(pbftMsg)
@@ -756,14 +752,14 @@ func (consensus *Consensus) finalizeCommits() {
 // find correct block content
 block := consensus.PbftLog.GetBlockByHash(consensus.blockHash)
 if block == nil {
-consensus.getLogger().Warn().
+utils.Logger().Warn().
 Str("blockHash", hex.EncodeToString(consensus.blockHash[:])).
 Msg("[FinalizeCommits] Cannot find block by hash")
 return
 }
 consensus.tryCatchup()
 if consensus.blockNum-beforeCatchupNum != 1 {
-consensus.getLogger().Warn().
+utils.Logger().Warn().
 Uint64("beforeCatchupBlockNum", beforeCatchupNum).
 Msg("[FinalizeCommits] Leader cannot provide the correct block for committed message")
 return
@@ -771,10 +767,10 @@ func (consensus *Consensus) finalizeCommits() {
 // if leader success finalize the block, send committed message to validators
 if err := consensus.msgSender.SendWithRetry(block.NumberU64(), msg_pb.MessageType_COMMITTED, []p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend)); err != nil {
-consensus.getLogger().Warn().Err(err).Msg("[Finalizing] Cannot send committed message")
+utils.Logger().Warn().Err(err).Msg("[Finalizing] Cannot send committed message")
 } else {
-consensus.getLogger().Info().
-Bytes("blockHash", consensus.blockHash[:]).
+utils.Logger().Info().
+Hex("blockHash", consensus.blockHash[:]).
 Uint64("blockNum", consensus.blockNum).
 Msg("[Finalizing] Sent Committed Message")
 }
@@ -789,13 +785,13 @@ func (consensus *Consensus) finalizeCommits() {
 if consensus.consensusTimeout[timeoutBootstrap].IsActive() {
 consensus.consensusTimeout[timeoutBootstrap].Stop()
-consensus.getLogger().Debug().Msg("[Finalizing] Start consensus timer; stop bootstrap timer only once")
+utils.Logger().Debug().Msg("[Finalizing] Start consensus timer; stop bootstrap timer only once")
 } else {
-consensus.getLogger().Debug().Msg("[Finalizing] Start consensus timer")
+utils.Logger().Debug().Msg("[Finalizing] Start consensus timer")
 }
 consensus.consensusTimeout[timeoutConsensus].Start()
-consensus.getLogger().Info().
+utils.Logger().Info().
 Uint64("blockNum", block.NumberU64()).
 Uint64("ViewId", block.Header().ViewID().Uint64()).
 Str("blockHash", block.Hash().String()).
@@ -809,7 +805,7 @@ func (consensus *Consensus) finalizeCommits() {
 }
 func (consensus *Consensus) onCommitted(msg *msg_pb.Message) {
-consensus.getLogger().Debug().Msg("[OnCommitted] Receive committed message")
+utils.Logger().Debug().Msg("[OnCommitted] Receive committed message")
 if consensus.IsLeader() && consensus.mode.Mode() == Normal {
 return
@@ -817,26 +813,26 @@ func (consensus *Consensus) onCommitted(msg *msg_pb.Message) {
 senderKey, err := consensus.verifySenderKey(msg)
 if err != nil {
-consensus.getLogger().Warn().Err(err).Msg("[OnCommitted] verifySenderKey failed")
+utils.Logger().Warn().Err(err).Msg("[OnCommitted] verifySenderKey failed")
 return
 }
 if !senderKey.IsEqual(consensus.LeaderPubKey) && consensus.mode.Mode() == Normal && !consensus.ignoreViewIDCheck {
-consensus.getLogger().Warn().Msg("[OnCommitted] senderKey not match leader PubKey")
+utils.Logger().Warn().Msg("[OnCommitted] senderKey not match leader PubKey")
 return
 }
 if err = verifyMessageSig(senderKey, msg); err != nil {
-consensus.getLogger().Warn().Err(err).Msg("[OnCommitted] Failed to verify sender's signature")
+utils.Logger().Warn().Err(err).Msg("[OnCommitted] Failed to verify sender's signature")
 return
 }
 recvMsg, err := ParsePbftMessage(msg)
 if err != nil {
-consensus.getLogger().Warn().Msg("[OnCommitted] unable to parse msg")
+utils.Logger().Warn().Msg("[OnCommitted] unable to parse msg")
 return
 }
 if recvMsg.BlockNum < consensus.blockNum {
-consensus.getLogger().Info().
+utils.Logger().Info().
 Uint64("MsgBlockNum", recvMsg.BlockNum).
 Uint64("blockNum", consensus.blockNum).
 Msg("[OnCommitted] Received Old Blocks!!")
@ -845,13 +841,13 @@ func (consensus *Consensus) onCommitted(msg *msg_pb.Message) {
aggSig, mask, err := consensus.ReadSignatureBitmapPayload(recvMsg.Payload, 0) aggSig, mask, err := consensus.ReadSignatureBitmapPayload(recvMsg.Payload, 0)
if err != nil { if err != nil {
consensus.getLogger().Error().Err(err).Msg("[OnCommitted] readSignatureBitmapPayload failed") utils.Logger().Error().Err(err).Msg("[OnCommitted] readSignatureBitmapPayload failed")
return return
} }
// check has 2f+1 signatures // check has 2f+1 signatures
if count := utils.CountOneBits(mask.Bitmap); count < consensus.Quorum() { if count := utils.CountOneBits(mask.Bitmap); count < consensus.Quorum() {
consensus.getLogger().Warn(). utils.Logger().Warn().
Int("need", consensus.Quorum()). Int("need", consensus.Quorum()).
Int("got", count). Int("got", count).
Msg("[OnCommitted] Not enough signature in committed msg") Msg("[OnCommitted] Not enough signature in committed msg")
@ -862,7 +858,7 @@ func (consensus *Consensus) onCommitted(msg *msg_pb.Message) {
binary.LittleEndian.PutUint64(blockNumBytes, recvMsg.BlockNum) binary.LittleEndian.PutUint64(blockNumBytes, recvMsg.BlockNum)
commitPayload := append(blockNumBytes, recvMsg.BlockHash[:]...) commitPayload := append(blockNumBytes, recvMsg.BlockHash[:]...)
if !aggSig.VerifyHash(mask.AggregatePublic, commitPayload) { if !aggSig.VerifyHash(mask.AggregatePublic, commitPayload) {
consensus.getLogger().Error(). utils.Logger().Error().
Uint64("MsgBlockNum", recvMsg.BlockNum). Uint64("MsgBlockNum", recvMsg.BlockNum).
Msg("[OnCommitted] Failed to verify the multi signature for commit phase") Msg("[OnCommitted] Failed to verify the multi signature for commit phase")
return return
@ -870,7 +866,7 @@ func (consensus *Consensus) onCommitted(msg *msg_pb.Message) {
consensus.PbftLog.AddMessage(recvMsg) consensus.PbftLog.AddMessage(recvMsg)
consensus.ChainReader.WriteLastCommits(recvMsg.Payload) consensus.ChainReader.WriteLastCommits(recvMsg.Payload)
consensus.getLogger().Debug(). utils.Logger().Debug().
Uint64("MsgViewID", recvMsg.ViewID). Uint64("MsgViewID", recvMsg.ViewID).
Uint64("MsgBlockNum", recvMsg.BlockNum). Uint64("MsgBlockNum", recvMsg.BlockNum).
Msg("[OnCommitted] Committed message added") Msg("[OnCommitted] Committed message added")
@ -882,7 +878,7 @@ func (consensus *Consensus) onCommitted(msg *msg_pb.Message) {
consensus.commitBitmap = mask consensus.commitBitmap = mask
if recvMsg.BlockNum-consensus.blockNum > consensusBlockNumBuffer { if recvMsg.BlockNum-consensus.blockNum > consensusBlockNumBuffer {
consensus.getLogger().Debug().Uint64("MsgBlockNum", recvMsg.BlockNum).Msg("[OnCommitted] out of sync") utils.Logger().Debug().Uint64("MsgBlockNum", recvMsg.BlockNum).Msg("[OnCommitted] out of sync")
go func() { go func() {
select { select {
case consensus.blockNumLowChan <- struct{}{}: case consensus.blockNumLowChan <- struct{}{}:
@ -897,21 +893,21 @@ func (consensus *Consensus) onCommitted(msg *msg_pb.Message) {
} }
// if consensus.checkViewID(recvMsg) != nil { // if consensus.checkViewID(recvMsg) != nil {
// consensus.getLogger().Debug("viewID check failed", "viewID", recvMsg.ViewID, "myViewID", consensus.viewID) // utils.Logger().Debug("viewID check failed", "viewID", recvMsg.ViewID, "myViewID", consensus.viewID)
// return // return
// } // }
consensus.tryCatchup() consensus.tryCatchup()
if consensus.mode.Mode() == ViewChanging { if consensus.mode.Mode() == ViewChanging {
consensus.getLogger().Debug().Msg("[OnCommitted] Still in ViewChanging mode, Exiting!!") utils.Logger().Debug().Msg("[OnCommitted] Still in ViewChanging mode, Exiting!!")
return return
} }
if consensus.consensusTimeout[timeoutBootstrap].IsActive() { if consensus.consensusTimeout[timeoutBootstrap].IsActive() {
consensus.consensusTimeout[timeoutBootstrap].Stop() consensus.consensusTimeout[timeoutBootstrap].Stop()
consensus.getLogger().Debug().Msg("[OnCommitted] Start consensus timer; stop bootstrap timer only once") utils.Logger().Debug().Msg("[OnCommitted] Start consensus timer; stop bootstrap timer only once")
} else { } else {
consensus.getLogger().Debug().Msg("[OnCommitted] Start consensus timer") utils.Logger().Debug().Msg("[OnCommitted] Start consensus timer")
} }
consensus.consensusTimeout[timeoutConsensus].Start() consensus.consensusTimeout[timeoutConsensus].Start()
return return
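The bootstrap timer is stopped only once, after which the regular consensus timer takes over. A toy deadline-based sketch of Start/Stop/IsActive semantics, written under the assumption that they behave as the hunk suggests; the real timeout type lives elsewhere in the repo and may differ.

```go
package main

import (
	"fmt"
	"time"
)

// timeout is a toy stand-in for the consensusTimeout entries used above:
// Start arms a deadline, Stop disarms it, IsActive reports whether it is armed.
type timeout struct {
	duration time.Duration
	deadline time.Time
	active   bool
}

func (t *timeout) Start()         { t.deadline = time.Now().Add(t.duration); t.active = true }
func (t *timeout) Stop()          { t.active = false }
func (t *timeout) IsActive() bool { return t.active }
func (t *timeout) Expired() bool  { return t.active && time.Now().After(t.deadline) }

func main() {
	bootstrap := &timeout{duration: 2 * time.Second}
	consensusTimer := &timeout{duration: time.Second}

	bootstrap.Start()
	if bootstrap.IsActive() {
		bootstrap.Stop() // stop bootstrap timer only once, as in the hunk above
	}
	consensusTimer.Start()
	fmt.Println("consensus timer armed:", consensusTimer.IsActive())
}
```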
@ -943,7 +939,7 @@ func (consensus *Consensus) LastCommitSig() ([]byte, []byte, error) {
// try to catch up if fallen behind // try to catch up if fallen behind
func (consensus *Consensus) tryCatchup() { func (consensus *Consensus) tryCatchup() {
consensus.getLogger().Info().Msg("[TryCatchup] commit new blocks") utils.Logger().Info().Msg("[TryCatchup] commit new blocks")
// if consensus.phase != Commit && consensus.mode.Mode() == Normal { // if consensus.phase != Commit && consensus.mode.Mode() == Normal {
// return // return
// } // }
@ -954,11 +950,11 @@ func (consensus *Consensus) tryCatchup() {
break break
} }
if len(msgs) > 1 { if len(msgs) > 1 {
consensus.getLogger().Error(). utils.Logger().Error().
Int("numMsgs", len(msgs)). Int("numMsgs", len(msgs)).
Msg("[TryCatchup] DANGER!!! we should only get one committed message for a given blockNum") Msg("[TryCatchup] DANGER!!! we should only get one committed message for a given blockNum")
} }
consensus.getLogger().Info().Msg("[TryCatchup] committed message found") utils.Logger().Info().Msg("[TryCatchup] committed message found")
block := consensus.PbftLog.GetBlockByHash(msgs[0].BlockHash) block := consensus.PbftLog.GetBlockByHash(msgs[0].BlockHash)
if block == nil { if block == nil {
@ -968,36 +964,36 @@ func (consensus *Consensus) tryCatchup() {
if consensus.BlockVerifier == nil { if consensus.BlockVerifier == nil {
// do nothing // do nothing
} else if err := consensus.BlockVerifier(block); err != nil { } else if err := consensus.BlockVerifier(block); err != nil {
consensus.getLogger().Info().Err(err).Msg("[TryCatchup]block verification faied") utils.Logger().Info().Msg("[TryCatchup]block verification faied")
return return
} }
if block.ParentHash() != consensus.ChainReader.CurrentHeader().Hash() { if block.ParentHash() != consensus.ChainReader.CurrentHeader().Hash() {
consensus.getLogger().Debug().Msg("[TryCatchup] parent block hash not match") utils.Logger().Debug().Msg("[TryCatchup] parent block hash not match")
break break
} }
consensus.getLogger().Info().Msg("[TryCatchup] block found to commit") utils.Logger().Info().Msg("[TryCatchup] block found to commit")
preparedMsgs := consensus.PbftLog.GetMessagesByTypeSeqHash(msg_pb.MessageType_PREPARED, msgs[0].BlockNum, msgs[0].BlockHash) preparedMsgs := consensus.PbftLog.GetMessagesByTypeSeqHash(msg_pb.MessageType_PREPARED, msgs[0].BlockNum, msgs[0].BlockHash)
msg := consensus.PbftLog.FindMessageByMaxViewID(preparedMsgs) msg := consensus.PbftLog.FindMessageByMaxViewID(preparedMsgs)
if msg == nil { if msg == nil {
break break
} }
consensus.getLogger().Info().Msg("[TryCatchup] prepared message found to commit") utils.Logger().Info().Msg("[TryCatchup] prepared message found to commit")
consensus.blockHash = [32]byte{} consensus.blockHash = [32]byte{}
consensus.blockNum = consensus.blockNum + 1 consensus.blockNum = consensus.blockNum + 1
consensus.viewID = msgs[0].ViewID + 1 consensus.viewID = msgs[0].ViewID + 1
consensus.LeaderPubKey = msgs[0].SenderPubkey consensus.LeaderPubKey = msgs[0].SenderPubkey
consensus.getLogger().Info().Msg("[TryCatchup] Adding block to chain") utils.Logger().Info().Msg("[TryCatchup] Adding block to chain")
consensus.OnConsensusDone(block, msgs[0].Payload) consensus.OnConsensusDone(block, msgs[0].Payload)
consensus.ResetState() consensus.ResetState()
select { select {
case consensus.VerifiedNewBlock <- block: case consensus.VerifiedNewBlock <- block:
default: default:
consensus.getLogger().Info(). utils.Logger().Info().
Str("blockHash", block.Hash().String()). Str("blockHash", block.Hash().String()).
Msg("[TryCatchup] consensus verified block send to chan failed") Msg("[TryCatchup] consensus verified block send to chan failed")
continue continue
@ -1006,7 +1002,7 @@ func (consensus *Consensus) tryCatchup() {
break break
} }
if currentBlockNum < consensus.blockNum { if currentBlockNum < consensus.blockNum {
consensus.getLogger().Info(). utils.Logger().Info().
Uint64("From", currentBlockNum). Uint64("From", currentBlockNum).
Uint64("To", consensus.blockNum). Uint64("To", consensus.blockNum).
Msg("[TryCatchup] Caught up!") Msg("[TryCatchup] Caught up!")
@ -1026,21 +1022,21 @@ func (consensus *Consensus) tryCatchup() {
func (consensus *Consensus) Start(blockChannel chan *types.Block, stopChan chan struct{}, stoppedChan chan struct{}, startChannel chan struct{}) { func (consensus *Consensus) Start(blockChannel chan *types.Block, stopChan chan struct{}, stoppedChan chan struct{}, startChannel chan struct{}) {
go func() { go func() {
if consensus.IsLeader() { if consensus.IsLeader() {
consensus.getLogger().Info().Time("time", time.Now()).Msg("[ConsensusMainLoop] Waiting for consensus start") utils.Logger().Info().Time("time", time.Now()).Msg("[ConsensusMainLoop] Waiting for consensus start")
<-startChannel <-startChannel
// send a signal to indicate it's ready to run consensus // send a signal to indicate it's ready to run consensus
// this signal is consumed by node object to create a new block and in turn trigger a new consensus on it // this signal is consumed by node object to create a new block and in turn trigger a new consensus on it
go func() { go func() {
consensus.getLogger().Info().Time("time", time.Now()).Msg("[ConsensusMainLoop] Send ReadySignal") utils.Logger().Info().Time("time", time.Now()).Msg("[ConsensusMainLoop] Send ReadySignal")
consensus.ReadySignal <- struct{}{} consensus.ReadySignal <- struct{}{}
}() }()
} }
consensus.getLogger().Info().Time("time", time.Now()).Msg("[ConsensusMainLoop] Consensus started") utils.Logger().Info().Time("time", time.Now()).Msg("[ConsensusMainLoop] Consensus started")
defer close(stoppedChan) defer close(stoppedChan)
ticker := time.NewTicker(3 * time.Second) ticker := time.NewTicker(3 * time.Second)
consensus.consensusTimeout[timeoutBootstrap].Start() consensus.consensusTimeout[timeoutBootstrap].Start()
consensus.getLogger().Debug(). utils.Logger().Debug().
Uint64("viewID", consensus.viewID). Uint64("viewID", consensus.viewID).
Uint64("blockNum", consensus.blockNum). Uint64("blockNum", consensus.blockNum).
Msg("[ConsensusMainLoop] Start bootstrap timeout (only once)") Msg("[ConsensusMainLoop] Start bootstrap timeout (only once)")
@ -1057,11 +1053,11 @@ func (consensus *Consensus) Start(blockChannel chan *types.Block, stopChan chan
continue continue
} }
if k != timeoutViewChange { if k != timeoutViewChange {
consensus.getLogger().Debug().Msg("[ConsensusMainLoop] Ops Consensus Timeout!!!") utils.Logger().Debug().Msg("[ConsensusMainLoop] Ops Consensus Timeout!!!")
consensus.startViewChange(consensus.viewID + 1) consensus.startViewChange(consensus.viewID + 1)
break break
} else { } else {
consensus.getLogger().Debug().Msg("[ConsensusMainLoop] Ops View Change Timeout!!!") utils.Logger().Debug().Msg("[ConsensusMainLoop] Ops View Change Timeout!!!")
viewID := consensus.mode.ViewID() viewID := consensus.mode.ViewID()
consensus.startViewChange(viewID + 1) consensus.startViewChange(viewID + 1)
break break
@ -1072,15 +1068,15 @@ func (consensus *Consensus) Start(blockChannel chan *types.Block, stopChan chan
consensus.SetViewID(consensus.ChainReader.CurrentHeader().ViewID().Uint64() + 1) consensus.SetViewID(consensus.ChainReader.CurrentHeader().ViewID().Uint64() + 1)
mode := consensus.UpdateConsensusInformation() mode := consensus.UpdateConsensusInformation()
consensus.mode.SetMode(mode) consensus.mode.SetMode(mode)
consensus.getLogger().Info().Str("Mode", mode.String()).Msg("Node is in sync") utils.Logger().Info().Str("Mode", mode.String()).Msg("Node is in sync")
case <-consensus.syncNotReadyChan: case <-consensus.syncNotReadyChan:
consensus.SetBlockNum(consensus.ChainReader.CurrentHeader().Number().Uint64() + 1) consensus.SetBlockNum(consensus.ChainReader.CurrentHeader().Number().Uint64() + 1)
consensus.mode.SetMode(Syncing) consensus.mode.SetMode(Syncing)
consensus.getLogger().Info().Msg("Node is out of sync") utils.Logger().Info().Msg("Node is out of sync")
case newBlock := <-blockChannel: case newBlock := <-blockChannel:
consensus.getLogger().Info(). utils.Logger().Info().
Uint64("MsgBlockNum", newBlock.NumberU64()). Uint64("MsgBlockNum", newBlock.NumberU64()).
Msg("[ConsensusMainLoop] Received Proposed New Block!") Msg("[ConsensusMainLoop] Received Proposed New Block!")
@ -1163,7 +1159,7 @@ func (consensus *Consensus) Start(blockChannel chan *types.Block, stopChan chan
startTime = time.Now() startTime = time.Now()
consensus.msgSender.Reset(newBlock.NumberU64()) consensus.msgSender.Reset(newBlock.NumberU64())
consensus.getLogger().Debug(). utils.Logger().Debug().
Int("numTxs", len(newBlock.Transactions())). Int("numTxs", len(newBlock.Transactions())).
Time("startTime", startTime). Time("startTime", startTime).
Int("publicKeys", len(consensus.PublicKeys)). Int("publicKeys", len(consensus.PublicKeys)).

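The bulk of the consensus_v2.go changes replace `consensus.getLogger()` and the older key-value logging calls with the zerolog-style `utils.Logger()` builder, and switch raw byte fields from `Bytes` to `Hex` so hashes render as readable hex strings. A minimal sketch of that structured-logging pattern using zerolog directly; the `Hex`, `Uint64`, `Int`, and `Msg` calls are the standard zerolog API, while the repo's `utils.Logger()` wrapper is not reproduced here.

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	logger := zerolog.New(os.Stdout).With().Timestamp().Logger()

	blockHash := []byte{0xde, 0xad, 0xbe, 0xef}

	// Bytes would emit the raw bytes (often unreadable for hashes);
	// Hex emits them as a hex string, which is why the diff switches to it.
	logger.Debug().
		Uint64("MsgBlockNum", 42).
		Hex("blockHash", blockHash).
		Msg("[OnCommitted] Committed message added")

	logger.Warn().
		Int("need", 3).
		Int("got", 2).
		Msg("[OnCommitted] Not enough signature in committed msg")
}
```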
@ -42,8 +42,8 @@ func (consensus *Consensus) constructViewChangeMessage() []byte {
vcMsg.Payload = append(msgToSign[:0:0], msgToSign...) vcMsg.Payload = append(msgToSign[:0:0], msgToSign...)
} }
consensus.getLogger().Debug(). utils.Logger().Debug().
Bytes("m1Payload", vcMsg.Payload). Hex("m1Payload", vcMsg.Payload).
Str("pubKey", consensus.PubKey.SerializeToHexStr()). Str("pubKey", consensus.PubKey.SerializeToHexStr()).
Msg("[constructViewChangeMessage]") Msg("[constructViewChangeMessage]")
@ -89,7 +89,7 @@ func (consensus *Consensus) constructNewViewMessage() []byte {
vcMsg.Payload = consensus.m1Payload vcMsg.Payload = consensus.m1Payload
sig2arr := consensus.GetNilSigsArray() sig2arr := consensus.GetNilSigsArray()
consensus.getLogger().Debug().Int("len", len(sig2arr)).Msg("[constructNewViewMessage] M2 (NIL) type signatures") utils.Logger().Debug().Int("len", len(sig2arr)).Msg("[constructNewViewMessage] M2 (NIL) type signatures")
if len(sig2arr) > 0 { if len(sig2arr) > 0 {
m2Sig := bls_cosi.AggregateSig(sig2arr) m2Sig := bls_cosi.AggregateSig(sig2arr)
vcMsg.M2Aggsigs = m2Sig.Serialize() vcMsg.M2Aggsigs = m2Sig.Serialize()
@ -97,7 +97,7 @@ func (consensus *Consensus) constructNewViewMessage() []byte {
} }
sig3arr := consensus.GetViewIDSigsArray() sig3arr := consensus.GetViewIDSigsArray()
consensus.getLogger().Debug().Int("len", len(sig3arr)).Msg("[constructNewViewMessage] M3 (ViewID) type signatures") utils.Logger().Debug().Int("len", len(sig3arr)).Msg("[constructNewViewMessage] M3 (ViewID) type signatures")
// even though we check here for safety, m3 type signatures must be >= 2f+1 // even though we check here for safety, m3 type signatures must be >= 2f+1
if len(sig3arr) > 0 { if len(sig3arr) > 0 {
m3Sig := bls_cosi.AggregateSig(sig3arr) m3Sig := bls_cosi.AggregateSig(sig3arr)

@ -123,7 +123,7 @@ func (consensus *Consensus) switchPhase(desirePhase PbftPhase, override bool) {
func (consensus *Consensus) GetNextLeaderKey() *bls.PublicKey { func (consensus *Consensus) GetNextLeaderKey() *bls.PublicKey {
idx := consensus.getIndexOfPubKey(consensus.LeaderPubKey) idx := consensus.getIndexOfPubKey(consensus.LeaderPubKey)
if idx == -1 { if idx == -1 {
consensus.getLogger().Warn(). utils.Logger().Warn().
Str("key", consensus.LeaderPubKey.SerializeToHexStr()). Str("key", consensus.LeaderPubKey.SerializeToHexStr()).
Msg("GetNextLeaderKey: currentLeaderKey not found") Msg("GetNextLeaderKey: currentLeaderKey not found")
} }
@ -142,7 +142,7 @@ func (consensus *Consensus) getIndexOfPubKey(pubKey *bls.PublicKey) int {
// ResetViewChangeState reset the state for viewchange // ResetViewChangeState reset the state for viewchange
func (consensus *Consensus) ResetViewChangeState() { func (consensus *Consensus) ResetViewChangeState() {
consensus.getLogger().Debug(). utils.Logger().Debug().
Str("Phase", consensus.phase.String()). Str("Phase", consensus.phase.String()).
Msg("[ResetViewChangeState] Resetting view change state") Msg("[ResetViewChangeState] Resetting view change state")
consensus.mode.SetMode(Normal) consensus.mode.SetMode(Normal)
@ -180,7 +180,7 @@ func (consensus *Consensus) startViewChange(viewID uint64) {
diff := viewID - consensus.viewID diff := viewID - consensus.viewID
duration := time.Duration(int64(diff) * int64(viewChangeDuration)) duration := time.Duration(int64(diff) * int64(viewChangeDuration))
consensus.getLogger().Info(). utils.Logger().Info().
Uint64("ViewChangingID", viewID). Uint64("ViewChangingID", viewID).
Dur("timeoutDuration", duration). Dur("timeoutDuration", duration).
Str("NextLeader", consensus.LeaderPubKey.SerializeToHexStr()). Str("NextLeader", consensus.LeaderPubKey.SerializeToHexStr()).
@ -191,7 +191,7 @@ func (consensus *Consensus) startViewChange(viewID uint64) {
consensus.consensusTimeout[timeoutViewChange].SetDuration(duration) consensus.consensusTimeout[timeoutViewChange].SetDuration(duration)
consensus.consensusTimeout[timeoutViewChange].Start() consensus.consensusTimeout[timeoutViewChange].Start()
consensus.getLogger().Debug(). utils.Logger().Debug().
Uint64("ViewChangingID", consensus.mode.ViewID()). Uint64("ViewChangingID", consensus.mode.ViewID()).
Msg("[startViewChange] start view change timer") Msg("[startViewChange] start view change timer")
} }
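`startViewChange` scales the view-change timeout by how far the requested view is ahead of the current one: `duration = (viewID - consensus.viewID) * viewChangeDuration`. A small sketch of that arithmetic, with `viewChangeDuration` as an assumed constant rather than the repo's actual value:

```go
package main

import (
	"fmt"
	"time"
)

// viewChangeDuration is an assumed base timeout; the repo defines its own value.
const viewChangeDuration = 30 * time.Second

// viewChangeTimeout grows linearly with the gap between the current view and
// the view being changed to, mirroring the computation in the hunk above.
func viewChangeTimeout(currentViewID, targetViewID uint64) time.Duration {
	diff := targetViewID - currentViewID
	return time.Duration(int64(diff) * int64(viewChangeDuration))
}

func main() {
	fmt.Println(viewChangeTimeout(10, 11)) // 30s
	fmt.Println(viewChangeTimeout(10, 13)) // 1m30s
}
```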
@ -199,7 +199,7 @@ func (consensus *Consensus) startViewChange(viewID uint64) {
func (consensus *Consensus) onViewChange(msg *msg_pb.Message) { func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
recvMsg, err := ParseViewChangeMessage(msg) recvMsg, err := ParseViewChangeMessage(msg)
if err != nil { if err != nil {
consensus.getLogger().Warn().Msg("[onViewChange] Unable To Parse Viewchange Message") utils.Logger().Warn().Msg("[onViewChange] Unable To Parse Viewchange Message")
return return
} }
newLeaderKey := recvMsg.LeaderPubkey newLeaderKey := recvMsg.LeaderPubkey
@ -208,7 +208,7 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
} }
if len(consensus.viewIDSigs) >= consensus.Quorum() { if len(consensus.viewIDSigs) >= consensus.Quorum() {
consensus.getLogger().Debug(). utils.Logger().Debug().
Int("have", len(consensus.viewIDSigs)). Int("have", len(consensus.viewIDSigs)).
Int("need", consensus.Quorum()). Int("need", consensus.Quorum()).
Str("validatorPubKey", recvMsg.SenderPubkey.SerializeToHexStr()). Str("validatorPubKey", recvMsg.SenderPubkey.SerializeToHexStr()).
@ -218,34 +218,34 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
senderKey, err := consensus.verifyViewChangeSenderKey(msg) senderKey, err := consensus.verifyViewChangeSenderKey(msg)
if err != nil { if err != nil {
consensus.getLogger().Debug().Err(err).Msg("[onViewChange] VerifySenderKey Failed") utils.Logger().Debug().Err(err).Msg("[onViewChange] VerifySenderKey Failed")
return return
} }
// TODO: if difference is only one, new leader can still propose the same committed block to avoid another view change // TODO: if difference is only one, new leader can still propose the same committed block to avoid another view change
if consensus.blockNum > recvMsg.BlockNum { if consensus.blockNum > recvMsg.BlockNum {
consensus.getLogger().Debug(). utils.Logger().Debug().
Uint64("MsgBlockNum", recvMsg.BlockNum). Uint64("MsgBlockNum", recvMsg.BlockNum).
Msg("[onViewChange] Message BlockNum Is Low") Msg("[onViewChange] Message BlockNum Is Low")
return return
} }
if consensus.blockNum < recvMsg.BlockNum { if consensus.blockNum < recvMsg.BlockNum {
consensus.getLogger().Warn(). utils.Logger().Warn().
Uint64("MsgBlockNum", recvMsg.BlockNum). Uint64("MsgBlockNum", recvMsg.BlockNum).
Msg("[onViewChange] New Leader Has Lower Blocknum") Msg("[onViewChange] New Leader Has Lower Blocknum")
return return
} }
if consensus.mode.Mode() == ViewChanging && consensus.mode.ViewID() > recvMsg.ViewID { if consensus.mode.Mode() == ViewChanging && consensus.mode.ViewID() > recvMsg.ViewID {
consensus.getLogger().Warn(). utils.Logger().Warn().
Uint64("MyViewChangingID", consensus.mode.ViewID()). Uint64("MyViewChangingID", consensus.mode.ViewID()).
Uint64("MsgViewChangingID", recvMsg.ViewID). Uint64("MsgViewChangingID", recvMsg.ViewID).
Msg("[onViewChange] ViewChanging ID Is Low") Msg("[onViewChange] ViewChanging ID Is Low")
return return
} }
if err = verifyMessageSig(senderKey, msg); err != nil { if err = verifyMessageSig(senderKey, msg); err != nil {
consensus.getLogger().Debug().Err(err).Msg("[onViewChange] Failed To Verify Sender's Signature") utils.Logger().Debug().Err(err).Msg("[onViewChange] Failed To Verify Sender's Signature")
return return
} }
@ -260,11 +260,11 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
preparedMsgs := consensus.PbftLog.GetMessagesByTypeSeq(msg_pb.MessageType_PREPARED, recvMsg.BlockNum) preparedMsgs := consensus.PbftLog.GetMessagesByTypeSeq(msg_pb.MessageType_PREPARED, recvMsg.BlockNum)
preparedMsg := consensus.PbftLog.FindMessageByMaxViewID(preparedMsgs) preparedMsg := consensus.PbftLog.FindMessageByMaxViewID(preparedMsgs)
if preparedMsg == nil { if preparedMsg == nil {
consensus.getLogger().Debug().Msg("[onViewChange] add my M2(NIL) type messaage") utils.Logger().Debug().Msg("[onViewChange] add my M2(NIL) type messaage")
consensus.nilSigs[consensus.PubKey.SerializeToHexStr()] = consensus.priKey.SignHash(NIL) consensus.nilSigs[consensus.PubKey.SerializeToHexStr()] = consensus.priKey.SignHash(NIL)
consensus.nilBitmap.SetKey(consensus.PubKey, true) consensus.nilBitmap.SetKey(consensus.PubKey, true)
} else { } else {
consensus.getLogger().Debug().Msg("[onViewChange] add my M1 type messaage") utils.Logger().Debug().Msg("[onViewChange] add my M1 type messaage")
msgToSign := append(preparedMsg.BlockHash[:], preparedMsg.Payload...) msgToSign := append(preparedMsg.BlockHash[:], preparedMsg.Payload...)
consensus.bhpSigs[consensus.PubKey.SerializeToHexStr()] = consensus.priKey.SignHash(msgToSign) consensus.bhpSigs[consensus.PubKey.SerializeToHexStr()] = consensus.priKey.SignHash(msgToSign)
consensus.bhpBitmap.SetKey(consensus.PubKey, true) consensus.bhpBitmap.SetKey(consensus.PubKey, true)
@ -283,18 +283,18 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
if len(recvMsg.Payload) == 0 { if len(recvMsg.Payload) == 0 {
_, ok := consensus.nilSigs[senderKey.SerializeToHexStr()] _, ok := consensus.nilSigs[senderKey.SerializeToHexStr()]
if ok { if ok {
consensus.getLogger().Debug(). utils.Logger().Debug().
Str("validatorPubKey", senderKey.SerializeToHexStr()). Str("validatorPubKey", senderKey.SerializeToHexStr()).
Msg("[onViewChange] Already Received M2 message from validator") Msg("[onViewChange] Already Received M2 message from validator")
return return
} }
if !recvMsg.ViewchangeSig.VerifyHash(senderKey, NIL) { if !recvMsg.ViewchangeSig.VerifyHash(senderKey, NIL) {
consensus.getLogger().Warn().Msg("[onViewChange] Failed To Verify Signature For M2 Type Viewchange Message") utils.Logger().Warn().Msg("[onViewChange] Failed To Verify Signature For M2 Type Viewchange Message")
return return
} }
consensus.getLogger().Debug(). utils.Logger().Debug().
Str("validatorPubKey", senderKey.SerializeToHexStr()). Str("validatorPubKey", senderKey.SerializeToHexStr()).
Msg("[onViewChange] Add M2 (NIL) type message") Msg("[onViewChange] Add M2 (NIL) type message")
consensus.nilSigs[senderKey.SerializeToHexStr()] = recvMsg.ViewchangeSig consensus.nilSigs[senderKey.SerializeToHexStr()] = recvMsg.ViewchangeSig
@ -302,20 +302,20 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
} else { // m1 type message } else { // m1 type message
_, ok := consensus.bhpSigs[senderKey.SerializeToHexStr()] _, ok := consensus.bhpSigs[senderKey.SerializeToHexStr()]
if ok { if ok {
consensus.getLogger().Debug(). utils.Logger().Debug().
Str("validatorPubKey", senderKey.SerializeToHexStr()). Str("validatorPubKey", senderKey.SerializeToHexStr()).
Msg("[onViewChange] Already Received M1 Message From the Validator") Msg("[onViewChange] Already Received M1 Message From the Validator")
return return
} }
if !recvMsg.ViewchangeSig.VerifyHash(recvMsg.SenderPubkey, recvMsg.Payload) { if !recvMsg.ViewchangeSig.VerifyHash(recvMsg.SenderPubkey, recvMsg.Payload) {
consensus.getLogger().Warn().Msg("[onViewChange] Failed to Verify Signature for M1 Type Viewchange Message") utils.Logger().Warn().Msg("[onViewChange] Failed to Verify Signature for M1 Type Viewchange Message")
return return
} }
// first time receiving an m1 type message; need to verify validity of the prepared message // first time receiving an m1 type message; need to verify validity of the prepared message
if len(consensus.m1Payload) == 0 || !bytes.Equal(consensus.m1Payload, recvMsg.Payload) { if len(consensus.m1Payload) == 0 || !bytes.Equal(consensus.m1Payload, recvMsg.Payload) {
if len(recvMsg.Payload) <= 32 { if len(recvMsg.Payload) <= 32 {
consensus.getLogger().Debug(). utils.Logger().Debug().
Int("len", len(recvMsg.Payload)). Int("len", len(recvMsg.Payload)).
Msg("[onViewChange] M1 RecvMsg Payload Not Enough Length") Msg("[onViewChange] M1 RecvMsg Payload Not Enough Length")
return return
@ -323,12 +323,12 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
blockHash := recvMsg.Payload[:32] blockHash := recvMsg.Payload[:32]
aggSig, mask, err := consensus.ReadSignatureBitmapPayload(recvMsg.Payload, 32) aggSig, mask, err := consensus.ReadSignatureBitmapPayload(recvMsg.Payload, 32)
if err != nil { if err != nil {
consensus.getLogger().Error().Err(err).Msg("[onViewChange] M1 RecvMsg Payload Read Error") utils.Logger().Error().Err(err).Msg("[onViewChange] M1 RecvMsg Payload Read Error")
return return
} }
// check that the m1 type message has 2f+1 signatures // check that the m1 type message has 2f+1 signatures
if count := utils.CountOneBits(mask.Bitmap); count < consensus.Quorum() { if count := utils.CountOneBits(mask.Bitmap); count < consensus.Quorum() {
consensus.getLogger().Debug(). utils.Logger().Debug().
Int("need", consensus.Quorum()). Int("need", consensus.Quorum()).
Int("have", count). Int("have", count).
Msg("[onViewChange] M1 Payload Not Have Enough Signature") Msg("[onViewChange] M1 Payload Not Have Enough Signature")
@ -337,8 +337,8 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
// Verify the multi-sig for prepare phase // Verify the multi-sig for prepare phase
if !aggSig.VerifyHash(mask.AggregatePublic, blockHash[:]) { if !aggSig.VerifyHash(mask.AggregatePublic, blockHash[:]) {
consensus.getLogger().Warn(). utils.Logger().Warn().
Bytes("blockHash", blockHash). Hex("blockHash", blockHash).
Msg("[onViewChange] failed to verify multi signature for m1 prepared payload") Msg("[onViewChange] failed to verify multi signature for m1 prepared payload")
return return
} }
@ -353,11 +353,11 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
preparedMsg.Payload = make([]byte, len(recvMsg.Payload)-32) preparedMsg.Payload = make([]byte, len(recvMsg.Payload)-32)
copy(preparedMsg.Payload[:], recvMsg.Payload[32:]) copy(preparedMsg.Payload[:], recvMsg.Payload[32:])
preparedMsg.SenderPubkey = consensus.PubKey preparedMsg.SenderPubkey = consensus.PubKey
consensus.getLogger().Info().Msg("[onViewChange] New Leader Prepared Message Added") utils.Logger().Info().Msg("[onViewChange] New Leader Prepared Message Added")
consensus.PbftLog.AddMessage(&preparedMsg) consensus.PbftLog.AddMessage(&preparedMsg)
} }
} }
consensus.getLogger().Debug(). utils.Logger().Debug().
Str("validatorPubKey", senderKey.SerializeToHexStr()). Str("validatorPubKey", senderKey.SerializeToHexStr()).
Msg("[onViewChange] Add M1 (prepared) type message") Msg("[onViewChange] Add M1 (prepared) type message")
consensus.bhpSigs[senderKey.SerializeToHexStr()] = recvMsg.ViewchangeSig consensus.bhpSigs[senderKey.SerializeToHexStr()] = recvMsg.ViewchangeSig
@ -367,7 +367,7 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
// check and add viewID (m3 type) message signature // check and add viewID (m3 type) message signature
_, ok := consensus.viewIDSigs[senderKey.SerializeToHexStr()] _, ok := consensus.viewIDSigs[senderKey.SerializeToHexStr()]
if ok { if ok {
consensus.getLogger().Debug(). utils.Logger().Debug().
Str("validatorPubKey", senderKey.SerializeToHexStr()). Str("validatorPubKey", senderKey.SerializeToHexStr()).
Msg("[onViewChange] Already Received M3(ViewID) message from the validator") Msg("[onViewChange] Already Received M3(ViewID) message from the validator")
return return
@ -375,17 +375,17 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
viewIDHash := make([]byte, 8) viewIDHash := make([]byte, 8)
binary.LittleEndian.PutUint64(viewIDHash, recvMsg.ViewID) binary.LittleEndian.PutUint64(viewIDHash, recvMsg.ViewID)
if !recvMsg.ViewidSig.VerifyHash(recvMsg.SenderPubkey, viewIDHash) { if !recvMsg.ViewidSig.VerifyHash(recvMsg.SenderPubkey, viewIDHash) {
consensus.getLogger().Warn(). utils.Logger().Warn().
Uint64("MsgViewID", recvMsg.ViewID). Uint64("MsgViewID", recvMsg.ViewID).
Msg("[onViewChange] Failed to Verify M3 Message Signature") Msg("[onViewChange] Failed to Verify M3 Message Signature")
return return
} }
consensus.getLogger().Debug(). utils.Logger().Debug().
Str("validatorPubKey", senderKey.SerializeToHexStr()). Str("validatorPubKey", senderKey.SerializeToHexStr()).
Msg("[onViewChange] Add M3 (ViewID) type message") Msg("[onViewChange] Add M3 (ViewID) type message")
consensus.viewIDSigs[senderKey.SerializeToHexStr()] = recvMsg.ViewidSig consensus.viewIDSigs[senderKey.SerializeToHexStr()] = recvMsg.ViewidSig
consensus.viewIDBitmap.SetKey(recvMsg.SenderPubkey, true) // Set the bitmap indicating that this validator signed. consensus.viewIDBitmap.SetKey(recvMsg.SenderPubkey, true) // Set the bitmap indicating that this validator signed.
consensus.getLogger().Debug(). utils.Logger().Debug().
Int("numSigs", len(consensus.viewIDSigs)). Int("numSigs", len(consensus.viewIDSigs)).
Int("needed", consensus.Quorum()). Int("needed", consensus.Quorum()).
Msg("[onViewChange]") Msg("[onViewChange]")
@ -400,7 +400,7 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
consensus.ReadySignal <- struct{}{} consensus.ReadySignal <- struct{}{}
}() }()
} else { } else {
consensus.getLogger().Debug(). utils.Logger().Debug().
Str("From", consensus.phase.String()). Str("From", consensus.phase.String()).
Str("To", Commit.String()). Str("To", Commit.String()).
Msg("[OnViewChange] Switching phase") Msg("[OnViewChange] Switching phase")
@ -408,7 +408,7 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
copy(consensus.blockHash[:], consensus.m1Payload[:32]) copy(consensus.blockHash[:], consensus.m1Payload[:32])
aggSig, mask, err := consensus.ReadSignatureBitmapPayload(recvMsg.Payload, 32) aggSig, mask, err := consensus.ReadSignatureBitmapPayload(recvMsg.Payload, 32)
if err != nil { if err != nil {
consensus.getLogger().Error().Err(err).Msg("[onViewChange] ReadSignatureBitmapPayload Fail") utils.Logger().Error().Err(err).Msg("[onViewChange] ReadSignatureBitmapPayload Fail")
return return
} }
consensus.aggregatedPrepareSig = aggSig consensus.aggregatedPrepareSig = aggSig
@ -420,7 +420,7 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
commitPayload := append(blockNumBytes, consensus.blockHash[:]...) commitPayload := append(blockNumBytes, consensus.blockHash[:]...)
consensus.commitSigs[consensus.PubKey.SerializeToHexStr()] = consensus.priKey.SignHash(commitPayload) consensus.commitSigs[consensus.PubKey.SerializeToHexStr()] = consensus.priKey.SignHash(commitPayload)
if err = consensus.commitBitmap.SetKey(consensus.PubKey, true); err != nil { if err = consensus.commitBitmap.SetKey(consensus.PubKey, true); err != nil {
consensus.getLogger().Debug().Msg("[OnViewChange] New Leader commit bitmap set failed") utils.Logger().Debug().Msg("[OnViewChange] New Leader commit bitmap set failed")
return return
} }
} }
@ -428,9 +428,9 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
consensus.mode.SetViewID(recvMsg.ViewID) consensus.mode.SetViewID(recvMsg.ViewID)
msgToSend := consensus.constructNewViewMessage() msgToSend := consensus.constructNewViewMessage()
consensus.getLogger().Warn(). utils.Logger().Warn().
Int("payloadSize", len(consensus.m1Payload)). Int("payloadSize", len(consensus.m1Payload)).
Bytes("M1Payload", consensus.m1Payload). Hex("M1Payload", consensus.m1Payload).
Msg("[onViewChange] Sent NewView Message") Msg("[onViewChange] Sent NewView Message")
consensus.msgSender.SendWithRetry(consensus.blockNum, msg_pb.MessageType_NEWVIEW, []p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend)) consensus.msgSender.SendWithRetry(consensus.blockNum, msg_pb.MessageType_NEWVIEW, []p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend))
@ -438,10 +438,10 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
consensus.ResetViewChangeState() consensus.ResetViewChangeState()
consensus.consensusTimeout[timeoutViewChange].Stop() consensus.consensusTimeout[timeoutViewChange].Stop()
consensus.consensusTimeout[timeoutConsensus].Start() consensus.consensusTimeout[timeoutConsensus].Start()
consensus.getLogger().Debug(). utils.Logger().Debug().
Uint64("viewChangingID", consensus.mode.ViewID()). Uint64("viewChangingID", consensus.mode.ViewID()).
Msg("[onViewChange] New Leader Start Consensus Timer and Stop View Change Timer") Msg("[onViewChange] New Leader Start Consensus Timer and Stop View Change Timer")
consensus.getLogger().Debug(). utils.Logger().Debug().
Str("myKey", consensus.PubKey.SerializeToHexStr()). Str("myKey", consensus.PubKey.SerializeToHexStr()).
Uint64("viewID", consensus.viewID). Uint64("viewID", consensus.viewID).
Uint64("block", consensus.blockNum). Uint64("block", consensus.blockNum).
@ -451,27 +451,27 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
// TODO: move to consensus_leader.go later // TODO: move to consensus_leader.go later
func (consensus *Consensus) onNewView(msg *msg_pb.Message) { func (consensus *Consensus) onNewView(msg *msg_pb.Message) {
consensus.getLogger().Debug().Msg("[onNewView] Received NewView Message") utils.Logger().Debug().Msg("[onNewView] Received NewView Message")
senderKey, err := consensus.verifyViewChangeSenderKey(msg) senderKey, err := consensus.verifyViewChangeSenderKey(msg)
if err != nil { if err != nil {
consensus.getLogger().Warn().Err(err).Msg("[onNewView] VerifySenderKey Failed") utils.Logger().Warn().Err(err).Msg("[onNewView] VerifySenderKey Failed")
return return
} }
recvMsg, err := consensus.ParseNewViewMessage(msg) recvMsg, err := consensus.ParseNewViewMessage(msg)
if err != nil { if err != nil {
consensus.getLogger().Warn().Err(err).Msg("[onNewView] Unable to Parse NewView Message") utils.Logger().Warn().Err(err).Msg("[onNewView] Unable to Parse NewView Message")
return return
} }
if err = verifyMessageSig(senderKey, msg); err != nil { if err = verifyMessageSig(senderKey, msg); err != nil {
consensus.getLogger().Error().Err(err).Msg("[onNewView] Failed to Verify New Leader's Signature") utils.Logger().Error().Err(err).Msg("[onNewView] Failed to Verify New Leader's Signature")
return return
} }
consensus.vcLock.Lock() consensus.vcLock.Lock()
defer consensus.vcLock.Unlock() defer consensus.vcLock.Unlock()
if recvMsg.M3AggSig == nil || recvMsg.M3Bitmap == nil { if recvMsg.M3AggSig == nil || recvMsg.M3Bitmap == nil {
consensus.getLogger().Error().Msg("[onNewView] M3AggSig or M3Bitmap is nil") utils.Logger().Error().Msg("[onNewView] M3AggSig or M3Bitmap is nil")
return return
} }
m3Sig := recvMsg.M3AggSig m3Sig := recvMsg.M3AggSig
@ -481,7 +481,7 @@ func (consensus *Consensus) onNewView(msg *msg_pb.Message) {
binary.LittleEndian.PutUint64(viewIDBytes, recvMsg.ViewID) binary.LittleEndian.PutUint64(viewIDBytes, recvMsg.ViewID)
// check total number of sigs >= 2f+1 // check total number of sigs >= 2f+1
if count := utils.CountOneBits(m3Mask.Bitmap); count < consensus.Quorum() { if count := utils.CountOneBits(m3Mask.Bitmap); count < consensus.Quorum() {
consensus.getLogger().Debug(). utils.Logger().Debug().
Int("need", consensus.Quorum()). Int("need", consensus.Quorum()).
Int("have", count). Int("have", count).
Msg("[onNewView] Not Have Enough M3 (ViewID) Signature") Msg("[onNewView] Not Have Enough M3 (ViewID) Signature")
@ -489,9 +489,9 @@ func (consensus *Consensus) onNewView(msg *msg_pb.Message) {
} }
if !m3Sig.VerifyHash(m3Mask.AggregatePublic, viewIDBytes) { if !m3Sig.VerifyHash(m3Mask.AggregatePublic, viewIDBytes) {
consensus.getLogger().Warn(). utils.Logger().Warn().
Str("m3Sig", m3Sig.SerializeToHexStr()). Str("m3Sig", m3Sig.SerializeToHexStr()).
Bytes("m3Mask", m3Mask.Bitmap). Hex("m3Mask", m3Mask.Bitmap).
Uint64("MsgViewID", recvMsg.ViewID). Uint64("MsgViewID", recvMsg.ViewID).
Msg("[onNewView] Unable to Verify Aggregated Signature of M3 (ViewID) payload") Msg("[onNewView] Unable to Verify Aggregated Signature of M3 (ViewID) payload")
return return
@ -499,10 +499,10 @@ func (consensus *Consensus) onNewView(msg *msg_pb.Message) {
m2Mask := recvMsg.M2Bitmap m2Mask := recvMsg.M2Bitmap
if recvMsg.M2AggSig != nil { if recvMsg.M2AggSig != nil {
consensus.getLogger().Debug().Msg("[onNewView] M2AggSig (NIL) is Not Empty") utils.Logger().Debug().Msg("[onNewView] M2AggSig (NIL) is Not Empty")
m2Sig := recvMsg.M2AggSig m2Sig := recvMsg.M2AggSig
if !m2Sig.VerifyHash(m2Mask.AggregatePublic, NIL) { if !m2Sig.VerifyHash(m2Mask.AggregatePublic, NIL) {
consensus.getLogger().Warn().Msg("[onNewView] Unable to Verify Aggregated Signature of M2 (NIL) payload") utils.Logger().Warn().Msg("[onNewView] Unable to Verify Aggregated Signature of M2 (NIL) payload")
return return
} }
} }
@ -510,18 +510,18 @@ func (consensus *Consensus) onNewView(msg *msg_pb.Message) {
// when there are more M3 sigs than M2 sigs, M1 (recvMsg.Payload) must not be empty // when there are more M3 sigs than M2 sigs, M1 (recvMsg.Payload) must not be empty
if m2Mask == nil || m2Mask.Bitmap == nil || (m2Mask != nil && m2Mask.Bitmap != nil && utils.CountOneBits(m3Mask.Bitmap) > utils.CountOneBits(m2Mask.Bitmap)) { if m2Mask == nil || m2Mask.Bitmap == nil || (m2Mask != nil && m2Mask.Bitmap != nil && utils.CountOneBits(m3Mask.Bitmap) > utils.CountOneBits(m2Mask.Bitmap)) {
if len(recvMsg.Payload) <= 32 { if len(recvMsg.Payload) <= 32 {
consensus.getLogger().Debug().Msg("[onNewView] M1 (prepared) Type Payload Not Have Enough Length") utils.Logger().Debug().Msg("[onNewView] M1 (prepared) Type Payload Not Have Enough Length")
return return
} }
// m1 is not empty; check that it is valid // m1 is not empty; check that it is valid
blockHash := recvMsg.Payload[:32] blockHash := recvMsg.Payload[:32]
aggSig, mask, err := consensus.ReadSignatureBitmapPayload(recvMsg.Payload, 32) aggSig, mask, err := consensus.ReadSignatureBitmapPayload(recvMsg.Payload, 32)
if err != nil { if err != nil {
consensus.getLogger().Error().Err(err).Msg("[onNewView] ReadSignatureBitmapPayload Failed") utils.Logger().Error().Err(err).Msg("[onNewView] ReadSignatureBitmapPayload Failed")
return return
} }
if !aggSig.VerifyHash(mask.AggregatePublic, blockHash) { if !aggSig.VerifyHash(mask.AggregatePublic, blockHash) {
consensus.getLogger().Warn().Msg("[onNewView] Failed to Verify Signature for M1 (prepare) message") utils.Logger().Warn().Msg("[onNewView] Failed to Verify Signature for M1 (prepare) message")
return return
} }
copy(consensus.blockHash[:], blockHash) copy(consensus.blockHash[:], blockHash)
@ -546,7 +546,7 @@ func (consensus *Consensus) onNewView(msg *msg_pb.Message) {
// change view and leaderKey to keep in sync with network // change view and leaderKey to keep in sync with network
if consensus.blockNum != recvMsg.BlockNum { if consensus.blockNum != recvMsg.BlockNum {
consensus.getLogger().Debug(). utils.Logger().Debug().
Str("newLeaderKey", consensus.LeaderPubKey.SerializeToHexStr()). Str("newLeaderKey", consensus.LeaderPubKey.SerializeToHexStr()).
Uint64("MsgBlockNum", recvMsg.BlockNum). Uint64("MsgBlockNum", recvMsg.BlockNum).
Msg("[onNewView] New Leader Changed") Msg("[onNewView] New Leader Changed")
@ -561,21 +561,21 @@ func (consensus *Consensus) onNewView(msg *msg_pb.Message) {
commitPayload := append(blockNumHash, consensus.blockHash[:]...) commitPayload := append(blockNumHash, consensus.blockHash[:]...)
msgToSend := consensus.constructCommitMessage(commitPayload) msgToSend := consensus.constructCommitMessage(commitPayload)
consensus.getLogger().Info().Msg("onNewView === commit") utils.Logger().Info().Msg("onNewView === commit")
consensus.host.SendMessageToGroups([]p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend)) consensus.host.SendMessageToGroups([]p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend))
consensus.getLogger().Debug(). utils.Logger().Debug().
Str("From", consensus.phase.String()). Str("From", consensus.phase.String()).
Str("To", Commit.String()). Str("To", Commit.String()).
Msg("[OnViewChange] Switching phase") Msg("[OnViewChange] Switching phase")
consensus.switchPhase(Commit, true) consensus.switchPhase(Commit, true)
} else { } else {
consensus.ResetState() consensus.ResetState()
consensus.getLogger().Info().Msg("onNewView === announce") utils.Logger().Info().Msg("onNewView === announce")
} }
consensus.getLogger().Debug(). utils.Logger().Debug().
Str("newLeaderKey", consensus.LeaderPubKey.SerializeToHexStr()). Str("newLeaderKey", consensus.LeaderPubKey.SerializeToHexStr()).
Msg("new leader changed") Msg("new leader changed")
consensus.getLogger().Debug().Msg("validator start consensus timer and stop view change timer") utils.Logger().Debug().Msg("validator start consensus timer and stop view change timer")
consensus.consensusTimeout[timeoutConsensus].Start() consensus.consensusTimeout[timeoutConsensus].Start()
consensus.consensusTimeout[timeoutViewChange].Stop() consensus.consensusTimeout[timeoutViewChange].Stop()
} }

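Across view_change.go the protocol juggles three signature sets: M1 over the prepared block-hash payload, M2 over a fixed NIL value when no prepared block exists, and M3 over the little-endian-encoded view ID; a new view is announced once M3 reaches quorum. A compact sketch of those signing targets, where the NIL value and helper names are illustrative rather than the repo's definitions:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// nilTarget is an illustrative placeholder for the NIL value signed by M2.
var nilTarget = []byte{0x01}

// m1Target: block hash plus prepared payload (validator had a prepared block),
// matching the append(preparedMsg.BlockHash[:], preparedMsg.Payload...) shape above.
func m1Target(blockHash [32]byte, preparedPayload []byte) []byte {
	return append(blockHash[:], preparedPayload...)
}

// m3Target: the view ID being voted on, encoded as 8 little-endian bytes.
func m3Target(viewID uint64) []byte {
	b := make([]byte, 8)
	binary.LittleEndian.PutUint64(b, viewID)
	return b
}

func main() {
	var hash [32]byte
	fmt.Println(len(m1Target(hash, []byte{0xaa, 0xbb}))) // 34
	fmt.Println(m3Target(7))                             // [7 0 0 0 0 0 0 0]
	fmt.Println(nilTarget)                               // M2 signs a fixed NIL value
}
```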
@ -281,7 +281,7 @@ func (bc *BlockChain) loadLastState() error {
currentBlock := bc.GetBlockByHash(head) currentBlock := bc.GetBlockByHash(head)
if currentBlock == nil { if currentBlock == nil {
// Corrupt or empty database, init from scratch // Corrupt or empty database, init from scratch
utils.Logger().Warn().Bytes("hash", head.Bytes()).Msg("Head block missing, resetting chain") utils.Logger().Warn().Str("hash", head.Hex()).Msg("Head block missing, resetting chain")
return bc.Reset() return bc.Reset()
} }
// Make sure the state associated with the block is available // Make sure the state associated with the block is available

@ -472,6 +472,9 @@ func WriteLastCommits(
if err = db.Put(lastCommitsKey, data); err != nil { if err = db.Put(lastCommitsKey, data); err != nil {
return ctxerror.New("cannot write last commits").WithCause(err) return ctxerror.New("cannot write last commits").WithCause(err)
} }
utils.Logger().Info().
Int("numShards", len(data)).
Msg("wrote last commits")
return nil return nil
} }

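The accessors_chain.go hunk adds an Info log after the last-commits write succeeds (note it logs `len(data)` under the `numShards` key). A sketch of that write-then-log shape against a generic Put interface; the `putter` interface, in-memory DB, and error wrapping here are assumptions standing in for the repo's DB and ctxerror types.

```go
package main

import (
	"fmt"
	"os"

	"github.com/rs/zerolog"
)

// putter is a minimal stand-in for the database writer used by the accessor.
type putter interface {
	Put(key, value []byte) error
}

type memDB map[string][]byte

func (m memDB) Put(key, value []byte) error { m[string(key)] = value; return nil }

var lastCommitsKey = []byte("LastCommits")

// writeLastCommits stores the last-commits blob and logs its size, mirroring
// the log statement added in the hunk above (field name kept for flavor).
func writeLastCommits(db putter, logger zerolog.Logger, data []byte) error {
	if err := db.Put(lastCommitsKey, data); err != nil {
		return fmt.Errorf("cannot write last commits: %w", err)
	}
	logger.Info().Int("numShards", len(data)).Msg("wrote last commits")
	return nil
}

func main() {
	logger := zerolog.New(os.Stdout)
	_ = writeLastCommits(memDB{}, logger, []byte{1, 2, 3})
}
```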
@ -33,7 +33,7 @@ func ReadTxLookupEntry(db DatabaseReader, hash common.Hash) (common.Hash, uint64
} }
var entry TxLookupEntry var entry TxLookupEntry
if err := rlp.DecodeBytes(data, &entry); err != nil { if err := rlp.DecodeBytes(data, &entry); err != nil {
utils.Logger().Error().Err(err).Bytes("hash", hash.Bytes()).Msg("Invalid transaction lookup entry RLP") utils.Logger().Error().Err(err).Str("hash", hash.Hex()).Msg("Invalid transaction lookup entry RLP")
return common.Hash{}, 0, 0 return common.Hash{}, 0, 0
} }
return entry.BlockHash, entry.BlockIndex, entry.Index return entry.BlockHash, entry.BlockIndex, entry.Index

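ReadTxLookupEntry RLP-decodes a small lookup record and now logs the offending hash as a hex string on failure. A sketch of that decode path using go-ethereum's rlp package; the struct shape is illustrative of a positional tx-lookup entry.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

// txLookupEntry mirrors the shape of a positional tx-lookup record:
// which block the transaction is in and where it sits.
type txLookupEntry struct {
	BlockHash  [32]byte
	BlockIndex uint64
	Index      uint64
}

func main() {
	// Round-trip an entry; a corrupted blob would make DecodeBytes return an
	// error, which the accessor logs and then swallows by returning zero values.
	data, err := rlp.EncodeToBytes(txLookupEntry{BlockIndex: 7, Index: 2})
	if err != nil {
		panic(err)
	}
	var entry txLookupEntry
	if err := rlp.DecodeBytes(data, &entry); err != nil {
		fmt.Println("invalid transaction lookup entry RLP:", err)
		return
	}
	fmt.Println(entry.BlockIndex, entry.Index)
}
```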
@ -52,7 +52,7 @@ func ReadChainConfig(db DatabaseReader, hash common.Hash) *params.ChainConfig {
} }
var config params.ChainConfig var config params.ChainConfig
if err := json.Unmarshal(data, &config); err != nil { if err := json.Unmarshal(data, &config); err != nil {
utils.Logger().Error().Err(err).Bytes("hash", hash.Bytes()).Msg("Invalid chain config JSON") utils.Logger().Error().Err(err).Str("hash", hash.Hex()).Msg("Invalid chain config JSON")
return nil return nil
} }
return &config return &config

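ReadChainConfig takes the same approach for JSON: unmarshal the stored config and, on failure, log the hash as hex. A minimal sketch with an illustrative config struct (the real `params.ChainConfig` has many more fields):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// chainConfig is an illustrative subset of a chain configuration.
type chainConfig struct {
	ChainID uint64 `json:"chainId"`
	EIP155  uint64 `json:"eip155Block"`
}

func main() {
	data := []byte(`{"chainId": 1, "eip155Block": 0}`)
	var cfg chainConfig
	if err := json.Unmarshal(data, &cfg); err != nil {
		fmt.Println("invalid chain config JSON:", err)
		return
	}
	fmt.Printf("chain %d\n", cfg.ChainID)
}
```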
@ -78,7 +78,7 @@ func (dRand *DRand) init(epochBlock *types.Block) {
(*dRand.vrfs)[dRand.SelfAddress] = append(rand[:], proof...) (*dRand.vrfs)[dRand.SelfAddress] = append(rand[:], proof...)
utils.Logger().Info(). utils.Logger().Info().
Bytes("msg", msgToSend). Hex("msg", msgToSend).
Str("leader.PubKey", dRand.leader.ConsensusPubKey.SerializeToHexStr()). Str("leader.PubKey", dRand.leader.ConsensusPubKey.SerializeToHexStr()).
Msg("[DRG] sent init") Msg("[DRG] sent init")
dRand.host.SendMessageToGroups([]p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(dRand.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend)) dRand.host.SendMessageToGroups([]p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(dRand.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend))
@ -167,8 +167,8 @@ func (dRand *DRand) processCommitMessage(message *msg_pb.Message) {
utils.Logger().Error(). utils.Logger().Error().
Err(err). Err(err).
Str("validatorAddress", validatorAddress). Str("validatorAddress", validatorAddress).
Bytes("expectedRand", expectedRand[:]). Hex("expectedRand", expectedRand[:]).
Bytes("receivedRand", rand[:]). Hex("receivedRand", rand[:]).
Msg("[DRAND] Failed to verify the VRF") Msg("[DRAND] Failed to verify the VRF")
return return
} }

@ -76,7 +76,9 @@ func (attack *Model) NodeKilledByItSelf() {
} }
if rand.Intn(HitRate) == 0 { if rand.Intn(HitRate) == 0 {
utils.GetLogInstance().Debug("******************Killing myself******************", "PID: ", os.Getpid()) utils.Logger().Debug().
Int("PID", os.Getpid()).
Msg("******************Killing myself******************")
os.Exit(1) os.Exit(1)
} }
} }
@ -87,7 +89,9 @@ func (attack *Model) DelayResponse() {
return return
} }
if rand.Intn(HitRate) == 0 { if rand.Intn(HitRate) == 0 {
utils.GetLogInstance().Debug("******************Model: DelayResponse******************", "PID: ", os.Getpid()) utils.Logger().Debug().
Int("PID", os.Getpid()).
Msg("******************Model: DelayResponse******************")
time.Sleep(DelayResponseDuration) time.Sleep(DelayResponseDuration)
} }
} }
@ -98,7 +102,9 @@ func (attack *Model) IncorrectResponse() bool {
return false return false
} }
if rand.Intn(HitRate) == 0 { if rand.Intn(HitRate) == 0 {
utils.GetLogInstance().Debug("******************Model: IncorrectResponse******************", "PID: ", os.Getpid()) utils.Logger().Debug().
Int("PID", os.Getpid()).
Msg("******************Model: IncorrectResponse******************")
return true return true
} }
return false return false

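The attack model fires each fault with probability 1/HitRate via `rand.Intn(HitRate) == 0`, and the diff moves its logging onto the structured builder with the process ID as an Int field. A small sketch of that trigger pattern; the `hitRate` value and `maybeDelayResponse` name are illustrative.

```go
package main

import (
	"fmt"
	"math/rand"
	"os"
)

// hitRate of N means each call fires with probability 1/N (assumed value).
const hitRate = 10

// maybeDelayResponse mimics the shape of the fault-injection hooks above:
// roll the dice, log with the process ID, then act.
func maybeDelayResponse() bool {
	if rand.Intn(hitRate) == 0 {
		fmt.Printf("PID %d: injecting delayed response\n", os.Getpid())
		return true
	}
	return false
}

func main() {
	fired := 0
	for i := 0; i < 100; i++ {
		if maybeDelayResponse() {
			fired++
		}
	}
	fmt.Printf("fired %d/100 times (expected around 10)\n", fired)
}
```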
@ -24,7 +24,7 @@ const (
mainnetV0_4Epoch = 10 mainnetV0_4Epoch = 10
mainnetV1Epoch = 12 mainnetV1Epoch = 12
mainnetV1_1Epoch = 19 mainnetV1_1Epoch = 19
mainnetV1_2Epoch = 21 mainnetV1_2Epoch = 25
mainnetMaxTxAmountLimit = 1e3 // unit is in One mainnetMaxTxAmountLimit = 1e3 // unit is in One
mainnetMaxNumRecentTxsPerAccountLimit = 1e2 mainnetMaxNumRecentTxsPerAccountLimit = 1e2
@ -47,7 +47,7 @@ type mainnetSchedule struct{}
func (mainnetSchedule) InstanceForEpoch(epoch *big.Int) Instance { func (mainnetSchedule) InstanceForEpoch(epoch *big.Int) Instance {
switch { switch {
case epoch.Cmp(big.NewInt(mainnetV1_2Epoch)) >= 0: case epoch.Cmp(big.NewInt(mainnetV1_2Epoch)) >= 0:
// twenty-first resharding epoch around 08/30/2019 11:35pm PDT // twenty-fifth resharding epoch around 09/06/2019 5:31am PDT
return mainnetV1_2 return mainnetV1_2
case epoch.Cmp(big.NewInt(mainnetV1_1Epoch)) >= 0: case epoch.Cmp(big.NewInt(mainnetV1_1Epoch)) >= 0:
// nineteenth resharding epoch around 08/27/2019 9:07pm PDT // nineteenth resharding epoch around 08/27/2019 9:07pm PDT

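InstanceForEpoch walks the epoch thresholds from newest to oldest with big.Int comparisons, and this hunk moves the V1_2 cutover from epoch 21 to epoch 25. A sketch of that selection logic with illustrative instance names returned as strings:

```go
package main

import (
	"fmt"
	"math/big"
)

// Illustrative epoch thresholds; the V1_2 cutover moves to 25 in this diff.
const (
	v1Epoch   = 12
	v1_1Epoch = 19
	v1_2Epoch = 25
)

// instanceForEpoch picks the newest schedule whose starting epoch has been
// reached, mirroring the switch in mainnetSchedule.InstanceForEpoch.
func instanceForEpoch(epoch *big.Int) string {
	switch {
	case epoch.Cmp(big.NewInt(v1_2Epoch)) >= 0:
		return "mainnetV1_2"
	case epoch.Cmp(big.NewInt(v1_1Epoch)) >= 0:
		return "mainnetV1_1"
	case epoch.Cmp(big.NewInt(v1Epoch)) >= 0:
		return "mainnetV1"
	default:
		return "mainnetV0"
	}
}

func main() {
	for _, e := range []int64{5, 12, 21, 25, 30} {
		fmt.Println(e, "=>", instanceForEpoch(big.NewInt(e)))
	}
}
```

With the new threshold, epoch 21 now resolves to mainnetV1_1 rather than mainnetV1_2, which is why the test table below also changes.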
@ -24,13 +24,9 @@ func TestMainnetInstanceForEpoch(t *testing.T) {
mainnetV1_1, mainnetV1_1,
}, },
{ {
big.NewInt(21), big.NewInt(25),
mainnetV1_2, mainnetV1_2,
}, },
{
big.NewInt(8),
mainnetV0_3,
},
} }
for _, test := range tests { for _, test := range tests {

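The test table is updated to match the new epoch 25 cutover and drops one stale case. A sketch of that table-driven pattern as a standalone test file, repeating the toy selector from the sketch above so it is self-contained (save as a *_test.go file to run with go test):

```go
package sharding

import (
	"math/big"
	"testing"
)

// instanceForEpoch is the toy selector from the earlier sketch, repeated here
// so this test file is self-contained.
func instanceForEpoch(epoch *big.Int) string {
	switch {
	case epoch.Cmp(big.NewInt(25)) >= 0:
		return "mainnetV1_2"
	case epoch.Cmp(big.NewInt(19)) >= 0:
		return "mainnetV1_1"
	case epoch.Cmp(big.NewInt(12)) >= 0:
		return "mainnetV1"
	default:
		return "mainnetV0"
	}
}

func TestInstanceForEpoch(t *testing.T) {
	tests := []struct {
		epoch *big.Int
		want  string
	}{
		{big.NewInt(12), "mainnetV1"},
		{big.NewInt(19), "mainnetV1_1"},
		{big.NewInt(25), "mainnetV1_2"}, // cutover moved from 21 to 25 in this diff
	}
	for _, tt := range tests {
		if got := instanceForEpoch(tt.epoch); got != tt.want {
			t.Errorf("epoch %v: got %s, want %s", tt.epoch, got, tt.want)
		}
	}
}
```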
@ -1708,7 +1708,7 @@ var FoundationalNodeAccountsV1_1 = []DeployAccount{
{Index: "319", Address: "one19c4uqfzezuws7e4ka4kvc5r09suks2ghpyg6xw", BlsPublicKey: "51b2019b222df63fc99d202b03834dee09f1ef11e25a03592a96c1d01bca2bedfc25e0f26d88dcbb8a7176e30e1ec116"}, {Index: "319", Address: "one19c4uqfzezuws7e4ka4kvc5r09suks2ghpyg6xw", BlsPublicKey: "51b2019b222df63fc99d202b03834dee09f1ef11e25a03592a96c1d01bca2bedfc25e0f26d88dcbb8a7176e30e1ec116"},
} }
// FoundationalNodeAccountsV1_2 are the accounts for the foundational nodes from Epoch 21. // FoundationalNodeAccountsV1_2 are the accounts for the foundational nodes from Epoch 25.
var FoundationalNodeAccountsV1_2 = []DeployAccount{ var FoundationalNodeAccountsV1_2 = []DeployAccount{
{Index: "0", Address: "one1y0xcf40fg65n2ehm8fx5vda4thrkymhpg45ecj", BlsPublicKey: "9e70e8d76851f6e8dc648255acdd57bb5c49cdae7571aed43f86e9f140a6343caed2ffa860919d03e0912411fee4850a"}, {Index: "0", Address: "one1y0xcf40fg65n2ehm8fx5vda4thrkymhpg45ecj", BlsPublicKey: "9e70e8d76851f6e8dc648255acdd57bb5c49cdae7571aed43f86e9f140a6343caed2ffa860919d03e0912411fee4850a"},
{Index: "1", Address: "one18lp2w7ghhuajdpzl8zqeddza97u92wtkfcwpjk", BlsPublicKey: "fce3097d9fc234d34d6eaef3eecd0365d435d1118f69f2da1ed2a69ba725270771572e40347c222aca784cb973307b11"}, {Index: "1", Address: "one18lp2w7ghhuajdpzl8zqeddza97u92wtkfcwpjk", BlsPublicKey: "fce3097d9fc234d34d6eaef3eecd0365d435d1118f69f2da1ed2a69ba725270771572e40347c222aca784cb973307b11"},
@ -1830,8 +1830,8 @@ var FoundationalNodeAccountsV1_2 = []DeployAccount{
{Index: "117", Address: "one1wt5darzj8wd385xl8stccj4sv6553hgckaypfr", BlsPublicKey: "9622f8a5590d6ef8ca94e6c866d663aa0398caf00a88b2dd059dc7a63daa8600828a85737eca4e595caa382b5d407205"}, {Index: "117", Address: "one1wt5darzj8wd385xl8stccj4sv6553hgckaypfr", BlsPublicKey: "9622f8a5590d6ef8ca94e6c866d663aa0398caf00a88b2dd059dc7a63daa8600828a85737eca4e595caa382b5d407205"},
{Index: "118", Address: "one1k80wv3uvfw5r0qhzp9yxn94u4jxu8my2xwuk87", BlsPublicKey: "bcd24c722dc5dd3727bc3f027e3f681e4d1f5a552513d158645833eb8d8d39ec1076370b55e063aeed5a7825eb6aa20a"}, {Index: "118", Address: "one1k80wv3uvfw5r0qhzp9yxn94u4jxu8my2xwuk87", BlsPublicKey: "bcd24c722dc5dd3727bc3f027e3f681e4d1f5a552513d158645833eb8d8d39ec1076370b55e063aeed5a7825eb6aa20a"},
{Index: "119", Address: "one1kwqkyzq2pmhvufe9528g9nd966ur54v6auzruf", BlsPublicKey: "aaac4eb8260e6cee7f19fbcae721ce2d68f125461953a583adca44407194452e7ac41de0757e2921c8fed83469172f92"}, {Index: "119", Address: "one1kwqkyzq2pmhvufe9528g9nd966ur54v6auzruf", BlsPublicKey: "aaac4eb8260e6cee7f19fbcae721ce2d68f125461953a583adca44407194452e7ac41de0757e2921c8fed83469172f92"},
{Index: "120", Address: "one1kykp8kzgmrkvhsz60t4yx8a06s44wt6jvpuuzg", BlsPublicKey: "85d0761a56dd18a67ee71e38ff949d93301467e5fbf9ab3fcb6875a6778341b271ae6acfc94d3046f454aeda138a1b89"}, {Index: "120", Address: "one1gjas4xurmc0rguafq63ql65rwuxayukm74w2mn", BlsPublicKey: "d6c8cf5553fa77257d26ba6b201294a2a497d070d420ab76c044efc0f4325f40b5664e7a7f973940ef1ea57530215886"},
{Index: "121", Address: "one1zcgx6pncyxrv6t7cza0f6r0e0pnp0gqd6knvcw", BlsPublicKey: "f1787fff85d1d5a6d458e768b425d2d9a9aec09ad8ddd020fd7d2f275970a8bef1f9a0c4a705e8f8b8734f9daf39d604"}, {Index: "121", Address: "one1pkw7wnplp077fn6phv2kfejw3u7wvx0m9vppzc", BlsPublicKey: "92d5e3fb5d3f1e64af4be7c0acbd457b68a2ec59cf34aaaa0bac04d0e0346b283a65e0227378a60e1fe7af2407d9c50a"},
{Index: "122", Address: "one12mn87akec4dqpfzy8q0jtqz9epf2pme605vc0h", BlsPublicKey: "6dd84461d038ccf467bfc2413b23b8eced80534b4507c3cdffa6f68111a799dccf1d42fd552ffeb3d595d2ec9733ef99"}, {Index: "122", Address: "one12mn87akec4dqpfzy8q0jtqz9epf2pme605vc0h", BlsPublicKey: "6dd84461d038ccf467bfc2413b23b8eced80534b4507c3cdffa6f68111a799dccf1d42fd552ffeb3d595d2ec9733ef99"},
{Index: "123", Address: "one18xnzxwn5hwuuh229cuv7mr0hnjcvgru7qt2wdv", BlsPublicKey: "83426cbc962bc18900540a3f886486f35c17d182f2e13c06a23d3242c5ca50138aa98ac842d2a3d03d8a273a8329ac82"}, {Index: "123", Address: "one18xnzxwn5hwuuh229cuv7mr0hnjcvgru7qt2wdv", BlsPublicKey: "83426cbc962bc18900540a3f886486f35c17d182f2e13c06a23d3242c5ca50138aa98ac842d2a3d03d8a273a8329ac82"},
{Index: "124", Address: "one1efat5elqnvttf7gm86q9kmt48z69njax464rhv", BlsPublicKey: "2066a0b39474b9cdaec88bb94953219a2690e1dd6008bb6dc31c7f76c08a9db54758ff3aaed3249fcb0029dfc1e92995"}, {Index: "124", Address: "one1efat5elqnvttf7gm86q9kmt48z69njax464rhv", BlsPublicKey: "2066a0b39474b9cdaec88bb94953219a2690e1dd6008bb6dc31c7f76c08a9db54758ff3aaed3249fcb0029dfc1e92995"},
@ -1840,7 +1840,7 @@ var FoundationalNodeAccountsV1_2 = []DeployAccount{
{Index: "127", Address: "one18r7axamzxgn57s9xcsvhevv58ukxrhsrfug6vg", BlsPublicKey: "17cd5fbf29ff9d1e21fc0f1b22be0dbaa9b05b49e011cbedceb6584153669e1318c052d431d9e3faec82fff823a0fb90"}, {Index: "127", Address: "one18r7axamzxgn57s9xcsvhevv58ukxrhsrfug6vg", BlsPublicKey: "17cd5fbf29ff9d1e21fc0f1b22be0dbaa9b05b49e011cbedceb6584153669e1318c052d431d9e3faec82fff823a0fb90"},
{Index: "128", Address: "one1w3pvyg56gal0ajef487944gzjkg6sv68j26pey", BlsPublicKey: "dc2f315a1dbe67ed3cfd8925fbfed964203c11e5c77112643d53b51e3c23135c695d218233c8a6cccea4a5032488d28c"}, {Index: "128", Address: "one1w3pvyg56gal0ajef487944gzjkg6sv68j26pey", BlsPublicKey: "dc2f315a1dbe67ed3cfd8925fbfed964203c11e5c77112643d53b51e3c23135c695d218233c8a6cccea4a5032488d28c"},
{Index: "129", Address: "one1r3mh2h7flr6sgcjvpaadlfjcnguwfk5z6mjuvu", BlsPublicKey: "2684b9b856e2b3f6ff0916b17137ba61e2495a9636859ff108defcca38f4dc49508c44ff16ad8f74e5182769c6a5a699"}, {Index: "129", Address: "one1r3mh2h7flr6sgcjvpaadlfjcnguwfk5z6mjuvu", BlsPublicKey: "2684b9b856e2b3f6ff0916b17137ba61e2495a9636859ff108defcca38f4dc49508c44ff16ad8f74e5182769c6a5a699"},
{Index: "130", Address: "one17kjexl6hyh0evgt7en7q04m9q7lwgym5grs9q7", BlsPublicKey: "6c793f4a4d09ac07fe6b8fb7fcc4796b08c37994f9b5ec3a68e30ff48c32bd3fed1e358fced864558054f5e11f7e1398"}, {Index: "130", Address: "one1u9g5g7qxx3rx802c4xfjre8vz7mwz87s0e8y4k", BlsPublicKey: "c34f6916cf06ed15a3771678c73941271253e14f8b21a16872d8e30bcdf241ef9a4f58453c3953a25e17b2387f6fd813"},
{Index: "131", Address: "one12kdc0fqxne5f3394wrwadxq9yfaquyxgtaed3q", BlsPublicKey: "826f98d1f8ce8751dac0441888f4b4f1190ec90691d5e40110c39068d0f95ea9cca8efe23d0f7a865bd9ed37ebf38d12"}, {Index: "131", Address: "one12kdc0fqxne5f3394wrwadxq9yfaquyxgtaed3q", BlsPublicKey: "826f98d1f8ce8751dac0441888f4b4f1190ec90691d5e40110c39068d0f95ea9cca8efe23d0f7a865bd9ed37ebf38d12"},
{Index: "132", Address: "one1h2dynptqmtgdfg9fgpd8dvmv8scupkgtzapx4l", BlsPublicKey: "814843ee8475adb2245027e9531e036c4135cf25a1051d5ea97f9f9cea506503e4a83a49bea1ee40e5b9a9c5f55f8014"}, {Index: "132", Address: "one1h2dynptqmtgdfg9fgpd8dvmv8scupkgtzapx4l", BlsPublicKey: "814843ee8475adb2245027e9531e036c4135cf25a1051d5ea97f9f9cea506503e4a83a49bea1ee40e5b9a9c5f55f8014"},
{Index: "133", Address: "one1l4yrxjz8ugwjdcxrm5snwgmgk8few9v2q2xv8h", BlsPublicKey: "aa37a8ad8fb42a5b1413be9ae5b053e7a47d2f36a11a1e7ba74c4b338919c9c76eca3a6332c06fbbd161a6aef20e9a0b"}, {Index: "133", Address: "one1l4yrxjz8ugwjdcxrm5snwgmgk8few9v2q2xv8h", BlsPublicKey: "aa37a8ad8fb42a5b1413be9ae5b053e7a47d2f36a11a1e7ba74c4b338919c9c76eca3a6332c06fbbd161a6aef20e9a0b"},
@ -1854,11 +1854,11 @@ var FoundationalNodeAccountsV1_2 = []DeployAccount{
{Index: "141", Address: "one1ljvq9tkvfp583zzl85mgjh3qjvjufnuwmn7krv", BlsPublicKey: "37644619ae24ffa2ab63dde85f8116effddaa500c6e28c57a5f100cb8a97262ed97f671d903a9c32a53301eec1100b96"}, {Index: "141", Address: "one1ljvq9tkvfp583zzl85mgjh3qjvjufnuwmn7krv", BlsPublicKey: "37644619ae24ffa2ab63dde85f8116effddaa500c6e28c57a5f100cb8a97262ed97f671d903a9c32a53301eec1100b96"},
{Index: "142", Address: "one12c23ekslj469g0g0tu9jcvecfkla7rahmrhe37", BlsPublicKey: "19101de3d0578c3146a1904f25a3344a998dcb0a18433dc5cc977d05f378676b0652b4a64fa8dff6c819cfd52dc94c14"}, {Index: "142", Address: "one12c23ekslj469g0g0tu9jcvecfkla7rahmrhe37", BlsPublicKey: "19101de3d0578c3146a1904f25a3344a998dcb0a18433dc5cc977d05f378676b0652b4a64fa8dff6c819cfd52dc94c14"},
{Index: "143", Address: "one19l9equxmql4jkcah8g4f6qva732npajarffj6q", BlsPublicKey: "f531f442dc19d2967fbcdcab68e77b16bc49cd05698ed8a56b39f3704315915c2b2b0175be6982be7238290b3504d487"}, {Index: "143", Address: "one19l9equxmql4jkcah8g4f6qva732npajarffj6q", BlsPublicKey: "f531f442dc19d2967fbcdcab68e77b16bc49cd05698ed8a56b39f3704315915c2b2b0175be6982be7238290b3504d487"},
{Index: "144", Address: "one1nq5dglmw0vunsa34mve8sdyrkhfd0373v4xgtv", BlsPublicKey: "a48d7cd3f3004cf2cecd4d4eba14d257da046f13ff461fedc2c3daaf725776e9a96ef38098a9a04f968f9c2287fc220d"}, {Index: "144", Address: "one18ehxprgnrnjug4yqcwq48ynnw3am237hlwmwxv", BlsPublicKey: "3a4212d1fdeb837d5c7cb709298c596c71a4faf57a592dc8a675eba752c31f256f0a99478fc0f38f13790df087f9ff0f"},
{Index: "145", Address: "one1qmgqawpflw4pu9ytryz69mrk0mhhsswdmjgfrj", BlsPublicKey: "013116498f069ab0b1e3bb1043e45283c9fb8641eed8470d77d59bc8a42fca53ce6d3714ceb5c37aa71e3d1839b25690"}, {Index: "145", Address: "one1aydmy5m3ymurgac9yaea9nq7tg4fwu8dnetz02", BlsPublicKey: "87ec47a0ee00ea2b31640283644cf093f4ba6d63ddefc50a699994aedb29bc83727a32a7d238ff66683269a0230eb593"},
{Index: "146", Address: "one1h7c7pgwnht4nns40k6swdzwy8xn9uvl0e65e49", BlsPublicKey: "05f9655186ef16363e2ae14629bc0fef17781bac5eed7875e3b442e57e45340115f6d087035f626a84832479c9a54a03"}, {Index: "146", Address: "one14kljysylyq0nypcxzndhfqxrkjk290l9kkhmnz", BlsPublicKey: "d6b1b753e1d3333fc3898f394edae8cf79d9168c6b8c07ac8109cb754086f1402e82cc6a52131e0399c2d7cac2bf260b"},
{Index: "147", Address: "one1cwzleselrsq3x76vjzy7u65a9tqmsrcne2w83h", BlsPublicKey: "783841b0eaea497e2f894d482b6bde16b96359cb837a9f7dc3bf89abcb45b75e8ea597d60d2e89775c6fb37164fa3694"}, {Index: "147", Address: "one18ahgl50uyq29cuacn76ucmsgxppgmjx80zs2cj", BlsPublicKey: "6e72635ed8e416ebdc48c59d2642cc8b884b0a5281a36a31220bb9e1bb453f8db22ea7905cda29271bde4cf858244514"},
{Index: "148", Address: "one1kkcw2y5d9w9celf0vu025hflyxu33gekmntx9u", BlsPublicKey: "b12b30b10c9b002ec0832d061025b99a695052800ebf642299fd439be505322208543566efda12b524b284ce5ea06510"}, {Index: "148", Address: "one1n392jsedk9pfx7rkw2gpdpjptsgfrkxla5ve6t", BlsPublicKey: "3a9217c7d6574e3922c4c55a258a4ba70ce54872e097022807dc15a1999a99919f3afcc0301b1cb8feb7ffbbe42b8a16"},
{Index: "149", Address: "one16xzyq2zarperhxjzmk8dyp629v4vugdkzfx7r2", BlsPublicKey: "4ff265748fbc2e3f99e95fc2e1818d02a38bcb72e95efa8ffb50820919f543f98705b330522d51e5272a5427e98ad38b"}, {Index: "149", Address: "one16xzyq2zarperhxjzmk8dyp629v4vugdkzfx7r2", BlsPublicKey: "4ff265748fbc2e3f99e95fc2e1818d02a38bcb72e95efa8ffb50820919f543f98705b330522d51e5272a5427e98ad38b"},
{Index: "150", Address: "one175jcxcdk2xlmccndr2mux3c8se8gsmddesg5ed", BlsPublicKey: "74a8762803bfbe8893540480cc9f789914b56f86a1458c38d5bf1b6737cd149a28f761bec95f3e9cf4095fb55a6e7294"}, {Index: "150", Address: "one175jcxcdk2xlmccndr2mux3c8se8gsmddesg5ed", BlsPublicKey: "74a8762803bfbe8893540480cc9f789914b56f86a1458c38d5bf1b6737cd149a28f761bec95f3e9cf4095fb55a6e7294"},
{Index: "151", Address: "one1lmqycl6wezcdf7nqxj34slstamt0hlhp4s0rj4", BlsPublicKey: "764cec13fff061afecd226e167ac0452e2f16e5e8faa9c2152ba060243e6c29220850c9acae6f13612c842d277d5118b"}, {Index: "151", Address: "one1lmqycl6wezcdf7nqxj34slstamt0hlhp4s0rj4", BlsPublicKey: "764cec13fff061afecd226e167ac0452e2f16e5e8faa9c2152ba060243e6c29220850c9acae6f13612c842d277d5118b"},
@ -1873,8 +1873,8 @@ var FoundationalNodeAccountsV1_2 = []DeployAccount{
{Index: "160", Address: "one1hdxnmfgkxtlmrym9ljjw2dpweke85pheu8g8zl", BlsPublicKey: "95f9d0f1a688b9c4d2a9d876580f7920625f0859846711203fb4f0f364290895daab76557d09f4acb6d3b562eed00782"}, {Index: "160", Address: "one1hdxnmfgkxtlmrym9ljjw2dpweke85pheu8g8zl", BlsPublicKey: "95f9d0f1a688b9c4d2a9d876580f7920625f0859846711203fb4f0f364290895daab76557d09f4acb6d3b562eed00782"},
{Index: "161", Address: "one1d0k2x6ue6yd0pvuumxgxjk3qqfjpn08mu7r253", BlsPublicKey: "254772532f47cd28c0ed95986c678e589661de36eeef7502c57a95f753e1c0c1ea6348f6f19e10729faf31ae37fe1a0c"}, {Index: "161", Address: "one1d0k2x6ue6yd0pvuumxgxjk3qqfjpn08mu7r253", BlsPublicKey: "254772532f47cd28c0ed95986c678e589661de36eeef7502c57a95f753e1c0c1ea6348f6f19e10729faf31ae37fe1a0c"},
{Index: "162", Address: "one10j0tswg6x4udqafvsetjj3fl0g4e52spwp0wsh", BlsPublicKey: "289eb7f7c6b601dc83534734f500365f4fcf2189a5813b806b9ef6a777183c697d5fb22c07a26d19b63f1e10fa88e50c"}, {Index: "162", Address: "one10j0tswg6x4udqafvsetjj3fl0g4e52spwp0wsh", BlsPublicKey: "289eb7f7c6b601dc83534734f500365f4fcf2189a5813b806b9ef6a777183c697d5fb22c07a26d19b63f1e10fa88e50c"},
{Index: "163", Address: "one15fswyv4znc8mw5dqutgssdj35ut2ghpg0wqg7p", BlsPublicKey: "4f5e3f01edbfc48e6260a8557f2bafd7bdfe76e727055110882ccadbcbba2ddd96a7f4008f576c94977e488c35086c18"}, {Index: "163", Address: "one1hmmh8tfh8p7ad2szyn5wntfplt5mn79mlfemwn", BlsPublicKey: "224fac479a8c0554faf2ecf0227453831bdeae7d03e96c2b0c9e7b3a33ac8701f57431833c7898a3c885d89f850e5696"},
{Index: "164", Address: "one1txjkm9v7avmpy4u9ugktwwt4tkkfz2r40v6v7a", BlsPublicKey: "998bb2fc775f08bc748acded8f2b331edf3d01da8d7bf5f1c6cc7bb8afb3bd384bb2fb40351c245ba7b5027c2f1c0608"}, {Index: "164", Address: "one1fallaznqw6mv2wyzggpxvlk0xjeu8hry9u7v74", BlsPublicKey: "3c8c4ecf114e0d12516db91d9eb17a0dde8bc80f3d95df7b95d02b1efcc70098d3e15ac6d38f8cd389114f3247dde883"},
{Index: "165", Address: "one10jkak35au0w9lmsqvjvpnx7t0qc6svklup48k3", BlsPublicKey: "1acf941d944db3f7410f4bf4f2d541e17d50bfde0f853ddcb2630b7ecf701c7be454ec6577aabe9320e2a8524cc60715"}, {Index: "165", Address: "one10jkak35au0w9lmsqvjvpnx7t0qc6svklup48k3", BlsPublicKey: "1acf941d944db3f7410f4bf4f2d541e17d50bfde0f853ddcb2630b7ecf701c7be454ec6577aabe9320e2a8524cc60715"},
{Index: "166", Address: "one1zy628rt2d87nlad0dlvlzrnnqsv5cnt4uaa05m", BlsPublicKey: "898c9ec1fa5073e96ff71b66a5353eb2d9bbbaf103adeac5625ccae8cc926e99165ca2afed4f22065e8becf7b012f20f"}, {Index: "166", Address: "one1zy628rt2d87nlad0dlvlzrnnqsv5cnt4uaa05m", BlsPublicKey: "898c9ec1fa5073e96ff71b66a5353eb2d9bbbaf103adeac5625ccae8cc926e99165ca2afed4f22065e8becf7b012f20f"},
{Index: "167", Address: "one1zefrcfgjuaqxggql0syz28cq40cy6ujwq6zmpx", BlsPublicKey: "6510d39a8db76f952c9dfa34b16cd1324877dabef2f441095c8b1f9809b884c4628b16363349871350d4422f4c61bf0c"}, {Index: "167", Address: "one1zefrcfgjuaqxggql0syz28cq40cy6ujwq6zmpx", BlsPublicKey: "6510d39a8db76f952c9dfa34b16cd1324877dabef2f441095c8b1f9809b884c4628b16363349871350d4422f4c61bf0c"},
@ -1896,7 +1896,7 @@ var FoundationalNodeAccountsV1_2 = []DeployAccount{
{Index: "183", Address: "one1933f2dpa5a7ezh2q6ul5nakd26tynn4l75pt6h", BlsPublicKey: "7fa5a39d171f5ceb2c58571731d6273e55420722984caaf17607a6ebdc18e32350406f5e537474a26cb31b9c97504a04"}, {Index: "183", Address: "one1933f2dpa5a7ezh2q6ul5nakd26tynn4l75pt6h", BlsPublicKey: "7fa5a39d171f5ceb2c58571731d6273e55420722984caaf17607a6ebdc18e32350406f5e537474a26cb31b9c97504a04"},
{Index: "184", Address: "one1wrvlznh27fywscexnc2l9fxk5gjelcqdnw8pvw", BlsPublicKey: "84b6b842abc1c28aa505f22ac553443f919ff742f1b6cc2c1b9ab07518a649dafc4d5e1360ee20462168753361569293"}, {Index: "184", Address: "one1wrvlznh27fywscexnc2l9fxk5gjelcqdnw8pvw", BlsPublicKey: "84b6b842abc1c28aa505f22ac553443f919ff742f1b6cc2c1b9ab07518a649dafc4d5e1360ee20462168753361569293"},
{Index: "185", Address: "one1v6e7uc4jwamdcjw43042jt5fw5ck5mxzt8c4wx", BlsPublicKey: "eb0f1af6910302340af0c20f05c08654131bf7eb0e8118548e7dfe25cc2991d953bbe8ace2786f5aed77484ee0adeb95"}, {Index: "185", Address: "one1v6e7uc4jwamdcjw43042jt5fw5ck5mxzt8c4wx", BlsPublicKey: "eb0f1af6910302340af0c20f05c08654131bf7eb0e8118548e7dfe25cc2991d953bbe8ace2786f5aed77484ee0adeb95"},
{Index: "186", Address: "one1q50h43adwq85nd28pq5yhw3sjlcd63x6urhmnh", BlsPublicKey: "c9e1ff04c12679c5315d960ecd5c1ced35616381c33ab8bb612eb78088c601295535a2f43140d647e6d9095b6ef60380"}, {Index: "186", Address: "one1seaavwphcu5a3mm4p36fpl9r68f0hk24g289st", BlsPublicKey: "25279696affa165739d769c135f901a2008555426979be6c92fd94a0c5d0f33415f85bfc3308be0c64e3de34cc2b198d"},
{Index: "187", Address: "one1hyqkrvkad8kpttpglrl42rcthd6h0uqg2etgf4", BlsPublicKey: "1bc81f9fd333524032c5a8fbbd976b00ba8d5e0e2efe60fd8336abbaec1b5744ecef07b242efddf09bd42dbcbd2f5795"}, {Index: "187", Address: "one1hyqkrvkad8kpttpglrl42rcthd6h0uqg2etgf4", BlsPublicKey: "1bc81f9fd333524032c5a8fbbd976b00ba8d5e0e2efe60fd8336abbaec1b5744ecef07b242efddf09bd42dbcbd2f5795"},
{Index: "188", Address: "one12xfkkmqm7cnv52tml8vpdawvax0kr8f4wrlp96", BlsPublicKey: "64745645766808aeee7eb00081d6d11119dcd3a925255960faccc0c6d22f17b6fd0814c061dd3ab39b5877df119f6711"}, {Index: "188", Address: "one12xfkkmqm7cnv52tml8vpdawvax0kr8f4wrlp96", BlsPublicKey: "64745645766808aeee7eb00081d6d11119dcd3a925255960faccc0c6d22f17b6fd0814c061dd3ab39b5877df119f6711"},
{Index: "189", Address: "one1yuqws7le57naq90w2z42k3undccy8nh7wdmuhz", BlsPublicKey: "2611e59381f2f117746ea12cea76a09ba5895bc503d4bc5561b546d673ccc4fff854aea26e9c43c3682444c25396490a"}, {Index: "189", Address: "one1yuqws7le57naq90w2z42k3undccy8nh7wdmuhz", BlsPublicKey: "2611e59381f2f117746ea12cea76a09ba5895bc503d4bc5561b546d673ccc4fff854aea26e9c43c3682444c25396490a"},
@ -1914,7 +1914,7 @@ var FoundationalNodeAccountsV1_2 = []DeployAccount{
{Index: "201", Address: "one1fgsn4xrhtq4ljfd394gesj4aj6pkkf0lsytmnn", BlsPublicKey: "11e163fab1c890a81d224dc9d462d56b0cef5cd1645d2edd848067ff34f480b02ac8c920c4646618f235a7e3f9bdc505"}, {Index: "201", Address: "one1fgsn4xrhtq4ljfd394gesj4aj6pkkf0lsytmnn", BlsPublicKey: "11e163fab1c890a81d224dc9d462d56b0cef5cd1645d2edd848067ff34f480b02ac8c920c4646618f235a7e3f9bdc505"},
{Index: "202", Address: "one1g6l7xj9w8z3uxjud8da69twzvyccqam6k5xymd", BlsPublicKey: "a11451a324fffc50f33e934bcee1bd673b7e285c24817285cea8207212aad9951aa9b6c40a5b78a7fea7ed047c48d188"}, {Index: "202", Address: "one1g6l7xj9w8z3uxjud8da69twzvyccqam6k5xymd", BlsPublicKey: "a11451a324fffc50f33e934bcee1bd673b7e285c24817285cea8207212aad9951aa9b6c40a5b78a7fea7ed047c48d188"},
{Index: "203", Address: "one1l476nnnhekrnnk5m78k8jr9uxvh6y4ql7xq9fq", BlsPublicKey: "4fd4f3a680528b60f6de7d944febe5426485f0ca0816a7c44fe3355bef2f48519f75235f42732678b2a5fab860e67304"}, {Index: "203", Address: "one1l476nnnhekrnnk5m78k8jr9uxvh6y4ql7xq9fq", BlsPublicKey: "4fd4f3a680528b60f6de7d944febe5426485f0ca0816a7c44fe3355bef2f48519f75235f42732678b2a5fab860e67304"},
{Index: "204", Address: "one134phdwr88wnpadw4x8p7r972f7wkqerd7kwxg9", BlsPublicKey: "7a37baf96f89557f4157d206af89ac343b17df147e44d6d94b76637ba16668854205a639f39d2c7f51b8d1e26205b296"}, {Index: "204", Address: "one1a962kd80qmqu6wwd2ywjnh2f0e95zmrur2qmde", BlsPublicKey: "11fc5e3e3d1b8671f4fa2da7eec3174d832450c4b45530ee24bde9f8e549600ddebe2c60631926f15f287fdc28e77619"},
{Index: "205", Address: "one1qfs3q0mwzmg3dsyzc6vf2telzx54t0t3zlc8a7", BlsPublicKey: "b3edc1d8778cae41255c06f2425347c7230de80e5f6a3b9e46a28a5223fb36d3fa6433cb5d589ee990ab73db3ea8a419"}, {Index: "205", Address: "one1qfs3q0mwzmg3dsyzc6vf2telzx54t0t3zlc8a7", BlsPublicKey: "b3edc1d8778cae41255c06f2425347c7230de80e5f6a3b9e46a28a5223fb36d3fa6433cb5d589ee990ab73db3ea8a419"},
{Index: "206", Address: "one1kgu8jl9vtff4yklvxspg4whjwdvkqsx50zwy6g", BlsPublicKey: "94c13845fa5fb93967e1f4485255993e42cddc6cef75bd5709af76275fd9a6d6fc4e062a9f4d2b776348794fc25a2408"}, {Index: "206", Address: "one1kgu8jl9vtff4yklvxspg4whjwdvkqsx50zwy6g", BlsPublicKey: "94c13845fa5fb93967e1f4485255993e42cddc6cef75bd5709af76275fd9a6d6fc4e062a9f4d2b776348794fc25a2408"},
{Index: "207", Address: "one1zr57fc7txdwvcwd6w2rn4yg6tdy0j0tjhvyj8f", BlsPublicKey: "76ac6c7d29e5e47874619816d6a441645adb0138bd02aa71d5b654cfeb9bdc1d4dd66d4e493100d48aeb558824d63408"}, {Index: "207", Address: "one1zr57fc7txdwvcwd6w2rn4yg6tdy0j0tjhvyj8f", BlsPublicKey: "76ac6c7d29e5e47874619816d6a441645adb0138bd02aa71d5b654cfeb9bdc1d4dd66d4e493100d48aeb558824d63408"},
@ -2015,7 +2015,7 @@ var FoundationalNodeAccountsV1_2 = []DeployAccount{
{Index: "302", Address: "one10pptg3m2k4c7ntgyqqpdmcamwg9e29naq4a4gd", BlsPublicKey: "931c600eb04d9fe1fde45104a00e5da9bcdd94787e18b5867ab6c0c5d33834f9a1fb7f090f3eafcb26e368b43f82f213"}, {Index: "302", Address: "one10pptg3m2k4c7ntgyqqpdmcamwg9e29naq4a4gd", BlsPublicKey: "931c600eb04d9fe1fde45104a00e5da9bcdd94787e18b5867ab6c0c5d33834f9a1fb7f090f3eafcb26e368b43f82f213"},
{Index: "303", Address: "one1hcru2lx6nagvf49hfc5skhhxummq863nlmvvu5", BlsPublicKey: "9a2529a40c23b45e33058e4758e63d2c5285bc64c0f71563725767274e9d0c25b6a9e96cf37d8ddf3a7047a2f5f8778d"}, {Index: "303", Address: "one1hcru2lx6nagvf49hfc5skhhxummq863nlmvvu5", BlsPublicKey: "9a2529a40c23b45e33058e4758e63d2c5285bc64c0f71563725767274e9d0c25b6a9e96cf37d8ddf3a7047a2f5f8778d"},
{Index: "304", Address: "one13zzrvzrdxz2pf4y6ewkgjy7h2f602esjzjx4zv", BlsPublicKey: "48c89306e6eda48a4bd55145a89dc55ef7fdee4f3938afd1a6a3c503be8678e9aee3636f63b0df08a7491597e793f492"}, {Index: "304", Address: "one13zzrvzrdxz2pf4y6ewkgjy7h2f602esjzjx4zv", BlsPublicKey: "48c89306e6eda48a4bd55145a89dc55ef7fdee4f3938afd1a6a3c503be8678e9aee3636f63b0df08a7491597e793f492"},
{Index: "305", Address: "one1zkpxghkct52crp2tda7xkdpxga32j56jjevntf", BlsPublicKey: "8137e81449de355de1befa684f89c38f5769a3744af8f9066d517474476b20be930cfad7de30b73b26c2353480111d92"}, {Index: "305", Address: "one1apdxhkhen9svzkk4x7n475lx8lmckawet7fdrx", BlsPublicKey: "16975468c2cedd7ce9785e41b264d11fe7fa75ae5d4561ec8c088ec03c3a0b4518b3ffc4830e71ddfc55bf6098cc4186"},
{Index: "306", Address: "one1r8ywmftsqgew4sfx9808dtww4r6j3a3v98c8vt", BlsPublicKey: "a221b590df1f201b653c31c0df894471519c0601dac36189ddb9511b4b920d5a43e597842971e784d0e0cb17bda7e983"}, {Index: "306", Address: "one1r8ywmftsqgew4sfx9808dtww4r6j3a3v98c8vt", BlsPublicKey: "a221b590df1f201b653c31c0df894471519c0601dac36189ddb9511b4b920d5a43e597842971e784d0e0cb17bda7e983"},
{Index: "307", Address: "one1w2t3d2g435az4cnyqf7rkuzq2mu08mfuwlqhcz", BlsPublicKey: "8520f6519d3bc40fbffc703248cdcb6222e24394238cc7e705048ea62dceb35c3a2d0151d863556e8f172ca54ab8fe87"}, {Index: "307", Address: "one1w2t3d2g435az4cnyqf7rkuzq2mu08mfuwlqhcz", BlsPublicKey: "8520f6519d3bc40fbffc703248cdcb6222e24394238cc7e705048ea62dceb35c3a2d0151d863556e8f172ca54ab8fe87"},
{Index: "308", Address: "one1hz4n5vn4e6ucvppxf6n8zayhml8wsqken60ksg", BlsPublicKey: "9d5063f3d82ca4b6855f3b00a45bdff5dd309fd6c4e12488ef18fbe808c847c2ff89334e641b3850e025b9bb85f4538b"}, {Index: "308", Address: "one1hz4n5vn4e6ucvppxf6n8zayhml8wsqken60ksg", BlsPublicKey: "9d5063f3d82ca4b6855f3b00a45bdff5dd309fd6c4e12488ef18fbe808c847c2ff89334e641b3850e025b9bb85f4538b"},

@ -28,7 +28,7 @@ func (d DeployAccount) String() string {
func BeaconAccountPriKey() *ecdsa.PrivateKey { func BeaconAccountPriKey() *ecdsa.PrivateKey {
prikey, err := ecdsa.GenerateKey(crypto.S256(), strings.NewReader(genesisString)) prikey, err := ecdsa.GenerateKey(crypto.S256(), strings.NewReader(genesisString))
if err != nil && prikey == nil { if err != nil && prikey == nil {
utils.GetLogInstance().Error("Failed to generate beacon chain contract deployer account") utils.Logger().Error().Msg("Failed to generate beacon chain contract deployer account")
os.Exit(111) os.Exit(111)
} }
return prikey return prikey

@ -121,7 +121,9 @@ func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNr r
func doCall(ctx context.Context, b Backend, args CallArgs, blockNr rpc.BlockNumber, vmCfg vm.Config, timeout time.Duration, globalGasCap *big.Int) ([]byte, uint64, bool, error) { func doCall(ctx context.Context, b Backend, args CallArgs, blockNr rpc.BlockNumber, vmCfg vm.Config, timeout time.Duration, globalGasCap *big.Int) ([]byte, uint64, bool, error) {
defer func(start time.Time) { defer func(start time.Time) {
utils.GetLogInstance().Debug("Executing EVM call finished", "runtime", time.Since(start)) utils.Logger().Debug().
Dur("runtime", time.Since(start)).
Msg("Executing EVM call finished")
}(time.Now()) }(time.Now())
state, header, err := b.StateAndHeaderByNumber(ctx, blockNr) state, header, err := b.StateAndHeaderByNumber(ctx, blockNr)
@ -152,7 +154,10 @@ func doCall(ctx context.Context, b Backend, args CallArgs, blockNr rpc.BlockNumb
gas = uint64(*args.Gas) gas = uint64(*args.Gas)
} }
if globalGasCap != nil && globalGasCap.Uint64() < gas { if globalGasCap != nil && globalGasCap.Uint64() < gas {
utils.GetLogInstance().Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap) utils.Logger().Warn().
Uint64("requested", gas).
Uint64("cap", globalGasCap.Uint64()).
Msg("Caller gas above allowance, capping")
gas = globalGasCap.Uint64() gas = globalGasCap.Uint64()
} }
gasPrice := new(big.Int).SetUint64(defaultGasPrice) gasPrice := new(big.Int).SetUint64(defaultGasPrice)

@ -38,7 +38,12 @@ func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args SendTxArgs
} }
signed, err := s.signTransaction(ctx, &args, passwd) signed, err := s.signTransaction(ctx, &args, passwd)
if err != nil { if err != nil {
utils.GetLogger().Warn("Failed transaction send attempt", "from", args.From, "to", args.To, "value", args.Value.ToInt(), "err", err) utils.Logger().Warn().
Str("from", args.From.Hex()).
Str("to", args.To.Hex()).
Uint64("value", args.Value.ToInt().Uint64()).
AnErr("err", err).
Msg("Failed transaction send attempt")
return common.Hash{}, err return common.Hash{}, err
} }
return SubmitTransaction(ctx, s.b, signed) return SubmitTransaction(ctx, s.b, signed)

@ -23,9 +23,15 @@ func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c
return common.Hash{}, err return common.Hash{}, err
} }
addr := crypto.CreateAddress(from, tx.Nonce()) addr := crypto.CreateAddress(from, tx.Nonce())
utils.GetLogger().Info("Submitted contract creation", "fullhash", tx.Hash().Hex(), "contract", common2.MustAddressToBech32(addr)) utils.Logger().Info().
Str("fullhash", tx.Hash().Hex()).
Str("contract", common2.MustAddressToBech32(addr)).
Msg("Submitted contract creation")
} else { } else {
utils.GetLogger().Info("Submitted transaction", "fullhash", tx.Hash().Hex(), "recipient", tx.To()) utils.Logger().Info().
Str("fullhash", tx.Hash().Hex()).
Str("recipient", tx.To().Hex()).
Msg("Submitted transaction")
} }
return tx.Hash(), nil return tx.Hash(), nil
} }

@ -47,7 +47,9 @@ func (m *MemProfiling) Config() {
Addr: fmt.Sprintf("%s:%s", nodeconfig.GetDefaultConfig().IP, utils.GetPortFromDiff(nodeconfig.GetDefaultConfig().Port, MemProfilingPortDiff)), Addr: fmt.Sprintf("%s:%s", nodeconfig.GetDefaultConfig().IP, utils.GetPortFromDiff(nodeconfig.GetDefaultConfig().Port, MemProfilingPortDiff)),
Handler: m.h, Handler: m.h,
} }
utils.GetLogInstance().Info("running mem profiling", "port", utils.GetPortFromDiff(nodeconfig.GetDefaultConfig().Port, MemProfilingPortDiff)) utils.Logger().Info().
Str("port", utils.GetPortFromDiff(nodeconfig.GetDefaultConfig().Port, MemProfilingPortDiff)).
Msgf("running mem profiling")
} }
// Add adds variables to watch for profiling. // Add adds variables to watch for profiling.
@ -67,7 +69,6 @@ func (m *MemProfiling) Add(name string, v interface{}) {
func (m *MemProfiling) Start() { func (m *MemProfiling) Start() {
go m.s.ListenAndServe() go m.s.ListenAndServe()
m.PeriodicallyScanMemSize() m.PeriodicallyScanMemSize()
utils.GetLogInstance().Info("Start memprofiling.")
} }
// Stop stops mem profiling. // Stop stops mem profiling.
@ -86,7 +87,7 @@ func (m *MemProfiling) PeriodicallyScanMemSize() {
for k, v := range m.observedObject { for k, v := range m.observedObject {
s := memsize.Scan(v) s := memsize.Scan(v)
r := s.Report() r := s.Report()
utils.GetLogInstance().Info(fmt.Sprintf("memsize report for %s:\n %s", k, r)) utils.Logger().Info().Msgf("memsize report for %s:\n %s", k, r)
} }
m.mu.Unlock() m.mu.Unlock()
} }
@ -120,11 +121,11 @@ func MaybeCallGCPeriodically() {
func PrintMemUsage(msg string) { func PrintMemUsage(msg string) {
var m runtime.MemStats var m runtime.MemStats
runtime.ReadMemStats(&m) runtime.ReadMemStats(&m)
utils.GetLogInstance().Info(msg, utils.Logger().Info().
"alloc", bToMb(m.Alloc), Uint64("alloc", bToMb(m.Alloc)).
"totalalloc", bToMb(m.TotalAlloc), Uint64("totalalloc", bToMb(m.TotalAlloc)).
"sys", bToMb(m.Sys), Uint64("sys", bToMb(m.Sys)).
"numgc", m.NumGC) Uint32("numgc", m.NumGC)
} }
func bToMb(b uint64) uint64 { func bToMb(b uint64) uint64 {

@ -3,6 +3,7 @@ package profiler
import ( import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"fmt"
"net/http" "net/http"
"os" "os"
"sync" "sync"
@ -46,7 +47,15 @@ func (profiler *Profiler) LogMemory() {
// log mem usage // log mem usage
info, _ := profiler.proc.MemoryInfo() info, _ := profiler.proc.MemoryInfo()
memMap, _ := profiler.proc.MemoryMaps(false) memMap, _ := profiler.proc.MemoryMaps(false)
utils.GetLogInstance().Info("Mem Report", "info", info, "map", memMap, "shardID", profiler.shardID) loggedMemMap := ""
for _, mems := range *memMap {
loggedMemMap = fmt.Sprintf("%v; %v", loggedMemMap, mems)
}
utils.Logger().Info().
Str("info", info.String()).
Str("map", loggedMemMap).
Uint32("shardID", profiler.shardID).
Msg("Mem Report")
time.Sleep(3 * time.Second) time.Sleep(3 * time.Second)
} }
@ -58,7 +67,11 @@ func (profiler *Profiler) LogCPU() {
// log cpu usage // log cpu usage
percent, _ := profiler.proc.CPUPercent() percent, _ := profiler.proc.CPUPercent()
times, _ := profiler.proc.Times() times, _ := profiler.proc.Times()
utils.GetLogInstance().Info("CPU Report", "percent", percent, "times", times, "shardID", profiler.shardID) utils.Logger().Info().
Float64("percent", percent).
Str("times", times.String()).
Uint32("shardID", profiler.shardID).
Msg("CPU Report")
time.Sleep(3 * time.Second) time.Sleep(3 * time.Second)
} }

@ -81,8 +81,9 @@ func (sc *CollectionImpl) ShardChain(shardID uint32) (*core.BlockChain, error) {
return nil, ctxerror.New("cannot open chain database").WithCause(err) return nil, ctxerror.New("cannot open chain database").WithCause(err)
} }
if rawdb.ReadCanonicalHash(db, 0) == (common.Hash{}) { if rawdb.ReadCanonicalHash(db, 0) == (common.Hash{}) {
utils.GetLogger().Info("initializing a new chain database", utils.Logger().Info().
"shardID", shardID) Uint32("shardID", shardID).
Msg("initializing a new chain database")
if err := sc.dbInit.InitChainDB(db, shardID); err != nil { if err := sc.dbInit.InitChainDB(db, shardID); err != nil {
return nil, ctxerror.New("cannot initialize a new chain database"). return nil, ctxerror.New("cannot initialize a new chain database").
WithCause(err) WithCause(err)
@ -119,11 +120,15 @@ func (sc *CollectionImpl) CloseShardChain(shardID uint32) error {
if !ok { if !ok {
return ctxerror.New("shard chain not found", "shardID", shardID) return ctxerror.New("shard chain not found", "shardID", shardID)
} }
utils.GetLogger().Info("closing shard chain", "shardID", shardID) utils.Logger().Info().
Uint32("shardID", shardID).
Msg("closing shard chain")
delete(sc.pool, shardID) delete(sc.pool, shardID)
bc.Stop() bc.Stop()
bc.ChainDb().Close() bc.ChainDb().Close()
utils.GetLogger().Info("closed shard chain", "shardID", shardID) utils.Logger().Info().
Uint32("shardID", shardID).
Msg("closed shard chain")
return nil return nil
} }
@ -135,10 +140,14 @@ func (sc *CollectionImpl) Close() error {
sc.pool = newPool sc.pool = newPool
sc.mtx.Unlock() sc.mtx.Unlock()
for shardID, bc := range oldPool { for shardID, bc := range oldPool {
utils.GetLogger().Info("closing shard chain", "shardID", shardID) utils.Logger().Info().
Uint32("shardID", shardID).
Msg("closing shard chain")
bc.Stop() bc.Stop()
bc.ChainDb().Close() bc.ChainDb().Close()
utils.GetLogger().Info("closed shard chain", "shardID", shardID) utils.Logger().Info().
Uint32("shardID", shardID).
Msg("closed shard chain")
} }
return nil return nil
} }

@ -63,7 +63,9 @@ func ConvertFixedDataIntoByteArray(data interface{}) []byte {
buff := new(bytes.Buffer) buff := new(bytes.Buffer)
err := binary.Write(buff, binary.BigEndian, data) err := binary.Write(buff, binary.BigEndian, data)
if err != nil { if err != nil {
GetLogger().Crit("Failed to convert fixed data into byte array", "err", err) Logger().Error().
AnErr("err", err).
Msg("Failed to convert fixed data into byte array")
} }
return buff.Bytes() return buff.Bytes()
} }
@ -195,16 +197,22 @@ func LoadKeyFromFile(keyfile string) (key p2p_crypto.PrivKey, pk p2p_crypto.PubK
var keyStruct PrivKeyStore var keyStruct PrivKeyStore
err = Load(keyfile, &keyStruct) err = Load(keyfile, &keyStruct)
if err != nil { if err != nil {
GetLogger().Info("No priviate key can be loaded from file", "keyfile", keyfile) Logger().Info().
GetLogger().Info("Using random private key") Str("keyfile", keyfile).
Msg("No private key can be loaded from file")
Logger().Info().Msg("Using random private key")
key, pk, err = GenKeyP2PRand() key, pk, err = GenKeyP2PRand()
if err != nil { if err != nil {
GetLogger().Crit("LoadKeyFromFile", "GenKeyP2PRand Error", err) Logger().Error().
AnErr("GenKeyP2PRand Error", err).
Msg("LoadedKeyFromFile")
panic(err) panic(err)
} }
err = SaveKeyToFile(keyfile, key) err = SaveKeyToFile(keyfile, key)
if err != nil { if err != nil {
GetLogger().Error("failed to save key to keyfile", "keyfile", err) Logger().Error().
AnErr("keyfile", err).
Msg("failed to save key to keyfile")
} }
return key, pk, nil return key, pk, nil
} }

@ -246,7 +246,7 @@ func (node *Node) reducePendingTransactions() {
// If length of pendingTransactions is greater than TxPoolLimit then by greedy take the TxPoolLimit recent transactions. // If length of pendingTransactions is greater than TxPoolLimit then by greedy take the TxPoolLimit recent transactions.
if curLen > txPoolLimit+txPoolLimit { if curLen > txPoolLimit+txPoolLimit {
node.pendingTransactions = append(types.Transactions(nil), node.pendingTransactions[curLen-txPoolLimit:]...) node.pendingTransactions = append(types.Transactions(nil), node.pendingTransactions[curLen-txPoolLimit:]...)
utils.GetLogger().Info("mem stat reduce pending transaction") utils.Logger().Info().Msg("mem stat reduce pending transaction")
} }
} }

@ -459,8 +459,8 @@ func (node *Node) AddNewBlock(newBlock *types.Block) error {
utils.Logger().Error(). utils.Logger().Error().
Err(err). Err(err).
Uint64("blockNum", newBlock.NumberU64()). Uint64("blockNum", newBlock.NumberU64()).
Bytes("parentHash", newBlock.Header().ParentHash().Bytes()[:]). Str("parentHash", newBlock.Header().ParentHash().Hex()).
Bytes("hash", newBlock.Header().Hash().Bytes()[:]). Str("hash", newBlock.Header().Hash().Hex()).
Msg("Error Adding new block to blockchain") Msg("Error Adding new block to blockchain")
} else { } else {
utils.Logger().Info(). utils.Logger().Info().

@ -0,0 +1,700 @@
#!/bin/bash
# This script tests the API functionality on both localnet and betanet.
# -l to run localnet, -b to run betanet (mutually exclusive)
# -v to see returns from each request
# Right now it only tests whether a response is received
# You must have cloned the dapp-examples repo alongside this one and installed Node.js
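# Example invocation (a hypothetical sketch; assumes a running localnet/betanet and
# the ../dapp-examples checkout described above):
#   ./scripts/api_test.sh -l -v   # run against localnet and print every response
#   ./scripts/api_test.sh -b      # run against betanet, summary only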
VERBOSE="FALSE"
TESTS_RAN=0
TESTS_PASSED=0
red=`tput setaf 1`
green=`tput setaf 2`
blue=`tput setaf 6`
white=`tput sgr0`
yellow=`tput setaf 11`
reset=`tput sgr0`
function response_test() {
if [ "$1" != "" ]; then
echo "${green}RESPONSE RECIEVED${reset}"
return 1
else
echo "${red}NO RESPONSE${reset}"
return 0
fi
}
function isHashTest() {
if [ "$TRANSACTION" != "null" ]; then
if [[ "$TRANSACTION_HASH" =~ ^0x[0-9a-f]{64}$ ]]; then
echo ${green}TRANSACTION HASH VALID${reset}
echo
return 1
fi
fi
echo ${red}TRANSACTION HASH INVALID${reset}
return 0
}
function isHexTest() {
if [ "$1" != "null" ]; then
if [[ "$1" =~ ^0x[0-9a-f]+$ ]]; then
echo ${green}VALID HEX RECEIVED${reset}
echo
return 1
fi
fi
echo ${red}INVALID HEX RECEIVED${reset}
return 0
}
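# Example (hypothetical values): isHexTest "0x4a817c800" prints VALID HEX RECEIVED
# and returns 1, while isHexTest "null" prints INVALID HEX RECEIVED and returns 0.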
### SETUP COMMANDLINE FLAGS ###
while getopts "lbvp" OPTION; do
case $OPTION in
b)
NETWORK="betanet"
declare -A PORT=( [POST]="http://s0.b.hmny.io:9500/" [GET]="http://e0.b.hmny.io:5000/" )
BLOCK_0_HASH=$(curl --location --request POST "http://l0.b.hmny.io:9500" \
--header "Content-Type: application/json" \
--data "{\"jsonrpc\":\"2.0\",\"method\":\"hmy_getBlockByNumber\",\"params\":[\"0x1\", true],\"id\":1}" | jq -r '.result.hash')
echo "BLOCK0HASH:"
echo "$BLOCK_0_HASH"
SIGNED_RAW_TRANSACTION=$(node ../dapp-examples/nodejs/apiTestSign.js)
echo "RAWTX"
echo "$SIGNED_RAW_TRANSACTION"
TRANSACTION_HASH=$(curl --location --request POST "http://l0.b.hmny.io:9500" \
--header "Content-Type: application/json" \
--data "{\"jsonrpc\":\"2.0\",\"method\":\"hmy_sendRawTransaction\",\"params\":[\""$SIGNED_RAW_TRANSACTION"\"],\"id\":1}" | jq -r '.result')
echo "TRANSACTION_HASH:"
echo $TRANSACTION_HASH
sleep 20s
TRANSACTION=$(curl --location --request POST "http://l0.b.hmny.io:9500" \
--header "Content-Type: application/json" \
--data "{\"jsonrpc\":\"2.0\",\"method\":\"hmy_getTransactionByHash\",\"params\":[\"$TRANSACTION_HASH\"],\"id\":1}")
echo "TRANSACTION:"
echo "$TRANSACTION"
TRANSACTION_BLOCK_HASH=$(echo $TRANSACTION | jq -r '.result.blockHash')
TRANSACTION_BLOCK_NUMBER=$(echo $TRANSACTION | jq -r '.result.blockNumber')
TRANSACTION_INDEX=$(echo $TRANSACTION | jq -r '.result.transactionIndex') #Needs to be get transaction Index
TRANSACTION_BLOCK_ID=$(( $TRANSACTION_BLOCK_NUMBER ))
echo "TRANSACTION_BLOCK_ID:"
echo $TRANSACTION_BLOCK_ID
echo "TRANSACTION_BLOCK_HASH:"
echo $TRANSACTION_BLOCK_HASH
echo "TRANSACTION_BLOCK_NUMBER:"
echo "$TRANSACTION_BLOCK_NUMBER"
echo "TRANSACTION_INDEX:"
echo $TRANSACTION_INDEX
;;
l)
NETWORK="localnet"
declare -A PORT=( [POST]="localhost:9500/" [GET]="localhost:5099/" )
BLOCK_0_HASH=$(curl -s --location --request POST "localhost:9500" \
--header "Content-Type: application/json" \
--data "{\"jsonrpc\":\"2.0\",\"method\":\"hmy_getBlockByNumber\",\"params\":[\"0x1\", true],\"id\":1}" | jq -r '.result.hash')
echo "BLOCK0HASH:"
echo "$BLOCK_0_HASH"
SIGNED_RAW_TRANSACTION=$(node ../dapp-examples/nodejs/apiTestSign.js localnet)
echo "RAWTX"
echo "$SIGNED_RAW_TRANSACTION"
TRANSACTION_HASH=$(curl --location --request POST "localhost:9500" \
--header "Content-Type: application/json" \
--data "{\"jsonrpc\":\"2.0\",\"method\":\"hmy_sendRawTransaction\",\"params\":[\""$SIGNED_RAW_TRANSACTION"\"],\"id\":1}" | jq -r '.result')
echo "TRANSACTION_HASH:"
echo $TRANSACTION_HASH
sleep 20s
TRANSACTION=$(curl --location --request POST "http://localhost:9500" \
--header "Content-Type: application/json" \
--data "{\"jsonrpc\":\"2.0\",\"method\":\"hmy_getTransactionByHash\",\"params\":[\"$TRANSACTION_HASH\"],\"id\":1}")
echo "TRANSACTION:"
echo "$TRANSACTION"
TRANSACTION_BLOCK_HASH=$(echo $TRANSACTION | jq -r '.result.blockHash')
TRANSACTION_BLOCK_NUMBER=$(echo $TRANSACTION | jq -r '.result.blockNumber')
TRANSACTION_INDEX=$(echo $TRANSACTION | jq -r '.result.transactionIndex')
TRANSACTION_BLOCK_ID=$(( $TRANSACTION_BLOCK_NUMBER ))
echo "TRANSACTION_BLOCK_ID:"
echo $TRANSACTION_BLOCK_ID
echo "TRANSACTION_BLOCK_HASH:"
echo $TRANSACTION_BLOCK_HASH
echo "TRANSACTION_BLOCK_NUMBER:"
echo "$TRANSACTION_BLOCK_NUMBER"
echo "TRANSACTION_INDEX:"
echo $TRANSACTION_INDEX
;;
v)
VERBOSE="TRUE"
;;
p)
PRETTY="TRUE"
;;
esac
done
if [ $OPTIND -eq 1 ]; then echo "No options were passed, -l for localnet, -b for betanet, -v to view logs of either"; exit; fi
declare -A GETDATA=( [GET_blocks]="blocks?from=$TRANSACTION_BLOCK_ID&to=$TRANSACTION_BLOCK_ID" [GET_tx]="tx?id=0" [GET_address]="address?id=0" [GET_node-count]="node-count" [GET_shard]="shard?id=0" [GET_committee]="committee?shard_id=0&epoch=0" )
declare -A POSTDATA
if [ "$NETWORK" == "localnet" ]; then
POSTDATA[hmy_getBlockByHash]="hmy_getBlockByHash\",\"params\":[\"$TRANSACTION_BLOCK_HASH\", true]"
POSTDATA[hmy_getBlockByNumber]="hmy_getBlockByNumber\",\"params\":[\"$TRANSACTION_BLOCK_NUMBER\", true]"
POSTDATA[hmy_getBlockTransactionCountByHash]="hmy_getBlockTransactionCountByHash\",\"params\":[\"$TRANSACTION_BLOCK_HASH\"]"
POSTDATA[hmy_getBlockTransactionCountByNumber]="hmy_getBlockTransactionCountByNumber\",\"params\":[\"$TRANSACTION_BLOCK_NUMBER\"]"
POSTDATA[hmy_getCode]="hmy_getCode\",\"params\":[\"0x08AE1abFE01aEA60a47663bCe0794eCCD5763c19\", \"latest\"]"
POSTDATA[hmy_getTransactionByBlockHashAndIndex]="hmy_getTransactionByBlockHashAndIndex\",\"params\":[\"$TRANSACTION_BLOCK_HASH\", \"$TRANSACTION_INDEX\"]"
POSTDATA[hmy_getTransactionByBlockNumberAndIndex]="hmy_getTransactionByBlockNumberAndIndex\",\"params\":[\"$TRANSACTION_BLOCK_NUMBER\", \"$TRANSACTION_INDEX\"]"
POSTDATA[hmy_getTransactionByHash]="hmy_getTransactionByHash\",\"params\":[\"$TRANSACTION_HASH\"]"
POSTDATA[hmy_getTransactionReceipt]="hmy_getTransactionReceipt\",\"params\":[\"$TRANSACTION_HASH\"]"
POSTDATA[hmy_syncing]="hmy_syncing\",\"params\":[]"
POSTDATA[net_peerCount]="net_peerCount\",\"params\":[]"
POSTDATA[hmy_getBalance]="hmy_getBalance\",\"params\":[\"one18t4yj4fuutj83uwqckkvxp9gfa0568uc48ggj7\", \"latest\"]"
POSTDATA[hmy_getStorageAt]="hmy_getStorageAt\",\"params\":[\"0xD7Ff41CA29306122185A07d04293DdB35F24Cf2d\", \"0\", \"latest\"]"
POSTDATA[hmy_getTransactionCount]="hmy_getTransactionCount\",\"params\":[\"0x806171f95C5a74371a19e8a312c9e5Cb4E1D24f6\", \"latest\"]" # what is this
POSTDATA[hmy_sendRawTransaction]="hmy_sendRawTransaction\",\"params\":[\"$SIGNED_RAW_TRANSACTION\"]"
POSTDATA[hmy_getLogs]="hmy_getLogs\", \"params\":[{\"BlockHash\": \"$TRANSACTION_BLOCK_HASH\"}]"
POSTDATA[hmy_getFilterChanges]="hmy_getFilterChanges\", \"params\":[\"0x58010795a282878ed0d61da72a14b8b0\"]"
POSTDATA[hmy_newPendingTransactionFilter]="hmy_newPendingTransactionFilter\", \"params\":[]"
POSTDATA[hmy_newBlockFilter]="hmy_newBlockFilter\", \"params\":[]"
POSTDATA[hmy_newFilter]="hmy_newFilter\", \"params\":[{\"BlockHash\": \"0x5725b5b2ab28206e7256a78cda4f9050c2629fd85110ffa54eacd2a13ba68072\"}]"
POSTDATA[hmy_call]="hmy_call\", \"params\":[{\"to\": \"0x08AE1abFE01aEA60a47663bCe0794eCCD5763c19\"}, \"latest\"]"
POSTDATA[hmy_gasPrice]="hmy_gasPrice\",\"params\":[]"
POSTDATA[hmy_blockNumber]="hmy_blockNumber\",\"params\":[]"
POSTDATA[net_version]="net_version\",\"params\":[]"
POSTDATA[hmy_protocolVersion]="hmy_protocolVersion\",\"params\":[]"
fi
if [ "$NETWORK" == "betanet" ]; then
POSTDATA[hmy_getBlockByHash]="hmy_getBlockByHash\",\"params\":[\"$TRANSACTION_BLOCK_HASH\", true]"
POSTDATA[hmy_getBlockByNumber]="hmy_getBlockByNumber\",\"params\":[\"$TRANSACTION_BLOCK_NUMBER\", true]"
POSTDATA[hmy_getBlockTransactionCountByHash]="hmy_getBlockTransactionCountByHash\",\"params\":[\"$TRANSACTION_BLOCK_HASH\"]"
POSTDATA[hmy_getBlockTransactionCountByNumber]="hmy_getBlockTransactionCountByNumber\",\"params\":[\"$TRANSACTION_BLOCK_NUMBER\"]"
POSTDATA[hmy_getCode]="hmy_getCode\",\"params\":[\"0x08AE1abFE01aEA60a47663bCe0794eCCD5763c19\", \"latest\"]"
POSTDATA[hmy_getTransactionByBlockHashAndIndex]="hmy_getTransactionByBlockHashAndIndex\",\"params\":[\"$TRANSACTION_BLOCK_HASH\", \"$TRANSACTION_INDEX\"]"
POSTDATA[hmy_getTransactionByBlockNumberAndIndex]="hmy_getTransactionByBlockNumberAndIndex\",\"params\":[\"$TRANSACTION_BLOCK_NUMBER\", \"$TRANSACTION_INDEX\"]"
POSTDATA[hmy_getTransactionByHash]="hmy_getTransactionByHash\",\"params\":[\"$TRANSACTION_HASH\"]"
POSTDATA[hmy_getTransactionReceipt]="hmy_getTransactionReceipt\",\"params\":[\"$TRANSACTION_HASH\"]"
POSTDATA[hmy_syncing]="hmy_syncing\",\"params\":[]"
POSTDATA[net_peerCount]="net_peerCount\",\"params\":[]"
POSTDATA[hmy_getBalance]="hmy_getBalance\",\"params\":[\"one18t4yj4fuutj83uwqckkvxp9gfa0568uc48ggj7\", \"latest\"]"
POSTDATA[hmy_getStorageAt]="hmy_getStorageAt\",\"params\":[\"0xD7Ff41CA29306122185A07d04293DdB35F24Cf2d\", \"0\", \"latest\"]"
POSTDATA[hmy_getTransactionCount]="hmy_getTransactionCount\",\"params\":[\"0x806171f95C5a74371a19e8a312c9e5Cb4E1D24f6\", \"latest\"]" # what is this
POSTDATA[hmy_sendRawTransaction]="hmy_sendRawTransaction\",\"params\":[\"$SIGNED_RAW_TRANSACTION\"]"
POSTDATA[hmy_getLogs]="hmy_getLogs\", \"params\":[{\"BlockHash\": \"$TRANSACTION_BLOCK_HASH\"}]"
POSTDATA[hmy_getFilterChanges]="hmy_getFilterChanges\", \"params\":[\"0x58010795a282878ed0d61da72a14b8b0\"]"
POSTDATA[hmy_newPendingTransactionFilter]="hmy_newPendingTransactionFilter\", \"params\":[]"
POSTDATA[hmy_newBlockFilter]="hmy_newBlockFilter\", \"params\":[]"
POSTDATA[hmy_newFilter]="hmy_newFilter\", \"params\":[{\"BlockHash\": \"0x5725b5b2ab28206e7256a78cda4f9050c2629fd85110ffa54eacd2a13ba68072\"}]"
POSTDATA[hmy_call]="hmy_call\", \"params\":[{\"to\": \"0x08AE1abFE01aEA60a47663bCe0794eCCD5763c19\"}, \"latest\"]"
POSTDATA[hmy_gasPrice]="hmy_gasPrice\",\"params\":[]"
POSTDATA[hmy_blockNumber]="hmy_blockNumber\",\"params\":[]"
POSTDATA[net_version]="net_version\",\"params\":[]"
POSTDATA[hmy_protocolVersion]="hmy_protocolVersion\",\"params\":[]"
fi
declare -A RESPONSES
RESPONSES[GET_blocks]=""
RESPONSES[GET_tx]=""
RESPONSES[GET_address]=""
RESPONSES[GET_node-count]=""
RESPONSES[GET_shard]=""
RESPONSES[GET_committee]=""
RESPONSES[hmy_getBlockByHash]=""
RESPONSES[hmy_getBlockByNumber]=""
RESPONSES[hmy_getBlockTransactionCountByHash]=""
RESPONSES[hmy_getBlockTransactionCountByNumber]=""
RESPONSES[hmy_getCode]=""
RESPONSES[hmy_getTransactionByBlockHashAndIndex]=""
RESPONSES[hmy_getTransactionByBlockNumberAndIndex]=""
RESPONSES[hmy_getTransactionByHash]=""
RESPONSES[hmy_getTransactionReceipt]=""
RESPONSES[hmy_syncing]=""
RESPONSES[net_peerCount]=""
RESPONSES[hmy_getBalance]=""
RESPONSES[hmy_getStorageAt]=""
RESPONSES[hmy_getTransactionCount]=""
RESPONSES[hmy_sendRawTransaction]=""
RESPONSES[hmy_getLogs]=""
RESPONSES[hmy_getFilterChanges]=""
RESPONSES[hmy_newPendingTransactionFilter]=""
RESPONSES[hmy_newBlockFilter]=""
RESPONSES[hmy_newFilter]=""
RESPONSES[hmy_call]=""
RESPONSES[hmy_gasPrice]=""
RESPONSES[hmy_blockNumber]=""
RESPONSES[net_version]=""
RESPONSES[hmy_protocolVersion]=""
### Processes GET requests and stores responses in RESPONSES ###
function GET_requests() {
for K in "${!GETDATA[@]}";
do
RESPONSES[$K]=$(curl -s --location --request GET "${PORT[GET]}${GETDATA[$K]}" \
--header "Content-Type: application/json" \
--data "")
done
}
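# For example, with the localnet PORT[GET]="localhost:5099/", the GET_node-count
# entry above expands to roughly:
#   curl -s --location --request GET "localhost:5099/node-count" \
#        --header "Content-Type: application/json" --data ""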
### Processes POST requests and stores responses in RESPONSES ###
function POST_requests() {
for K in "${!POSTDATA[@]}";
do
RESPONSES[$K]="$(curl -s --location --request POST "${PORT[POST]}" \
--header "Content-Type: application/json" \
--data "{\"jsonrpc\":\"2.0\",\"method\":\"${POSTDATA[$K]},\"id\":1}")"
done
}
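# For example, POSTDATA[hmy_blockNumber] makes the --data payload expand to roughly:
#   {"jsonrpc":"2.0","method":"hmy_blockNumber","params":[],"id":1}
# i.e. each POSTDATA value supplies the method name plus its params fragment.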
function log_API_responses() {
for K in "${!GETDATA[@]}";
do
echo "${yellow}$K"
echo "${blue}REQUEST:"
echo "${white}${GETDATA[$K]}"
echo "${blue}RESPONSE:" ${white}
echo ${RESPONSES[$K]} #| jq .
echo
echo
done
for K in "${!POSTDATA[@]}";
do
echo "${yellow}$K"
echo "${blue}REQUEST:"
echo "${white}${POSTDATA[$K]}"
echo "${blue}RESPONSE: $white"
echo ${RESPONSES[$K]} #| jq .
echo
echo
done
}
GET_requests
POST_requests
### BASIC QUERY TESTS ###
function Explorer_getBlock_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "GET blocks(explorer) test:"
response_test ${RESPONSES[GET_blocks]}
if [ "$?" == "1" ]; then
BLOCKBYIDHASH=$(echo ${RESPONSES[GET_blocks]} | jq -r '.[0].id')
if [ "$BLOCKBYIDHASH" != "null" ]; then
if [ "$BLOCKBYIDHASH" == "$TRANSACTION_BLOCK_HASH" ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}BLOCK HASH MATCHES TX${reset}
echo
return
fi
fi
echo ${red}BLOCK HASH DOES NOT MATCH TX OR IS NULL${reset}
fi
echo
}
# Needs updating - unclear what the explorer tx endpoint does when given no arguments
function Explorer_getTx_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "GET tx(explorer) test:"
response_test ${RESPONSES[GET_tx]}
if [ "$?" == "1" ]; then
TX_HASH=$(echo ${RESPONSES[GET_tx]} | jq -r '.id') # fix args to jq
if [ "$TX_HASH" != "null" ]; then
if [ "$TX_HASH" == "$TX_HASH" ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}BLOCK HASH MATCHES TX${reset}
echo
return
fi
fi
echo ${red}BLOCK HASH DOES NOT MATCH TX OR IS NULL${reset}
fi
echo
}
function Explorer_getExplorerNodeAddress_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "GET address(explorer) test:"
response_test ${RESPONSES[GET_address]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function Explorer_getExplorerNode_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "GET node-count(explorer) test:"
response_test ${RESPONSES[GET_node-count]}
if [ "${RESPONSES[GET_node-count]}" == "2" ]; then
echo ${green}SANE VALUE, 2 explorer nodes reported $reset
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
else
echo ${red}EXPLORER NODE COUNT IS NOT 2 $reset
fi
echo
}
function Explorer_getShard_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "GET shard(explorer) test:"
response_test ${RESPONSES[GET_shard]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function Explorer_getCommittee_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "GET committee(explorer) test:"
response_test ${RESPONSES[GET_committee]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
### API POST REQUESTS ###
function API_getBlockByNumber_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getBlockByNumber test:"
response_test ${RESPONSES[hmy_getBlockByNumber]}
BLOCKBYNUMBERHASH=$(echo ${RESPONSES[hmy_getBlockByNumber]} | jq -r '.result.hash')
if [ "$BLOCKBLOCKBYNUMBERHASH" != "null" ]; then
if [ "$BLOCKBYNUMBERHASH" == "$TRANSACTION_BLOCK_HASH" ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}BLOCK HASH MATCHES TX${reset}
echo
return
fi
fi
echo ${red}BLOCK HASH DOES NOT MATCH TX OR IS NULL${reset}
echo
}
function API_getBlockByHash_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getBlockByHash test:"
response_test ${RESPONSES[hmy_getBlockByHash]}
BLOCKBYHASHHASH=$(echo ${RESPONSES[hmy_getBlockByHash]} | jq -r '.result.hash')
if [ "$BLOCKBYHASHBYHASH" != "null" ]; then
if [ "$BLOCKBYHASHHASH" == "$TRANSACTION_BLOCK_HASH" ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}BLOCK HASH MATCHES TX${reset}
echo
return
fi
fi
echo ${red}BLOCK HASH DOES NOT MATCH TX OR IS NULL${reset}
echo
}
function API_getBlockTransactionCountByHash_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getBlockTransactionCountByHash test:"
response_test ${RESPONSES[hmy_getBlockTransactionCountByHash]}
TRANSACTIONCOUNTBYHASH=$(echo ${RESPONSES[hmy_getBlockTransactionCountByHash]} | jq -r '.result')
TRANSACTIONCOUNTBYHASH=$(( TRANSACTIONCOUNTBYHASH ))
if [ "$TRANSACTIONCOUNTBYHASH" != "null" ]; then
if [ $TRANSACTIONCOUNTBYHASH -gt 0 ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}NON ZERO TRANSACTION COUNT IN BLOCK${reset}
echo
return
fi
fi
echo ${red}INVALID TRANSACTION COUNT IN BLOCK${reset}
echo
}
function API_getBlockTransactionCountByNumber_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getBlockTransactionCountByNumber test:"
response_test ${RESPONSES[hmy_getBlockTransactionCountByNumber]}
TRANSACTIONCOUNTBYNUMBER=$(echo ${RESPONSES[hmy_getBlockTransactionCountByNumber]} | jq -r '.result')
TRANSACTIONCOUNTBYNUMBER=$(( TRANSACTIONCOUNTBYNUMBER ))
if [ "$BLOCKBYHASH" != "null" ]; then
if [ $TRANSACTIONCOUNTBYNUMBER -gt 0 ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}NON ZERO TRANSACTION COUNT IN BLOCK${reset}
echo
return
fi
fi
echo ${red}INVALID TRANSACTION COUNT IN BLOCK${reset}
echo
}
function API_getCode_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getCode test:"
response_test ${RESPONSES[hmy_getCode]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_getTransactionByBlockHashAndIndex_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getTransactionByBlockHashAndIndex test:"
response_test ${RESPONSES[hmy_getTransactionByBlockHashAndIndex]}
TRANSACTIONHASHBYHASHANDINDEX=$(echo ${RESPONSES[hmy_getTransactionByBlockHashAndIndex]} | jq -r '.result.hash')
if [ "$TRANSACTIONHASHBYHASHANDINDEX" != "null" ]; then
if [ "$TRANSACTIONHASHBYHASHANDINDEX" == "$TRANSACTION_HASH" ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}TRANSACTION FROM BLOCKHASH AND INDEX MATCH${reset}
echo
return
fi
fi
echo ${red} TRANSACTION FROM BLOCKHASH AND INDEX MISMATCH${reset}
echo
}
function API_getTransactionByBlockNumberAndIndex_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getTransactionByBlockNumberAndIndex test:"
response_test ${RESPONSES[hmy_getTransactionByBlockNumberAndIndex]}
TRANSACTIONHASHBYNUMBERANDINDEX=$(echo ${RESPONSES[hmy_getTransactionByBlockNumberAndIndex]} | jq -r '.result.hash')
if [ "$TRANSACTIONHASHBYNUMBERANDINDEX" != "null" ]; then
if [ "$TRANSACTIONHASHBYNUMBERANDINDEX" == "$TRANSACTION_HASH" ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}TRANSACTION FROM BLOCKNUMBER AND INDEX MATCH${reset}
echo
return
fi
fi
echo ${red} TRANSACTION FROM BLOCKNUMBER AND INDEX MISMATCH${reset}
echo
}
function API_getTransactionByHash_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getTransactionByHash test:"
TX_HASH=$(echo ${RESPONSES[hmy_getTransactionByHash]} | jq -r '.result.hash')
response_test ${RESPONSES[hmy_getTransactionByHash]}
if [ "$TX_HASH" != "null" ]; then
if [ "$TX_HASH" == "$TRANSACTION_HASH" ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}TRANSACTION HASH MATCH${reset}
echo
return
fi
fi
echo ${red} TRANSACTION HASH MISMATCH${reset}
echo
}
function API_getTransactionReceipt_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getTransactionReceipt test:"
TX_HASH=$(echo ${RESPONSES[hmy_getTransactionReceipt]} | jq -r '.result.transactionHash')
response_test ${RESPONSES[hmy_getTransactionReceipt]}
if [ "$TX_HASH" != "null" ]; then
if [ "$TX_HASH" == "$TRANSACTION_HASH" ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}TRANSACTION HASH MATCH${reset}
echo
return
fi
fi
echo ${red} TRANSACTION HASH MISMATCH${reset}
echo
}
function API_syncing_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_syncing test:"
response_test ${RESPONSES[hmy_syncing]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_netPeerCount_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST net_peerCount test:"
response_test ${RESPONSES[net_peerCount]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_getBalance_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getBalance test:"
response_test ${RESPONSES[hmy_getBalance]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_getStorageAt_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getStorageAt test:"
response_test ${RESPONSES[hmy_getStorageAt]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_getTransactionCount_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getTransactionCount test:"
response_test ${RESPONSES[hmy_getTransactionCount]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_sendRawTransaction_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_sendRawTransaction test:"
response_test ${RESPONSES[hmy_sendRawTransaction]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_getLogs_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getLogs test:"
response_test ${RESPONSES[hmy_getLogs]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_getFilterChanges_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getFilterChanges test:"
response_test ${RESPONSES[hmy_getFilterChanges]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_newPendingTransactionFilter_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_sendRawTransaction test:"
response_test ${RESPONSES[hmy_newPendingTransactionFilter]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_newBlockFilter_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_newBlockFilter test:"
response_test ${RESPONSES[hmy_newBlockFilter]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_newFilter_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_newFilter test:"
response_test ${RESPONSES[hmy_newFilter]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_call_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_call test:"
response_test ${RESPONSES[hmy_call]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_gasPrice_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_gasPrice test:"
response_test ${RESPONSES[hmy_gasPrice]}
if [ "$?" == "1" ]; then
RESULT=$(echo ${RESPONSES[hmy_gasPrice]} | jq -r '.result')
isHexTest $RESULT
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
fi
}
function API_blockNumber_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_blockNumber test:"
response_test ${RESPONSES[hmy_blockNumber]}
if [ "$?" == "1" ]; then
RESULT=$(echo ${RESPONSES[hmy_blockNumber]} | jq -r '.result')
isHexTest $RESULT
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
fi
}
function API_net_version_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST net_version test:"
response_test ${RESPONSES[net_version]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_protocolVersion_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_protocolVersion test:"
response_test ${RESPONSES[hmy_protocolVersion]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function run_tests() {
echo "### TESTING RPC CALLS ###"
echo
### Calls to the individual API method test ###
Explorer_getBlock_test
Explorer_getTx_test
Explorer_getExplorerNodeAddress_test
Explorer_getExplorerNode_test
Explorer_getShard_test
Explorer_getCommittee_test
API_getBlockByNumber_test
API_getBlockByHash_test
API_getBlockTransactionCountByHash_test
API_getBlockTransactionCountByNumber_test
API_getCode_test
API_getTransactionByBlockHashAndIndex_test
API_getTransactionByBlockNumberAndIndex_test
API_getTransactionByHash_test
API_getTransactionReceipt_test
API_syncing_test
API_netPeerCount_test
API_getBalance_test
API_getStorageAt_test
API_getTransactionCount_test
API_sendRawTransaction_test
API_getLogs_test
API_getFilterChanges_test
API_newPendingTransactionFilter_test
API_newBlockFilter_test
API_newFilter_test
API_call_test
API_gasPrice_test
API_blockNumber_test
API_net_version_test
API_protocolVersion_test
TESTS_FAILED=$(( $TESTS_RAN - $TESTS_PASSED ))
echo -n ${red}
[ $TESTS_FAILED -eq 0 ] && echo -n ${green}
echo "PASSED $TESTS_PASSED/$TESTS_RAN: $TESTS_FAILED TESTS FAILED"${reset}
}
if [ "$VERBOSE" == "TRUE" ]; then
log_API_responses
fi
### BETANET TESTS ###
run_tests