Merge branch 'master' of github.com:harmony-one/harmony into verify_headers

pull/1539/head
Dennis Won 5 years ago
commit c14654ffaf
  1. Makefile (19 lines changed)
  2. README.md (28 lines changed)
  3. api/proto/message/server.go (2 lines changed)
  4. api/service/syncing/syncing.go (6 lines changed)
  5. cmd/client/txgen/main.go (59 lines changed)
  6. cmd/harmony/main.go (35 lines changed)
  7. consensus/consensus_v2.go (246 lines changed)
  8. consensus/consensus_viewchange_msg.go (8 lines changed)
  9. consensus/view_change.go (110 lines changed)
  10. core/blockchain.go (2 lines changed)
  11. core/rawdb/accessors_chain.go (3 lines changed)
  12. core/rawdb/accessors_indexes.go (2 lines changed)
  13. core/rawdb/accessors_metadata.go (2 lines changed)
  14. drand/drand_leader.go (6 lines changed)
  15. internal/attack/attack.go (12 lines changed)
  16. internal/configs/sharding/mainnet.go (8 lines changed)
  17. internal/configs/sharding/pangaea.go (4 lines changed)
  18. internal/configs/sharding/shardingconfig_test.go (6 lines changed)
  19. internal/configs/sharding/testnet.go (4 lines changed)
  20. internal/genesis/foundational.go (28 lines changed)
  21. internal/genesis/genesis.go (2 lines changed)
  22. internal/hmyapi/blockchain.go (9 lines changed)
  23. internal/hmyapi/private_account.go (7 lines changed)
  24. internal/hmyapi/util.go (10 lines changed)
  25. internal/memprofiling/lib.go (17 lines changed)
  26. internal/params/config.go (6 lines changed)
  27. internal/profiler/profiler.go (17 lines changed)
  28. internal/shardchain/shardchains.go (21 lines changed)
  29. internal/utils/utils.go (18 lines changed)
  30. node/node.go (2 lines changed)
  31. node/node_handler.go (4 lines changed)
  32. scripts/api_test.sh (700 lines changed)

@ -0,0 +1,19 @@
TOP:=$(realpath ..)
export CGO_CFLAGS:=-I$(TOP)/bls/include -I$(TOP)/mcl/include -I/usr/local/opt/openssl/include
export CGO_LDFLAGS:=-L$(TOP)/bls/lib -L/usr/local/opt/openssl/lib
export LD_LIBRARY_PATH:=$(TOP)/bls/lib:$(TOP)/mcl/lib:/usr/local/opt/openssl/lib
export LIBRARY_PATH:=$(LD_LIBRARY_PATH)
export DYLD_FALLBACK_LIBRARY_PATH:=$(LD_LIBRARY_PATH)
export GO111MODULE:=on
.PHONY: all libs exe
all: libs
	./scripts/go_executable_build.sh
libs:
	make -C $(TOP)/mcl -j4
	make -C $(TOP)/bls BLS_SWAP_G=1 -j4
exe:
	./scripts/go_executable_build.sh

@ -21,42 +21,38 @@ The required go version is: **go1.12**
```bash
export GOPATH=$HOME/<path_of_your_choice>
export CGO_CFLAGS="-I$GOPATH/src/github.com/harmony-one/bls/include -I$GOPATH/src/github.com/harmony-one/mcl/include -I/usr/local/opt/openssl/include"
export CGO_LDFLAGS="-L$GOPATH/src/github.com/harmony-one/bls/lib -L/usr/local/opt/openssl/lib"
export LD_LIBRARY_PATH=$GOPATH/src/github.com/harmony-one/bls/lib:$GOPATH/src/github.com/harmony-one/mcl/lib:/usr/local/opt/openssl/lib
export LIBRARY_PATH=$LD_LIBRARY_PATH
export DYLD_FALLBACK_LIBRARY_PATH=$LD_LIBRARY_PATH
mkdir -p $HOME/<path_of_your_choice>/src/github.com/harmony-one
cd $HOME/<path_of_your_choice>/src/github.com/harmony-one
git clone git@github.com:harmony-one/mcl.git
cd mcl && make -j4 && cd ..
git clone git@github.com:harmony-one/bls.git
cd bls && make BLS_SWAP_G=1 -j4 && cd ..
git clone git@github.com:harmony-one/harmony.git
cd harmony
export GO111MODULE=on
./scripts/go_executable_build.sh
make
```
## Build
If you want to bypass the Makefile:
```bash
export CGO_CFLAGS="-I$GOPATH/src/github.com/harmony-one/bls/include -I$GOPATH/src/github.com/harmony-one/mcl/include -I/usr/local/opt/openssl/include"
export CGO_LDFLAGS="-L$GOPATH/src/github.com/harmony-one/bls/lib -L/usr/local/opt/openssl/lib"
export LD_LIBRARY_PATH=$GOPATH/src/github.com/harmony-one/bls/lib:$GOPATH/src/github.com/harmony-one/mcl/lib:/usr/local/opt/openssl/lib
export LIBRARY_PATH=$LD_LIBRARY_PATH
export DYLD_FALLBACK_LIBRARY_PATH=$LD_LIBRARY_PATH
export GO111MODULE=on
```
Note: Some of our scripts require bash 4.x support; please [install bash 4.x](http://tldrdevnotes.com/bash-upgrade-3-4-macos) on macOS.
Make sure you set `export GO111MODULE=on`.
### Build all executables
You can run the script `./scripts/go_executable_build.sh` to build all the executables.
### Build individual executables

@ -48,7 +48,7 @@ func (s *Server) Process(ctx context.Context, message *Message) (*Response, erro
}
address := crypto.PubkeyToAddress(key.PublicKey)
utils.Logger().Info().Int64("amount", amount).Bytes("address", address[:]).Msg("Enter")
utils.Logger().Info().Int64("amount", amount).Hex("address", address[:]).Msg("Enter")
if err := s.CreateTransactionForEnterMethod(amount, priKey); err != nil {
return nil, ErrEnterMethod
}

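The server.go change above (and the syncing.go hunks that follow) swaps `Bytes(...)` for `Hex(...)` on address and hash fields. The chained `Debug()/Info()...Msg()` calls look like the zerolog Event API; assuming `utils.Logger()` is indeed backed by github.com/rs/zerolog, here is a minimal sketch of why `Hex` is the better field type for raw byte slices:

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	logger := zerolog.New(os.Stdout)
	address := []byte{0xde, 0xad, 0xbe, 0xef}

	// Bytes logs the raw bytes, which come out escaped and unreadable in JSON.
	logger.Info().Bytes("address", address).Msg("Enter")
	// Hex logs the same bytes as a hex string, e.g. "address":"deadbeef".
	logger.Info().Hex("address", address).Msg("Enter")
}
```

For 20-byte addresses and 32-byte block hashes this turns escaped binary into a fixed-width hex string, which is what the remaining hunks standardize on.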
@ -507,8 +507,8 @@ func (ss *StateSync) getMaxConsensusBlockFromParentHash(parentHash common.Hash)
maxFirstID, maxCount := GetHowManyMaxConsensus(candidateBlocks)
hash := candidateBlocks[maxFirstID].Hash()
utils.Logger().Debug().
Bytes("parentHash", parentHash[:]).
Bytes("hash", hash[:]).
Hex("parentHash", parentHash[:]).
Hex("hash", hash[:]).
Int("maxCount", maxCount).
Msg("[SYNC] Find block with matching parenthash")
return candidateBlocks[maxFirstID]
@ -675,7 +675,7 @@ func (ss *StateSync) RegisterNodeInfo() int {
err := peerConfig.registerToBroadcast(ss.selfPeerHash[:], ss.selfip, ss.selfport)
if err != nil {
logger.Debug().
Bytes("selfPeerHash", ss.selfPeerHash[:]).
Hex("selfPeerHash", ss.selfPeerHash[:]).
Msg("[SYNC] register failed to peer")
return
}

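For context on `getMaxConsensusBlockFromParentHash` above: `GetHowManyMaxConsensus(candidateBlocks)` returns the index of the first block in the largest group of identical candidates plus that group's size, and the function then picks that block. A hypothetical, simplified sketch of the same idea, written over plain hash strings rather than Harmony's block type (this is not the repo's actual implementation):

```go
package main

import "fmt"

// maxConsensus returns the index of the first occurrence of the most common
// hash and how many candidates share it. Hypothetical stand-in for
// GetHowManyMaxConsensus, which operates on blocks instead of strings.
func maxConsensus(hashes []string) (maxFirstID, maxCount int) {
	counts := map[string]int{}
	firstIdx := map[string]int{}
	for i, h := range hashes {
		if _, seen := counts[h]; !seen {
			firstIdx[h] = i
		}
		counts[h]++
	}
	for h, c := range counts {
		if c > maxCount || (c == maxCount && firstIdx[h] < maxFirstID) {
			maxCount = c
			maxFirstID = firstIdx[h]
		}
	}
	return maxFirstID, maxCount
}

func main() {
	idx, count := maxConsensus([]string{"aa", "bb", "bb", "cc", "bb"})
	fmt.Println(idx, count) // 1 3
}
```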
@ -151,7 +151,9 @@ func main() {
MaxNumTxsPerBatch: *numTxns,
}
shardID := *shardIDFlag
utils.GetLogInstance().Debug("Cross Shard Ratio Is Set But not used", "cx ratio", *crossShardRatio)
utils.Logger().Debug().
Int("cx ratio", *crossShardRatio).
Msg("Cross Shard Ratio Is Set But not used")
// TODO(Richard): refactor this chunk to a single method
// Setup a logger to stdout and log file.
@ -166,20 +168,30 @@ func main() {
txGen.RunServices()
start := time.Now()
totalTime := float64(*duration)
utils.GetLogInstance().Debug("Total Duration", "totalTime", totalTime, "RunForever", isDurationForever(totalTime))
utils.Logger().Debug().
Float64("totalTime", totalTime).
Bool("RunForever", isDurationForever(totalTime)).
Msg("Total Duration")
ticker := time.NewTicker(checkFrequency * time.Second)
txGen.DoSyncWithoutConsensus()
syncLoop:
for {
t := time.Now()
if totalTime > 0 && t.Sub(start).Seconds() >= totalTime {
utils.GetLogInstance().Debug("Generator timer ended in syncLoop.", "duration", (int(t.Sub(start))), "startTime", start, "totalTime", totalTime)
utils.Logger().Debug().
Int("duration", (int(t.Sub(start)))).
Time("startTime", start).
Float64("totalTime", totalTime).
Msg("Generator timer ended in syncLoop.")
break syncLoop
}
select {
case <-ticker.C:
if txGen.State.String() == "NodeReadyForConsensus" {
utils.GetLogInstance().Debug("Generator is now in Sync.", "txgen node", txGen.SelfPeer, "Node State", txGen.State.String())
utils.Logger().Debug().
Str("txgen node", txGen.SelfPeer.String()).
Str("Node State", txGen.State.String()).
Msg("Generator is now in Sync.")
ticker.Stop()
break syncLoop
}
@ -188,14 +200,24 @@ syncLoop:
readySignal := make(chan uint32)
// This func is used to update the client's blockchain when new blocks are received from the leaders
updateBlocksFunc := func(blocks []*types.Block) {
utils.GetLogInstance().Info("[Txgen] Received new block", "block num", blocks[0].NumberU64())
utils.Logger().Info().
Uint64("block num", blocks[0].NumberU64()).
Msg("[Txgen] Received new block")
for _, block := range blocks {
shardID := block.ShardID()
if txGen.Consensus.ShardID == shardID {
utils.GetLogInstance().Info("Got block from leader", "txNum", len(block.Transactions()), "shardID", shardID, "preHash", block.ParentHash().Hex(), "currentBlock", txGen.Blockchain().CurrentBlock().NumberU64(), "incoming block", block.NumberU64())
utils.Logger().Info().
Int("txNum", len(block.Transactions())).
Uint32("shardID", shardID).
Str("preHash", block.ParentHash().Hex()).
Uint64("currentBlock", txGen.Blockchain().CurrentBlock().NumberU64()).
Uint64("incoming block", block.NumberU64()).
Msg("Got block from leader")
if block.NumberU64()-txGen.Blockchain().CurrentBlock().NumberU64() == 1 {
if err := txGen.AddNewBlock(block); err != nil {
utils.GetLogInstance().Error("Error when adding new block", "error", err)
utils.Logger().Error().
Err(err).
Msg("Error when adding new block")
}
stateMutex.Lock()
if err := txGen.Worker.UpdateCurrent(block.Coinbase()); err != nil {
@ -221,9 +243,16 @@ syncLoop:
pushLoop:
for {
t := time.Now()
utils.GetLogInstance().Debug("Current running time", "running time", t.Sub(start).Seconds(), "totaltime", totalTime)
utils.Logger().Debug().
Float64("running time", t.Sub(start).Seconds()).
Float64("totalTime", totalTime).
Msg("Current running time")
if !isDurationForever(totalTime) && t.Sub(start).Seconds() >= totalTime {
utils.GetLogInstance().Debug("Generator timer ended.", "duration", (int(t.Sub(start))), "startTime", start, "totalTime", totalTime)
utils.Logger().Debug().
Int("duration", (int(t.Sub(start)))).
Time("startTime", start).
Float64("totalTime", totalTime).
Msg("Generator timer ended.")
break pushLoop
}
if shardID != 0 {
@ -231,7 +260,7 @@ pushLoop:
if otherHeight >= 1 {
go func() {
readySignal <- uint32(shardID)
utils.GetLogInstance().Debug("Same blockchain height so readySignal generated")
utils.Logger().Debug().Msg("Same blockchain height so readySignal generated")
time.Sleep(3 * time.Second) // wait for nodes to be ready
}()
}
@ -242,13 +271,15 @@ pushLoop:
lock := sync.Mutex{}
txs, err := GenerateSimulatedTransactionsAccount(uint32(shardID), txGen, setting)
if err != nil {
utils.GetLogInstance().Debug("Error in Generating Txns", "Err", err)
utils.Logger().Debug().
Err(err).
Msg("Error in Generating Txns")
}
lock.Lock()
SendTxsToShard(txGen, txs, uint32(shardID))
lock.Unlock()
case <-time.After(10 * time.Second):
utils.GetLogInstance().Warn("No new block is received so far")
utils.Logger().Warn().Msg("No new block is received so far")
}
}
}
@ -264,7 +295,9 @@ func SendTxsToShard(clientNode *node.Node, txs types.Transactions, shardID uint3
err = clientNode.GetHost().SendMessageToGroups([]p2p.GroupID{clientGroup}, p2p_host.ConstructP2pMessage(byte(0), msg))
}
if err != nil {
utils.GetLogInstance().Debug("Error in Sending Txns", "Err", err)
utils.Logger().Debug().
Err(err).
Msg("Error in Sending Txns")
}
}

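Every txgen change in this file follows the same migration: the variadic key/value call on `utils.GetLogInstance()` becomes a typed chain on `utils.Logger()`, with one strongly typed field per value and the human-readable message moved to the trailing `Msg()`. A self-contained sketch of the target style (assuming the new logger wraps github.com/rs/zerolog; the values are illustrative):

```go
package main

import (
	"os"
	"time"

	"github.com/rs/zerolog"
)

func main() {
	// Stand-in for utils.Logger(): a zerolog logger writing JSON to stdout.
	logger := zerolog.New(os.Stdout).With().Timestamp().Logger()

	totalTime := 300.0
	start := time.Now()

	// Old style: GetLogInstance().Debug("Total Duration", "totalTime", totalTime, ...)
	// New style: one typed field per value, message last.
	logger.Debug().
		Float64("totalTime", totalTime).
		Bool("RunForever", totalTime <= 0).
		Msg("Total Duration")

	logger.Debug().
		Float64("running time", time.Since(start).Seconds()).
		Float64("totalTime", totalTime).
		Msg("Current running time")
}
```

The same before/after shape repeats through the rest of the diff; only the field types (Uint64, Str, Err, Time, ...) change.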
@ -77,7 +77,7 @@ var (
dnsZone = flag.String("dns_zone", "", "if given and not empty, use peers from the zone (default: use libp2p peer discovery instead)")
dnsFlag = flag.Bool("dns", true, "[deprecated] equivalent to -dns_zone t.hmny.io")
//Leader needs to have a minimal number of peers to start consensus
minPeers = flag.Int("min_peers", 100, "Minimal number of Peers in shard")
minPeers = flag.Int("min_peers", 32, "Minimal number of Peers in shard")
// Key file to store the private key
keyFile = flag.String("key", "./.hmykey", "the p2p key file of the harmony node")
// isGenesis indicates this node is a genesis node
@ -322,8 +322,9 @@ func setupConsensusAndNode(nodeConfig *nodeconfig.ConfigType) *node.Node {
}
// TODO: add staking support
// currentNode.StakingAccount = myAccount
utils.GetLogInstance().Info("node account set",
"address", common.MustAddressToBech32(currentNode.StakingAccount.Address))
utils.Logger().Info().
Str("address", common.MustAddressToBech32(currentNode.StakingAccount.Address)).
Msg("node account set")
// TODO: refactor the creation of blockchain out of node.New()
currentConsensus.ChainReader = currentNode.Blockchain()
@ -373,7 +374,9 @@ func setupConsensusAndNode(nodeConfig *nodeconfig.ConfigType) *node.Node {
height := currentNode.Blockchain().CurrentBlock().NumberU64()
currentConsensus.SetViewID(height)
utils.GetLogInstance().Info("Init Blockchain", "height", height)
utils.Logger().Info().
Uint64("height", height).
Msg("Init Blockchain")
// Assign closure functions to the consensus object
currentConsensus.BlockVerifier = currentNode.VerifyNewBlock
@ -435,7 +438,10 @@ func main() {
}
if *shardID >= 0 {
utils.GetLogInstance().Info("ShardID Override", "original", initialAccount.ShardID, "override", *shardID)
utils.Logger().Info().
Uint32("original", initialAccount.ShardID).
Int("override", *shardID).
Msg("ShardID Override")
initialAccount.ShardID = uint32(*shardID)
}
@ -451,15 +457,16 @@ func main() {
if *isExplorer {
startMsg = "==== New Explorer Node ===="
}
utils.GetLogInstance().Info(startMsg,
"BlsPubKey", hex.EncodeToString(nodeConfig.ConsensusPubKey.Serialize()),
"ShardID", nodeConfig.ShardID,
"ShardGroupID", nodeConfig.GetShardGroupID(),
"BeaconGroupID", nodeConfig.GetBeaconGroupID(),
"ClientGroupID", nodeConfig.GetClientGroupID(),
"Role", currentNode.NodeConfig.Role(),
"multiaddress", fmt.Sprintf("/ip4/%s/tcp/%s/p2p/%s",
*ip, *port, nodeConfig.Host.GetID().Pretty()))
utils.Logger().Info().
Str("BlsPubKey", hex.EncodeToString(nodeConfig.ConsensusPubKey.Serialize())).
Uint32("ShardID", nodeConfig.ShardID).
Str("ShardGroupID", nodeConfig.GetShardGroupID().String()).
Str("BeaconGroupID", nodeConfig.GetBeaconGroupID().String()).
Str("ClientGroupID", nodeConfig.GetClientGroupID().String()).
Str("Role", currentNode.NodeConfig.Role().String()).
Str("multiaddress", fmt.Sprintf("/ip4/%s/tcp/%s/p2p/%s", *ip, *port, nodeConfig.Host.GetID().Pretty())).
Msg(startMsg)
if *enableMemProfiling {
memprofiling.GetMemProfiling().Start()

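The startup log above also shows a side effect of the typed API: values such as the GroupIDs and the node Role have no dedicated field type, so the change converts them with `.String()` and logs them through `Str(...)`. A small sketch of that pattern with a hypothetical enum standing in for the real nodeconfig types (zerolog assumed, as before):

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

// Role is a hypothetical stand-in for the node role enum in the startup log.
type Role int

const (
	Validator Role = iota
	ExplorerNode
)

func (r Role) String() string {
	return [...]string{"Validator", "ExplorerNode"}[r]
}

func main() {
	logger := zerolog.New(os.Stdout)

	// Enum-like values are stringified explicitly, mirroring
	// Str("Role", currentNode.NodeConfig.Role().String()) above.
	logger.Info().
		Uint32("ShardID", 0).
		Str("Role", ExplorerNode.String()).
		Msg("==== New Explorer Node ====")
}
```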
@ -50,7 +50,7 @@ func (consensus *Consensus) handleMessageUpdate(payload []byte) {
if msg.Type == msg_pb.MessageType_VIEWCHANGE || msg.Type == msg_pb.MessageType_NEWVIEW {
if msg.GetViewchange() != nil && msg.GetViewchange().ShardId != consensus.ShardID {
consensus.getLogger().Warn().
utils.Logger().Warn().
Uint32("myShardId", consensus.ShardID).
Uint32("receivedShardId", msg.GetViewchange().ShardId).
Msg("Received view change message from different shard")
@ -58,7 +58,7 @@ func (consensus *Consensus) handleMessageUpdate(payload []byte) {
}
} else {
if msg.GetConsensus() != nil && msg.GetConsensus().ShardId != consensus.ShardID {
consensus.getLogger().Warn().
utils.Logger().Warn().
Uint32("myShardId", consensus.ShardID).
Uint32("receivedShardId", msg.GetConsensus().ShardId).
Msg("Received consensus message from different shard")
@ -92,16 +92,12 @@ func (consensus *Consensus) announce(block *types.Block) {
// prepare message and broadcast to validators
encodedBlock, err := rlp.EncodeToBytes(block)
if err != nil {
consensus.getLogger().Debug().
Err(err).
Msg("[Announce] Failed encoding block")
utils.Logger().Debug().Msg("[Announce] Failed encoding block")
return
}
encodedBlockHeader, err := rlp.EncodeToBytes(block.Header())
if err != nil {
consensus.getLogger().Debug().
Err(err).
Msg("[Announce] Failed encoding block header")
utils.Logger().Debug().Msg("[Announce] Failed encoding block header")
return
}
@ -115,12 +111,12 @@ func (consensus *Consensus) announce(block *types.Block) {
_ = protobuf.Unmarshal(msgPayload, msg)
pbftMsg, err := ParsePbftMessage(msg)
if err != nil {
consensus.getLogger().Warn().Err(err).Msg("[Announce] Unable to parse pbft message")
utils.Logger().Warn().Err(err).Msg("[Announce] Unable to parse pbft message")
return
}
consensus.PbftLog.AddMessage(pbftMsg)
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("MsgBlockHash", pbftMsg.BlockHash.Hex()).
Uint64("MsgViewID", pbftMsg.ViewID).
Uint64("MsgBlockNum", pbftMsg.BlockNum).
@ -130,24 +126,24 @@ func (consensus *Consensus) announce(block *types.Block) {
// Leader sign the block hash itself
consensus.prepareSigs[consensus.PubKey.SerializeToHexStr()] = consensus.priKey.SignHash(consensus.blockHash[:])
if err := consensus.prepareBitmap.SetKey(consensus.PubKey, true); err != nil {
consensus.getLogger().Warn().Err(err).Msg("[Announce] Leader prepareBitmap SetKey failed")
utils.Logger().Warn().Err(err).Msg("[Announce] Leader prepareBitmap SetKey failed")
return
}
// Construct broadcast p2p message
if err := consensus.msgSender.SendWithRetry(consensus.blockNum, msg_pb.MessageType_ANNOUNCE, []p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend)); err != nil {
consensus.getLogger().Warn().
utils.Logger().Warn().
Str("groupID", string(p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID)))).
Msg("[Announce] Cannot send announce message")
} else {
consensus.getLogger().Info().
utils.Logger().Info().
Str("blockHash", block.Hash().Hex()).
Uint64("blockNum", block.NumberU64()).
Msg("[Announce] Sent Announce Message!!")
}
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("From", consensus.phase.String()).
Str("To", Prepare.String()).
Msg("[Announce] Switching phase")
@ -155,31 +151,31 @@ func (consensus *Consensus) announce(block *types.Block) {
}
func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) {
consensus.getLogger().Debug().Msg("[OnAnnounce] Receive announce message")
utils.Logger().Debug().Msg("[OnAnnounce] Receive announce message")
if consensus.IsLeader() && consensus.mode.Mode() == Normal {
return
}
senderKey, err := consensus.verifySenderKey(msg)
if err != nil {
consensus.getLogger().Error().Err(err).Msg("[OnAnnounce] VerifySenderKey failed")
utils.Logger().Error().Err(err).Msg("[OnAnnounce] VerifySenderKey failed")
return
}
if !senderKey.IsEqual(consensus.LeaderPubKey) && consensus.mode.Mode() == Normal && !consensus.ignoreViewIDCheck {
consensus.getLogger().Warn().
utils.Logger().Warn().
Str("senderKey", senderKey.SerializeToHexStr()).
Str("leaderKey", consensus.LeaderPubKey.SerializeToHexStr()).
Msg("[OnAnnounce] SenderKey does not match leader PubKey")
return
}
if err = verifyMessageSig(senderKey, msg); err != nil {
consensus.getLogger().Error().Err(err).Msg("[OnAnnounce] Failed to verify leader signature")
utils.Logger().Error().Err(err).Msg("[OnAnnounce] Failed to verify leader signature")
return
}
recvMsg, err := ParsePbftMessage(msg)
if err != nil {
consensus.getLogger().Error().
utils.Logger().Error().
Err(err).
Uint64("MsgBlockNum", recvMsg.BlockNum).
Msg("[OnAnnounce] Unparseable leader message")
@ -191,7 +187,7 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) {
header := new(block.Header)
err = rlp.DecodeBytes(encodedHeader, header)
if err != nil {
consensus.getLogger().Warn().
utils.Logger().Warn().
Err(err).
Uint64("MsgBlockNum", recvMsg.BlockNum).
Msg("[OnAnnounce] Unparseable block header data")
@ -199,7 +195,7 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) {
}
if recvMsg.BlockNum < consensus.blockNum || recvMsg.BlockNum != header.Number().Uint64() {
consensus.getLogger().Debug().
utils.Logger().Debug().
Uint64("MsgBlockNum", recvMsg.BlockNum).
Uint64("blockNum", consensus.blockNum).
Uint64("hdrBlockNum", header.Number().Uint64()).
@ -208,7 +204,7 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) {
}
if consensus.mode.Mode() == Normal {
if err = chain.Engine.VerifyHeader(consensus.ChainReader, header, true); err != nil {
consensus.getLogger().Warn().
utils.Logger().Warn().
Err(err).
Str("inChain", consensus.ChainReader.CurrentHeader().Number().String()).
Str("MsgBlockNum", header.Number().String()).
@ -237,18 +233,18 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) {
logMsgs := consensus.PbftLog.GetMessagesByTypeSeqView(msg_pb.MessageType_ANNOUNCE, recvMsg.BlockNum, recvMsg.ViewID)
if len(logMsgs) > 0 {
if logMsgs[0].BlockHash != recvMsg.BlockHash {
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("leaderKey", consensus.LeaderPubKey.SerializeToHexStr()).
Msg("[OnAnnounce] Leader is malicious")
consensus.startViewChange(consensus.viewID + 1)
}
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("leaderKey", consensus.LeaderPubKey.SerializeToHexStr()).
Msg("[OnAnnounce] Announce message received again")
//return
}
consensus.getLogger().Debug().
utils.Logger().Debug().
Uint64("MsgViewID", recvMsg.ViewID).
Uint64("MsgBlockNum", recvMsg.BlockNum).
Msg("[OnAnnounce] Announce message Added")
@ -261,13 +257,13 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) {
// we have already added the message and block; skip the viewID check and the prepare message if we are in ViewChanging mode
if consensus.mode.Mode() == ViewChanging {
consensus.getLogger().Debug().Msg("[OnAnnounce] Still in ViewChanging Mode, Exiting !!")
utils.Logger().Debug().Msg("[OnAnnounce] Still in ViewChanging Mode, Exiting !!")
return
}
if consensus.checkViewID(recvMsg) != nil {
if consensus.mode.Mode() == Normal {
consensus.getLogger().Debug().
utils.Logger().Debug().
Uint64("MsgViewID", recvMsg.ViewID).
Uint64("MsgBlockNum", recvMsg.BlockNum).
Msg("[OnAnnounce] ViewID check failed")
@ -286,13 +282,13 @@ func (consensus *Consensus) prepare() {
// TODO: this will not return immediately, may block
if err := consensus.msgSender.SendWithoutRetry([]p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend)); err != nil {
consensus.getLogger().Warn().Err(err).Msg("[OnAnnounce] Cannot send prepare message")
utils.Logger().Warn().Err(err).Msg("[OnAnnounce] Cannot send prepare message")
} else {
consensus.getLogger().Info().
utils.Logger().Info().
Str("blockHash", hex.EncodeToString(consensus.blockHash[:])).
Msg("[OnAnnounce] Sent Prepare Message!!")
}
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("From", consensus.phase.String()).
Str("To", Prepare.String()).
Msg("[Announce] Switching Phase")
@ -307,22 +303,22 @@ func (consensus *Consensus) onPrepare(msg *msg_pb.Message) {
senderKey, err := consensus.verifySenderKey(msg)
if err != nil {
consensus.getLogger().Error().Err(err).Msg("[OnPrepare] VerifySenderKey failed")
utils.Logger().Error().Err(err).Msg("[OnPrepare] VerifySenderKey failed")
return
}
if err = verifyMessageSig(senderKey, msg); err != nil {
consensus.getLogger().Error().Err(err).Msg("[OnPrepare] Failed to verify sender's signature")
utils.Logger().Error().Err(err).Msg("[OnPrepare] Failed to verify sender's signature")
return
}
recvMsg, err := ParsePbftMessage(msg)
if err != nil {
consensus.getLogger().Error().Err(err).Msg("[OnPrepare] Unparseable validator message")
utils.Logger().Error().Err(err).Msg("[OnPrepare] Unparseable validator message")
return
}
if recvMsg.ViewID != consensus.viewID || recvMsg.BlockNum != consensus.blockNum {
consensus.getLogger().Debug().
utils.Logger().Debug().
Uint64("MsgViewID", recvMsg.ViewID).
Uint64("MsgBlockNum", recvMsg.BlockNum).
Uint64("blockNum", consensus.blockNum).
@ -331,7 +327,7 @@ func (consensus *Consensus) onPrepare(msg *msg_pb.Message) {
}
if !consensus.PbftLog.HasMatchingViewAnnounce(consensus.blockNum, consensus.viewID, recvMsg.BlockHash) {
consensus.getLogger().Debug().
utils.Logger().Debug().
Uint64("MsgViewID", recvMsg.ViewID).
Uint64("MsgBlockNum", recvMsg.BlockNum).
Uint64("blockNum", consensus.blockNum).
@ -347,7 +343,7 @@ func (consensus *Consensus) onPrepare(msg *msg_pb.Message) {
consensus.mutex.Lock()
defer consensus.mutex.Unlock()
logger := consensus.getLogger().With().Str("validatorPubKey", validatorPubKey).Logger()
logger := utils.Logger().With().Str("validatorPubKey", validatorPubKey).Logger()
if len(prepareSigs) >= consensus.Quorum() {
// already have enough signatures
logger.Debug().Msg("[OnPrepare] Received Additional Prepare Message")
@ -364,11 +360,11 @@ func (consensus *Consensus) onPrepare(msg *msg_pb.Message) {
var sign bls.Sign
err = sign.Deserialize(prepareSig)
if err != nil {
consensus.getLogger().Error().Err(err).Msg("[OnPrepare] Failed to deserialize bls signature")
utils.Logger().Error().Err(err).Msg("[OnPrepare] Failed to deserialize bls signature")
return
}
if !sign.VerifyHash(recvMsg.SenderPubkey, consensus.blockHash[:]) {
consensus.getLogger().Error().Msg("[OnPrepare] Received invalid BLS signature")
utils.Logger().Error().Msg("[OnPrepare] Received invalid BLS signature")
return
}
@ -377,7 +373,7 @@ func (consensus *Consensus) onPrepare(msg *msg_pb.Message) {
prepareSigs[validatorPubKey] = &sign
// Set the bitmap indicating that this validator signed.
if err := prepareBitmap.SetKey(recvMsg.SenderPubkey, true); err != nil {
consensus.getLogger().Warn().Err(err).Msg("[OnPrepare] prepareBitmap.SetKey failed")
utils.Logger().Warn().Err(err).Msg("[OnPrepare] prepareBitmap.SetKey failed")
return
}
@ -393,7 +389,7 @@ func (consensus *Consensus) onPrepare(msg *msg_pb.Message) {
_ = protobuf.Unmarshal(msgPayload, msg)
pbftMsg, err := ParsePbftMessage(msg)
if err != nil {
consensus.getLogger().Warn().Err(err).Msg("[OnPrepare] Unable to parse pbft message")
utils.Logger().Warn().Err(err).Msg("[OnPrepare] Unable to parse pbft message")
return
}
consensus.PbftLog.AddMessage(pbftMsg)
@ -404,22 +400,22 @@ func (consensus *Consensus) onPrepare(msg *msg_pb.Message) {
commitPayload := append(blockNumHash, consensus.blockHash[:]...)
consensus.commitSigs[consensus.PubKey.SerializeToHexStr()] = consensus.priKey.SignHash(commitPayload)
if err := consensus.commitBitmap.SetKey(consensus.PubKey, true); err != nil {
consensus.getLogger().Debug().Msg("[OnPrepare] Leader commit bitmap set failed")
utils.Logger().Debug().Msg("[OnPrepare] Leader commit bitmap set failed")
return
}
if err := consensus.msgSender.SendWithRetry(consensus.blockNum, msg_pb.MessageType_PREPARED, []p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend)); err != nil {
consensus.getLogger().Warn().Msg("[OnPrepare] Cannot send prepared message")
utils.Logger().Warn().Msg("[OnPrepare] Cannot send prepared message")
} else {
consensus.getLogger().Debug().
Bytes("blockHash", consensus.blockHash[:]).
utils.Logger().Debug().
Hex("blockHash", consensus.blockHash[:]).
Uint64("blockNum", consensus.blockNum).
Msg("[OnPrepare] Sent Prepared Message!!")
}
consensus.msgSender.StopRetry(msg_pb.MessageType_ANNOUNCE)
consensus.msgSender.StopRetry(msg_pb.MessageType_COMMITTED) // Stop retry committed msg of last consensus
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("From", consensus.phase.String()).
Str("To", Commit.String()).
Msg("[OnPrepare] Switching phase")
@ -429,37 +425,37 @@ func (consensus *Consensus) onPrepare(msg *msg_pb.Message) {
}
func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
consensus.getLogger().Debug().Msg("[OnPrepared] Received Prepared message")
utils.Logger().Debug().Msg("[OnPrepared] Received Prepared message")
if consensus.IsLeader() && consensus.mode.Mode() == Normal {
return
}
senderKey, err := consensus.verifySenderKey(msg)
if err != nil {
consensus.getLogger().Debug().Err(err).Msg("[OnPrepared] VerifySenderKey failed")
utils.Logger().Debug().Err(err).Msg("[OnPrepared] VerifySenderKey failed")
return
}
if !senderKey.IsEqual(consensus.LeaderPubKey) && consensus.mode.Mode() == Normal && !consensus.ignoreViewIDCheck {
consensus.getLogger().Warn().Msg("[OnPrepared] SenderKey not match leader PubKey")
utils.Logger().Warn().Msg("[OnPrepared] SenderKey not match leader PubKey")
return
}
if err := verifyMessageSig(senderKey, msg); err != nil {
consensus.getLogger().Debug().Err(err).Msg("[OnPrepared] Failed to verify sender's signature")
utils.Logger().Debug().Err(err).Msg("[OnPrepared] Failed to verify sender's signature")
return
}
recvMsg, err := ParsePbftMessage(msg)
if err != nil {
consensus.getLogger().Debug().Err(err).Msg("[OnPrepared] Unparseable validator message")
utils.Logger().Debug().Err(err).Msg("[OnPrepared] Unparseable validator message")
return
}
consensus.getLogger().Info().
utils.Logger().Info().
Uint64("MsgBlockNum", recvMsg.BlockNum).
Uint64("MsgViewID", recvMsg.ViewID).
Msg("[OnPrepared] Received prepared message")
if recvMsg.BlockNum < consensus.blockNum {
consensus.getLogger().Debug().Uint64("MsgBlockNum", recvMsg.BlockNum).Msg("Old Block Received, ignoring!!")
utils.Logger().Debug().Uint64("MsgBlockNum", recvMsg.BlockNum).Msg("Old Block Received, ignoring!!")
return
}
@ -467,11 +463,11 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
blockHash := recvMsg.BlockHash
aggSig, mask, err := consensus.ReadSignatureBitmapPayload(recvMsg.Payload, 0)
if err != nil {
consensus.getLogger().Error().Err(err).Msg("ReadSignatureBitmapPayload failed!!")
utils.Logger().Error().Err(err).Msg("ReadSignatureBitmapPayload failed!!")
return
}
if count := utils.CountOneBits(mask.Bitmap); count < consensus.Quorum() {
consensus.getLogger().Debug().
utils.Logger().Debug().
Int("Need", consensus.Quorum()).
Int("Got", count).
Msg("Not enough signatures in the Prepared msg")
@ -480,7 +476,7 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
if !aggSig.VerifyHash(mask.AggregatePublic, blockHash[:]) {
myBlockHash := common.Hash{}
myBlockHash.SetBytes(consensus.blockHash[:])
consensus.getLogger().Warn().
utils.Logger().Warn().
Uint64("MsgBlockNum", recvMsg.BlockNum).
Uint64("MsgViewID", recvMsg.ViewID).
Msg("[OnPrepared] failed to verify multi signature for prepare phase")
@ -492,30 +488,30 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
var blockObj types.Block
err = rlp.DecodeBytes(block, &blockObj)
if err != nil {
consensus.getLogger().Warn().
utils.Logger().Warn().
Err(err).
Uint64("MsgBlockNum", recvMsg.BlockNum).
Msg("[OnPrepared] Unparseable block header data")
return
}
if blockObj.NumberU64() != recvMsg.BlockNum || recvMsg.BlockNum < consensus.blockNum {
consensus.getLogger().Warn().
utils.Logger().Warn().
Uint64("MsgBlockNum", recvMsg.BlockNum).
Uint64("blockNum", blockObj.NumberU64()).
Msg("[OnPrepared] BlockNum not match")
return
}
if blockObj.Header().Hash() != recvMsg.BlockHash {
consensus.getLogger().Warn().
utils.Logger().Warn().
Uint64("MsgBlockNum", recvMsg.BlockNum).
Bytes("MsgBlockHash", recvMsg.BlockHash[:]).
Hex("MsgBlockHash", recvMsg.BlockHash[:]).
Str("blockObjHash", blockObj.Header().Hash().Hex()).
Msg("[OnPrepared] BlockHash not match")
return
}
if consensus.mode.Mode() == Normal {
if err := chain.Engine.VerifyHeader(consensus.ChainReader, blockObj.Header(), true); err != nil {
consensus.getLogger().Warn().
utils.Logger().Error().
Err(err).
Str("inChain", consensus.ChainReader.CurrentHeader().Number().String()).
Str("MsgBlockNum", blockObj.Header().Number().String()).
@ -525,7 +521,7 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
if consensus.BlockVerifier == nil {
// do nothing
} else if err := consensus.BlockVerifier(&blockObj); err != nil {
consensus.getLogger().Error().Err(err).Msg("[OnPrepared] Block verification failed")
utils.Logger().Error().Err(err).Msg("[OnPrepared] Block verification failed")
return
}
}
@ -533,10 +529,10 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
consensus.PbftLog.AddBlock(&blockObj)
recvMsg.Block = []byte{} // save memory space
consensus.PbftLog.AddMessage(recvMsg)
consensus.getLogger().Debug().
utils.Logger().Debug().
Uint64("MsgViewID", recvMsg.ViewID).
Uint64("MsgBlockNum", recvMsg.BlockNum).
Bytes("blockHash", recvMsg.BlockHash[:]).
Hex("blockHash", recvMsg.BlockHash[:]).
Msg("[OnPrepared] Prepared message and block added")
consensus.mutex.Lock()
@ -544,13 +540,13 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
consensus.tryCatchup()
if consensus.mode.Mode() == ViewChanging {
consensus.getLogger().Debug().Msg("[OnPrepared] Still in ViewChanging mode, Exiting!!")
utils.Logger().Debug().Msg("[OnPrepared] Still in ViewChanging mode, Exiting!!")
return
}
if consensus.checkViewID(recvMsg) != nil {
if consensus.mode.Mode() == Normal {
consensus.getLogger().Debug().
utils.Logger().Debug().
Uint64("MsgViewID", recvMsg.ViewID).
Uint64("MsgBlockNum", recvMsg.BlockNum).
Msg("[OnPrepared] ViewID check failed")
@ -558,7 +554,7 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
return
}
if recvMsg.BlockNum > consensus.blockNum {
consensus.getLogger().Debug().
utils.Logger().Debug().
Uint64("MsgBlockNum", recvMsg.BlockNum).
Uint64("blockNum", consensus.blockNum).
Msg("[OnPrepared] Future Block Received, ignoring!!")
@ -593,15 +589,15 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
}
if err := consensus.msgSender.SendWithoutRetry([]p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend)); err != nil {
consensus.getLogger().Warn().Msg("[OnPrepared] Cannot send commit message!!")
utils.Logger().Warn().Msg("[OnPrepared] Cannot send commit message!!")
} else {
consensus.getLogger().Info().
utils.Logger().Info().
Uint64("blockNum", consensus.blockNum).
Bytes("blockHash", consensus.blockHash[:]).
Hex("blockHash", consensus.blockHash[:]).
Msg("[OnPrepared] Sent Commit Message!!")
}
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("From", consensus.phase.String()).
Str("To", Commit.String()).
Msg("[OnPrepared] Switching phase")
@ -618,22 +614,22 @@ func (consensus *Consensus) onCommit(msg *msg_pb.Message) {
senderKey, err := consensus.verifySenderKey(msg)
if err != nil {
consensus.getLogger().Debug().Err(err).Msg("[OnCommit] VerifySenderKey Failed")
utils.Logger().Debug().Err(err).Msg("[OnCommit] VerifySenderKey Failed")
return
}
if err = verifyMessageSig(senderKey, msg); err != nil {
consensus.getLogger().Debug().Err(err).Msg("[OnCommit] Failed to verify sender's signature")
utils.Logger().Debug().Err(err).Msg("[OnCommit] Failed to verify sender's signature")
return
}
recvMsg, err := ParsePbftMessage(msg)
if err != nil {
consensus.getLogger().Debug().Err(err).Msg("[OnCommit] Parse pbft message failed")
utils.Logger().Debug().Err(err).Msg("[OnCommit] Parse pbft message failed")
return
}
if recvMsg.ViewID != consensus.viewID || recvMsg.BlockNum != consensus.blockNum {
consensus.getLogger().Debug().
utils.Logger().Debug().
Uint64("MsgViewID", recvMsg.ViewID).
Uint64("MsgBlockNum", recvMsg.BlockNum).
Uint64("blockNum", consensus.blockNum).
@ -643,8 +639,8 @@ func (consensus *Consensus) onCommit(msg *msg_pb.Message) {
}
if !consensus.PbftLog.HasMatchingAnnounce(consensus.blockNum, recvMsg.BlockHash) {
consensus.getLogger().Debug().
Bytes("MsgBlockHash", recvMsg.BlockHash[:]).
utils.Logger().Debug().
Hex("MsgBlockHash", recvMsg.BlockHash[:]).
Uint64("MsgBlockNum", recvMsg.BlockNum).
Uint64("blockNum", consensus.blockNum).
Msg("[OnCommit] Cannot find matching blockhash")
@ -652,8 +648,8 @@ func (consensus *Consensus) onCommit(msg *msg_pb.Message) {
}
if !consensus.PbftLog.HasMatchingPrepared(consensus.blockNum, recvMsg.BlockHash) {
consensus.getLogger().Debug().
Bytes("blockHash", recvMsg.BlockHash[:]).
utils.Logger().Debug().
Hex("blockHash", recvMsg.BlockHash[:]).
Uint64("blockNum", consensus.blockNum).
Msg("[OnCommit] Cannot find matching prepared message")
return
@ -666,7 +662,7 @@ func (consensus *Consensus) onCommit(msg *msg_pb.Message) {
consensus.mutex.Lock()
defer consensus.mutex.Unlock()
logger := consensus.getLogger().With().Str("validatorPubKey", validatorPubKey).Logger()
logger := utils.Logger().With().Str("validatorPubKey", validatorPubKey).Logger()
if !consensus.IsValidatorInCommittee(recvMsg.SenderPubkey) {
logger.Error().Msg("[OnCommit] Invalid validator")
return
@ -705,7 +701,7 @@ func (consensus *Consensus) onCommit(msg *msg_pb.Message) {
commitSigs[validatorPubKey] = &sign
// Set the bitmap indicating that this validator signed.
if err := commitBitmap.SetKey(recvMsg.SenderPubkey, true); err != nil {
consensus.getLogger().Warn().Err(err).Msg("[OnCommit] commitBitmap.SetKey failed")
utils.Logger().Warn().Err(err).Msg("[OnCommit] commitBitmap.SetKey failed")
return
}
@ -732,7 +728,7 @@ func (consensus *Consensus) onCommit(msg *msg_pb.Message) {
}
func (consensus *Consensus) finalizeCommits() {
consensus.getLogger().Info().Int("NumCommits", len(consensus.commitSigs)).Msg("[Finalizing] Finalizing Block")
utils.Logger().Info().Int("NumCommits", len(consensus.commitSigs)).Msg("[Finalizing] Finalizing Block")
beforeCatchupNum := consensus.blockNum
//beforeCatchupViewID := consensus.viewID
@ -747,7 +743,7 @@ func (consensus *Consensus) finalizeCommits() {
_ = protobuf.Unmarshal(msgPayload, msg)
pbftMsg, err := ParsePbftMessage(msg)
if err != nil {
consensus.getLogger().Warn().Err(err).Msg("[FinalizeCommits] Unable to parse pbft message")
utils.Logger().Warn().Err(err).Msg("[FinalizeCommits] Unable to parse pbft message")
return
}
consensus.PbftLog.AddMessage(pbftMsg)
@ -756,14 +752,14 @@ func (consensus *Consensus) finalizeCommits() {
// find correct block content
block := consensus.PbftLog.GetBlockByHash(consensus.blockHash)
if block == nil {
consensus.getLogger().Warn().
utils.Logger().Warn().
Str("blockHash", hex.EncodeToString(consensus.blockHash[:])).
Msg("[FinalizeCommits] Cannot find block by hash")
return
}
consensus.tryCatchup()
if consensus.blockNum-beforeCatchupNum != 1 {
consensus.getLogger().Warn().
utils.Logger().Warn().
Uint64("beforeCatchupBlockNum", beforeCatchupNum).
Msg("[FinalizeCommits] Leader cannot provide the correct block for committed message")
return
@ -771,10 +767,10 @@ func (consensus *Consensus) finalizeCommits() {
// if the leader successfully finalized the block, send the committed message to validators
if err := consensus.msgSender.SendWithRetry(block.NumberU64(), msg_pb.MessageType_COMMITTED, []p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend)); err != nil {
consensus.getLogger().Warn().Err(err).Msg("[Finalizing] Cannot send committed message")
utils.Logger().Warn().Err(err).Msg("[Finalizing] Cannot send committed message")
} else {
consensus.getLogger().Info().
Bytes("blockHash", consensus.blockHash[:]).
utils.Logger().Info().
Hex("blockHash", consensus.blockHash[:]).
Uint64("blockNum", consensus.blockNum).
Msg("[Finalizing] Sent Committed Message")
}
@ -789,13 +785,13 @@ func (consensus *Consensus) finalizeCommits() {
if consensus.consensusTimeout[timeoutBootstrap].IsActive() {
consensus.consensusTimeout[timeoutBootstrap].Stop()
consensus.getLogger().Debug().Msg("[Finalizing] Start consensus timer; stop bootstrap timer only once")
utils.Logger().Debug().Msg("[Finalizing] Start consensus timer; stop bootstrap timer only once")
} else {
consensus.getLogger().Debug().Msg("[Finalizing] Start consensus timer")
utils.Logger().Debug().Msg("[Finalizing] Start consensus timer")
}
consensus.consensusTimeout[timeoutConsensus].Start()
consensus.getLogger().Info().
utils.Logger().Info().
Uint64("blockNum", block.NumberU64()).
Uint64("ViewId", block.Header().ViewID().Uint64()).
Str("blockHash", block.Hash().String()).
@ -809,7 +805,7 @@ func (consensus *Consensus) finalizeCommits() {
}
func (consensus *Consensus) onCommitted(msg *msg_pb.Message) {
consensus.getLogger().Debug().Msg("[OnCommitted] Receive committed message")
utils.Logger().Debug().Msg("[OnCommitted] Receive committed message")
if consensus.IsLeader() && consensus.mode.Mode() == Normal {
return
@ -817,26 +813,26 @@ func (consensus *Consensus) onCommitted(msg *msg_pb.Message) {
senderKey, err := consensus.verifySenderKey(msg)
if err != nil {
consensus.getLogger().Warn().Err(err).Msg("[OnCommitted] verifySenderKey failed")
utils.Logger().Warn().Err(err).Msg("[OnCommitted] verifySenderKey failed")
return
}
if !senderKey.IsEqual(consensus.LeaderPubKey) && consensus.mode.Mode() == Normal && !consensus.ignoreViewIDCheck {
consensus.getLogger().Warn().Msg("[OnCommitted] senderKey not match leader PubKey")
utils.Logger().Warn().Msg("[OnCommitted] senderKey not match leader PubKey")
return
}
if err = verifyMessageSig(senderKey, msg); err != nil {
consensus.getLogger().Warn().Err(err).Msg("[OnCommitted] Failed to verify sender's signature")
utils.Logger().Warn().Err(err).Msg("[OnCommitted] Failed to verify sender's signature")
return
}
recvMsg, err := ParsePbftMessage(msg)
if err != nil {
consensus.getLogger().Warn().Msg("[OnCommitted] unable to parse msg")
utils.Logger().Warn().Msg("[OnCommitted] unable to parse msg")
return
}
if recvMsg.BlockNum < consensus.blockNum {
consensus.getLogger().Info().
utils.Logger().Info().
Uint64("MsgBlockNum", recvMsg.BlockNum).
Uint64("blockNum", consensus.blockNum).
Msg("[OnCommitted] Received Old Blocks!!")
@ -845,13 +841,13 @@ func (consensus *Consensus) onCommitted(msg *msg_pb.Message) {
aggSig, mask, err := consensus.ReadSignatureBitmapPayload(recvMsg.Payload, 0)
if err != nil {
consensus.getLogger().Error().Err(err).Msg("[OnCommitted] readSignatureBitmapPayload failed")
utils.Logger().Error().Err(err).Msg("[OnCommitted] readSignatureBitmapPayload failed")
return
}
// check has 2f+1 signatures
if count := utils.CountOneBits(mask.Bitmap); count < consensus.Quorum() {
consensus.getLogger().Warn().
utils.Logger().Warn().
Int("need", consensus.Quorum()).
Int("got", count).
Msg("[OnCommitted] Not enough signature in committed msg")
@ -862,7 +858,7 @@ func (consensus *Consensus) onCommitted(msg *msg_pb.Message) {
binary.LittleEndian.PutUint64(blockNumBytes, recvMsg.BlockNum)
commitPayload := append(blockNumBytes, recvMsg.BlockHash[:]...)
if !aggSig.VerifyHash(mask.AggregatePublic, commitPayload) {
consensus.getLogger().Error().
utils.Logger().Error().
Uint64("MsgBlockNum", recvMsg.BlockNum).
Msg("[OnCommitted] Failed to verify the multi signature for commit phase")
return
@ -870,7 +866,7 @@ func (consensus *Consensus) onCommitted(msg *msg_pb.Message) {
consensus.PbftLog.AddMessage(recvMsg)
consensus.ChainReader.WriteLastCommits(recvMsg.Payload)
consensus.getLogger().Debug().
utils.Logger().Debug().
Uint64("MsgViewID", recvMsg.ViewID).
Uint64("MsgBlockNum", recvMsg.BlockNum).
Msg("[OnCommitted] Committed message added")
@ -882,7 +878,7 @@ func (consensus *Consensus) onCommitted(msg *msg_pb.Message) {
consensus.commitBitmap = mask
if recvMsg.BlockNum-consensus.blockNum > consensusBlockNumBuffer {
consensus.getLogger().Debug().Uint64("MsgBlockNum", recvMsg.BlockNum).Msg("[OnCommitted] out of sync")
utils.Logger().Debug().Uint64("MsgBlockNum", recvMsg.BlockNum).Msg("[OnCommitted] out of sync")
go func() {
select {
case consensus.blockNumLowChan <- struct{}{}:
@ -897,21 +893,21 @@ func (consensus *Consensus) onCommitted(msg *msg_pb.Message) {
}
// if consensus.checkViewID(recvMsg) != nil {
// consensus.getLogger().Debug("viewID check failed", "viewID", recvMsg.ViewID, "myViewID", consensus.viewID)
// utils.Logger().Debug("viewID check failed", "viewID", recvMsg.ViewID, "myViewID", consensus.viewID)
// return
// }
consensus.tryCatchup()
if consensus.mode.Mode() == ViewChanging {
consensus.getLogger().Debug().Msg("[OnCommitted] Still in ViewChanging mode, Exiting!!")
utils.Logger().Debug().Msg("[OnCommitted] Still in ViewChanging mode, Exiting!!")
return
}
if consensus.consensusTimeout[timeoutBootstrap].IsActive() {
consensus.consensusTimeout[timeoutBootstrap].Stop()
consensus.getLogger().Debug().Msg("[OnCommitted] Start consensus timer; stop bootstrap timer only once")
utils.Logger().Debug().Msg("[OnCommitted] Start consensus timer; stop bootstrap timer only once")
} else {
consensus.getLogger().Debug().Msg("[OnCommitted] Start consensus timer")
utils.Logger().Debug().Msg("[OnCommitted] Start consensus timer")
}
consensus.consensusTimeout[timeoutConsensus].Start()
return
@ -943,7 +939,7 @@ func (consensus *Consensus) LastCommitSig() ([]byte, []byte, error) {
// try to catch up if fall behind
func (consensus *Consensus) tryCatchup() {
consensus.getLogger().Info().Msg("[TryCatchup] commit new blocks")
utils.Logger().Info().Msg("[TryCatchup] commit new blocks")
// if consensus.phase != Commit && consensus.mode.Mode() == Normal {
// return
// }
@ -954,11 +950,11 @@ func (consensus *Consensus) tryCatchup() {
break
}
if len(msgs) > 1 {
consensus.getLogger().Error().
utils.Logger().Error().
Int("numMsgs", len(msgs)).
Msg("[TryCatchup] DANGER!!! we should only get one committed message for a given blockNum")
}
consensus.getLogger().Info().Msg("[TryCatchup] committed message found")
utils.Logger().Info().Msg("[TryCatchup] committed message found")
block := consensus.PbftLog.GetBlockByHash(msgs[0].BlockHash)
if block == nil {
@ -968,36 +964,36 @@ func (consensus *Consensus) tryCatchup() {
if consensus.BlockVerifier == nil {
// do nothing
} else if err := consensus.BlockVerifier(block); err != nil {
consensus.getLogger().Info().Err(err).Msg("[TryCatchup] block verification failed")
utils.Logger().Info().Err(err).Msg("[TryCatchup] block verification failed")
return
}
if block.ParentHash() != consensus.ChainReader.CurrentHeader().Hash() {
consensus.getLogger().Debug().Msg("[TryCatchup] parent block hash not match")
utils.Logger().Debug().Msg("[TryCatchup] parent block hash not match")
break
}
consensus.getLogger().Info().Msg("[TryCatchup] block found to commit")
utils.Logger().Info().Msg("[TryCatchup] block found to commit")
preparedMsgs := consensus.PbftLog.GetMessagesByTypeSeqHash(msg_pb.MessageType_PREPARED, msgs[0].BlockNum, msgs[0].BlockHash)
msg := consensus.PbftLog.FindMessageByMaxViewID(preparedMsgs)
if msg == nil {
break
}
consensus.getLogger().Info().Msg("[TryCatchup] prepared message found to commit")
utils.Logger().Info().Msg("[TryCatchup] prepared message found to commit")
consensus.blockHash = [32]byte{}
consensus.blockNum = consensus.blockNum + 1
consensus.viewID = msgs[0].ViewID + 1
consensus.LeaderPubKey = msgs[0].SenderPubkey
consensus.getLogger().Info().Msg("[TryCatchup] Adding block to chain")
utils.Logger().Info().Msg("[TryCatchup] Adding block to chain")
consensus.OnConsensusDone(block, msgs[0].Payload)
consensus.ResetState()
select {
case consensus.VerifiedNewBlock <- block:
default:
consensus.getLogger().Info().
utils.Logger().Info().
Str("blockHash", block.Hash().String()).
Msg("[TryCatchup] consensus verified block send to chan failed")
continue
@ -1006,7 +1002,7 @@ func (consensus *Consensus) tryCatchup() {
break
}
if currentBlockNum < consensus.blockNum {
consensus.getLogger().Info().
utils.Logger().Info().
Uint64("From", currentBlockNum).
Uint64("To", consensus.blockNum).
Msg("[TryCatchup] Caught up!")
@ -1026,21 +1022,21 @@ func (consensus *Consensus) tryCatchup() {
func (consensus *Consensus) Start(blockChannel chan *types.Block, stopChan chan struct{}, stoppedChan chan struct{}, startChannel chan struct{}) {
go func() {
if consensus.IsLeader() {
consensus.getLogger().Info().Time("time", time.Now()).Msg("[ConsensusMainLoop] Waiting for consensus start")
utils.Logger().Info().Time("time", time.Now()).Msg("[ConsensusMainLoop] Waiting for consensus start")
<-startChannel
// send a signal to indicate it's ready to run consensus
// this signal is consumed by node object to create a new block and in turn trigger a new consensus on it
go func() {
consensus.getLogger().Info().Time("time", time.Now()).Msg("[ConsensusMainLoop] Send ReadySignal")
utils.Logger().Info().Time("time", time.Now()).Msg("[ConsensusMainLoop] Send ReadySignal")
consensus.ReadySignal <- struct{}{}
}()
}
consensus.getLogger().Info().Time("time", time.Now()).Msg("[ConsensusMainLoop] Consensus started")
utils.Logger().Info().Time("time", time.Now()).Msg("[ConsensusMainLoop] Consensus started")
defer close(stoppedChan)
ticker := time.NewTicker(3 * time.Second)
consensus.consensusTimeout[timeoutBootstrap].Start()
consensus.getLogger().Debug().
utils.Logger().Debug().
Uint64("viewID", consensus.viewID).
Uint64("blockNum", consensus.blockNum).
Msg("[ConsensusMainLoop] Start bootstrap timeout (only once)")
@ -1057,11 +1053,11 @@ func (consensus *Consensus) Start(blockChannel chan *types.Block, stopChan chan
continue
}
if k != timeoutViewChange {
consensus.getLogger().Debug().Msg("[ConsensusMainLoop] Ops Consensus Timeout!!!")
utils.Logger().Debug().Msg("[ConsensusMainLoop] Ops Consensus Timeout!!!")
consensus.startViewChange(consensus.viewID + 1)
break
} else {
consensus.getLogger().Debug().Msg("[ConsensusMainLoop] Ops View Change Timeout!!!")
utils.Logger().Debug().Msg("[ConsensusMainLoop] Ops View Change Timeout!!!")
viewID := consensus.mode.ViewID()
consensus.startViewChange(viewID + 1)
break
@ -1072,15 +1068,15 @@ func (consensus *Consensus) Start(blockChannel chan *types.Block, stopChan chan
consensus.SetViewID(consensus.ChainReader.CurrentHeader().ViewID().Uint64() + 1)
mode := consensus.UpdateConsensusInformation()
consensus.mode.SetMode(mode)
consensus.getLogger().Info().Str("Mode", mode.String()).Msg("Node is in sync")
utils.Logger().Info().Str("Mode", mode.String()).Msg("Node is in sync")
case <-consensus.syncNotReadyChan:
consensus.SetBlockNum(consensus.ChainReader.CurrentHeader().Number().Uint64() + 1)
consensus.mode.SetMode(Syncing)
consensus.getLogger().Info().Msg("Node is out of sync")
utils.Logger().Info().Msg("Node is out of sync")
case newBlock := <-blockChannel:
consensus.getLogger().Info().
utils.Logger().Info().
Uint64("MsgBlockNum", newBlock.NumberU64()).
Msg("[ConsensusMainLoop] Received Proposed New Block!")
@ -1163,7 +1159,7 @@ func (consensus *Consensus) Start(blockChannel chan *types.Block, stopChan chan
startTime = time.Now()
consensus.msgSender.Reset(newBlock.NumberU64())
consensus.getLogger().Debug().
utils.Logger().Debug().
Int("numTxs", len(newBlock.Transactions())).
Time("startTime", startTime).
Int("publicKeys", len(consensus.PublicKeys)).

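One pattern survives the getLogger-to-utils.Logger sweep above: `logger := utils.Logger().With().Str("validatorPubKey", validatorPubKey).Logger()` in onPrepare and onCommit. `With()` builds a child logger that stamps a fixed field onto every event it emits, which is useful once the per-consensus `getLogger()` context is gone. A minimal sketch (zerolog assumed, key value illustrative):

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	base := zerolog.New(os.Stdout)

	// Child logger carrying validatorPubKey on every event, mirroring the
	// sub-logger built at the top of onPrepare/onCommit above.
	validatorPubKey := "cafe01" // illustrative placeholder, not a real key
	logger := base.With().Str("validatorPubKey", validatorPubKey).Logger()

	logger.Debug().Msg("[OnPrepare] Received Additional Prepare Message")
	logger.Error().Msg("[OnCommit] Invalid validator")
}
```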
@ -42,8 +42,8 @@ func (consensus *Consensus) constructViewChangeMessage() []byte {
vcMsg.Payload = append(msgToSign[:0:0], msgToSign...)
}
consensus.getLogger().Debug().
Bytes("m1Payload", vcMsg.Payload).
utils.Logger().Debug().
Hex("m1Payload", vcMsg.Payload).
Str("pubKey", consensus.PubKey.SerializeToHexStr()).
Msg("[constructViewChangeMessage]")
@ -89,7 +89,7 @@ func (consensus *Consensus) constructNewViewMessage() []byte {
vcMsg.Payload = consensus.m1Payload
sig2arr := consensus.GetNilSigsArray()
consensus.getLogger().Debug().Int("len", len(sig2arr)).Msg("[constructNewViewMessage] M2 (NIL) type signatures")
utils.Logger().Debug().Int("len", len(sig2arr)).Msg("[constructNewViewMessage] M2 (NIL) type signatures")
if len(sig2arr) > 0 {
m2Sig := bls_cosi.AggregateSig(sig2arr)
vcMsg.M2Aggsigs = m2Sig.Serialize()
@ -97,7 +97,7 @@ func (consensus *Consensus) constructNewViewMessage() []byte {
}
sig3arr := consensus.GetViewIDSigsArray()
consensus.getLogger().Debug().Int("len", len(sig3arr)).Msg("[constructNewViewMessage] M3 (ViewID) type signatures")
utils.Logger().Debug().Int("len", len(sig3arr)).Msg("[constructNewViewMessage] M3 (ViewID) type signatures")
// even though we check here for safety, m3 type signatures must be >= 2f+1
if len(sig3arr) > 0 {
m3Sig := bls_cosi.AggregateSig(sig3arr)

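A small Go detail from the hunk above: `vcMsg.Payload = append(msgToSign[:0:0], msgToSign...)` is the idiom for copying a slice into fresh backing storage, so later mutation of `msgToSign` cannot alias the message payload. A minimal illustration:

```go
package main

import "fmt"

func main() {
	msgToSign := []byte{1, 2, 3}

	// s[:0:0] is a zero-length, zero-capacity slice, so append is forced to
	// allocate a new backing array: payload is an independent copy.
	payload := append(msgToSign[:0:0], msgToSign...)

	msgToSign[0] = 9
	fmt.Println(payload) // [1 2 3], unaffected by the write above
}
```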
@ -123,7 +123,7 @@ func (consensus *Consensus) switchPhase(desirePhase PbftPhase, override bool) {
func (consensus *Consensus) GetNextLeaderKey() *bls.PublicKey {
idx := consensus.getIndexOfPubKey(consensus.LeaderPubKey)
if idx == -1 {
consensus.getLogger().Warn().
utils.Logger().Warn().
Str("key", consensus.LeaderPubKey.SerializeToHexStr()).
Msg("GetNextLeaderKey: currentLeaderKey not found")
}
@ -142,7 +142,7 @@ func (consensus *Consensus) getIndexOfPubKey(pubKey *bls.PublicKey) int {
// ResetViewChangeState reset the state for viewchange
func (consensus *Consensus) ResetViewChangeState() {
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("Phase", consensus.phase.String()).
Msg("[ResetViewChangeState] Resetting view change state")
consensus.mode.SetMode(Normal)
@ -180,7 +180,7 @@ func (consensus *Consensus) startViewChange(viewID uint64) {
diff := viewID - consensus.viewID
duration := time.Duration(int64(diff) * int64(viewChangeDuration))
consensus.getLogger().Info().
utils.Logger().Info().
Uint64("ViewChangingID", viewID).
Dur("timeoutDuration", duration).
Str("NextLeader", consensus.LeaderPubKey.SerializeToHexStr()).
@ -191,7 +191,7 @@ func (consensus *Consensus) startViewChange(viewID uint64) {
consensus.consensusTimeout[timeoutViewChange].SetDuration(duration)
consensus.consensusTimeout[timeoutViewChange].Start()
consensus.getLogger().Debug().
utils.Logger().Debug().
Uint64("ViewChangingID", consensus.mode.ViewID()).
Msg("[startViewChange] start view change timer")
}
@ -199,7 +199,7 @@ func (consensus *Consensus) startViewChange(viewID uint64) {
func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
recvMsg, err := ParseViewChangeMessage(msg)
if err != nil {
consensus.getLogger().Warn().Msg("[onViewChange] Unable To Parse Viewchange Message")
utils.Logger().Warn().Msg("[onViewChange] Unable To Parse Viewchange Message")
return
}
newLeaderKey := recvMsg.LeaderPubkey
@ -208,7 +208,7 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
}
if len(consensus.viewIDSigs) >= consensus.Quorum() {
consensus.getLogger().Debug().
utils.Logger().Debug().
Int("have", len(consensus.viewIDSigs)).
Int("need", consensus.Quorum()).
Str("validatorPubKey", recvMsg.SenderPubkey.SerializeToHexStr()).
@ -218,34 +218,34 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
senderKey, err := consensus.verifyViewChangeSenderKey(msg)
if err != nil {
consensus.getLogger().Debug().Err(err).Msg("[onViewChange] VerifySenderKey Failed")
utils.Logger().Debug().Err(err).Msg("[onViewChange] VerifySenderKey Failed")
return
}
// TODO: if difference is only one, new leader can still propose the same committed block to avoid another view change
if consensus.blockNum > recvMsg.BlockNum {
consensus.getLogger().Debug().
utils.Logger().Debug().
Uint64("MsgBlockNum", recvMsg.BlockNum).
Msg("[onViewChange] Message BlockNum Is Low")
return
}
if consensus.blockNum < recvMsg.BlockNum {
consensus.getLogger().Warn().
utils.Logger().Warn().
Uint64("MsgBlockNum", recvMsg.BlockNum).
Msg("[onViewChange] New Leader Has Lower Blocknum")
return
}
if consensus.mode.Mode() == ViewChanging && consensus.mode.ViewID() > recvMsg.ViewID {
consensus.getLogger().Warn().
utils.Logger().Warn().
Uint64("MyViewChangingID", consensus.mode.ViewID()).
Uint64("MsgViewChangingID", recvMsg.ViewID).
Msg("[onViewChange] ViewChanging ID Is Low")
return
}
if err = verifyMessageSig(senderKey, msg); err != nil {
consensus.getLogger().Debug().Err(err).Msg("[onViewChange] Failed To Verify Sender's Signature")
utils.Logger().Debug().Err(err).Msg("[onViewChange] Failed To Verify Sender's Signature")
return
}
@ -260,11 +260,11 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
preparedMsgs := consensus.PbftLog.GetMessagesByTypeSeq(msg_pb.MessageType_PREPARED, recvMsg.BlockNum)
preparedMsg := consensus.PbftLog.FindMessageByMaxViewID(preparedMsgs)
if preparedMsg == nil {
consensus.getLogger().Debug().Msg("[onViewChange] add my M2(NIL) type messaage")
utils.Logger().Debug().Msg("[onViewChange] add my M2(NIL) type messaage")
consensus.nilSigs[consensus.PubKey.SerializeToHexStr()] = consensus.priKey.SignHash(NIL)
consensus.nilBitmap.SetKey(consensus.PubKey, true)
} else {
consensus.getLogger().Debug().Msg("[onViewChange] add my M1 type messaage")
utils.Logger().Debug().Msg("[onViewChange] add my M1 type messaage")
msgToSign := append(preparedMsg.BlockHash[:], preparedMsg.Payload...)
consensus.bhpSigs[consensus.PubKey.SerializeToHexStr()] = consensus.priKey.SignHash(msgToSign)
consensus.bhpBitmap.SetKey(consensus.PubKey, true)
@ -283,18 +283,18 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
if len(recvMsg.Payload) == 0 {
_, ok := consensus.nilSigs[senderKey.SerializeToHexStr()]
if ok {
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("validatorPubKey", senderKey.SerializeToHexStr()).
Msg("[onViewChange] Already Received M2 message from validator")
return
}
if !recvMsg.ViewchangeSig.VerifyHash(senderKey, NIL) {
consensus.getLogger().Warn().Msg("[onViewChange] Failed To Verify Signature For M2 Type Viewchange Message")
utils.Logger().Warn().Msg("[onViewChange] Failed To Verify Signature For M2 Type Viewchange Message")
return
}
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("validatorPubKey", senderKey.SerializeToHexStr()).
Msg("[onViewChange] Add M2 (NIL) type message")
consensus.nilSigs[senderKey.SerializeToHexStr()] = recvMsg.ViewchangeSig
@ -302,20 +302,20 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
} else { // m1 type message
_, ok := consensus.bhpSigs[senderKey.SerializeToHexStr()]
if ok {
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("validatorPubKey", senderKey.SerializeToHexStr()).
Msg("[onViewChange] Already Received M1 Message From the Validator")
return
}
if !recvMsg.ViewchangeSig.VerifyHash(recvMsg.SenderPubkey, recvMsg.Payload) {
consensus.getLogger().Warn().Msg("[onViewChange] Failed to Verify Signature for M1 Type Viewchange Message")
utils.Logger().Warn().Msg("[onViewChange] Failed to Verify Signature for M1 Type Viewchange Message")
return
}
// on first receipt of an m1 type message, verify the validity of the prepared message
if len(consensus.m1Payload) == 0 || !bytes.Equal(consensus.m1Payload, recvMsg.Payload) {
if len(recvMsg.Payload) <= 32 {
consensus.getLogger().Debug().
utils.Logger().Debug().
Int("len", len(recvMsg.Payload)).
Msg("[onViewChange] M1 RecvMsg Payload Not Enough Length")
return
@ -323,12 +323,12 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
blockHash := recvMsg.Payload[:32]
aggSig, mask, err := consensus.ReadSignatureBitmapPayload(recvMsg.Payload, 32)
if err != nil {
consensus.getLogger().Error().Err(err).Msg("[onViewChange] M1 RecvMsg Payload Read Error")
utils.Logger().Error().Err(err).Msg("[onViewChange] M1 RecvMsg Payload Read Error")
return
}
// check has 2f+1 signature in m1 type message
if count := utils.CountOneBits(mask.Bitmap); count < consensus.Quorum() {
consensus.getLogger().Debug().
utils.Logger().Debug().
Int("need", consensus.Quorum()).
Int("have", count).
Msg("[onViewChange] M1 Payload Not Have Enough Signature")
@ -337,8 +337,8 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
// Verify the multi-sig for prepare phase
if !aggSig.VerifyHash(mask.AggregatePublic, blockHash[:]) {
consensus.getLogger().Warn().
Bytes("blockHash", blockHash).
utils.Logger().Warn().
Hex("blockHash", blockHash).
Msg("[onViewChange] failed to verify multi signature for m1 prepared payload")
return
}
@ -353,11 +353,11 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
preparedMsg.Payload = make([]byte, len(recvMsg.Payload)-32)
copy(preparedMsg.Payload[:], recvMsg.Payload[32:])
preparedMsg.SenderPubkey = consensus.PubKey
consensus.getLogger().Info().Msg("[onViewChange] New Leader Prepared Message Added")
utils.Logger().Info().Msg("[onViewChange] New Leader Prepared Message Added")
consensus.PbftLog.AddMessage(&preparedMsg)
}
}
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("validatorPubKey", senderKey.SerializeToHexStr()).
Msg("[onViewChange] Add M1 (prepared) type message")
consensus.bhpSigs[senderKey.SerializeToHexStr()] = recvMsg.ViewchangeSig
@ -367,7 +367,7 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
// check and add viewID (m3 type) message signature
_, ok := consensus.viewIDSigs[senderKey.SerializeToHexStr()]
if ok {
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("validatorPubKey", senderKey.SerializeToHexStr()).
Msg("[onViewChange] Already Received M3(ViewID) message from the validator")
return
@ -375,17 +375,17 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
viewIDHash := make([]byte, 8)
binary.LittleEndian.PutUint64(viewIDHash, recvMsg.ViewID)
if !recvMsg.ViewidSig.VerifyHash(recvMsg.SenderPubkey, viewIDHash) {
consensus.getLogger().Warn().
utils.Logger().Warn().
Uint64("MsgViewID", recvMsg.ViewID).
Msg("[onViewChange] Failed to Verify M3 Message Signature")
return
}
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("validatorPubKey", senderKey.SerializeToHexStr()).
Msg("[onViewChange] Add M3 (ViewID) type message")
consensus.viewIDSigs[senderKey.SerializeToHexStr()] = recvMsg.ViewidSig
consensus.viewIDBitmap.SetKey(recvMsg.SenderPubkey, true) // Set the bitmap indicating that this validator signed.
consensus.getLogger().Debug().
utils.Logger().Debug().
Int("numSigs", len(consensus.viewIDSigs)).
Int("needed", consensus.Quorum()).
Msg("[onViewChange]")
@ -400,7 +400,7 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
consensus.ReadySignal <- struct{}{}
}()
} else {
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("From", consensus.phase.String()).
Str("To", Commit.String()).
Msg("[OnViewChange] Switching phase")
@ -408,7 +408,7 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
copy(consensus.blockHash[:], consensus.m1Payload[:32])
aggSig, mask, err := consensus.ReadSignatureBitmapPayload(recvMsg.Payload, 32)
if err != nil {
consensus.getLogger().Error().Err(err).Msg("[onViewChange] ReadSignatureBitmapPayload Fail")
utils.Logger().Error().Err(err).Msg("[onViewChange] ReadSignatureBitmapPayload Fail")
return
}
consensus.aggregatedPrepareSig = aggSig
@ -420,7 +420,7 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
commitPayload := append(blockNumBytes, consensus.blockHash[:]...)
consensus.commitSigs[consensus.PubKey.SerializeToHexStr()] = consensus.priKey.SignHash(commitPayload)
if err = consensus.commitBitmap.SetKey(consensus.PubKey, true); err != nil {
consensus.getLogger().Debug().Msg("[OnViewChange] New Leader commit bitmap set failed")
utils.Logger().Debug().Msg("[OnViewChange] New Leader commit bitmap set failed")
return
}
}
@ -428,9 +428,9 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
consensus.mode.SetViewID(recvMsg.ViewID)
msgToSend := consensus.constructNewViewMessage()
consensus.getLogger().Warn().
utils.Logger().Warn().
Int("payloadSize", len(consensus.m1Payload)).
Bytes("M1Payload", consensus.m1Payload).
Hex("M1Payload", consensus.m1Payload).
Msg("[onViewChange] Sent NewView Message")
consensus.msgSender.SendWithRetry(consensus.blockNum, msg_pb.MessageType_NEWVIEW, []p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend))
@ -438,10 +438,10 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
consensus.ResetViewChangeState()
consensus.consensusTimeout[timeoutViewChange].Stop()
consensus.consensusTimeout[timeoutConsensus].Start()
consensus.getLogger().Debug().
utils.Logger().Debug().
Uint64("viewChangingID", consensus.mode.ViewID()).
Msg("[onViewChange] New Leader Start Consensus Timer and Stop View Change Timer")
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("myKey", consensus.PubKey.SerializeToHexStr()).
Uint64("viewID", consensus.viewID).
Uint64("block", consensus.blockNum).
@ -451,27 +451,27 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
// TODO: move to consensus_leader.go later
func (consensus *Consensus) onNewView(msg *msg_pb.Message) {
consensus.getLogger().Debug().Msg("[onNewView] Received NewView Message")
utils.Logger().Debug().Msg("[onNewView] Received NewView Message")
senderKey, err := consensus.verifyViewChangeSenderKey(msg)
if err != nil {
consensus.getLogger().Warn().Err(err).Msg("[onNewView] VerifySenderKey Failed")
utils.Logger().Warn().Err(err).Msg("[onNewView] VerifySenderKey Failed")
return
}
recvMsg, err := consensus.ParseNewViewMessage(msg)
if err != nil {
consensus.getLogger().Warn().Err(err).Msg("[onNewView] Unable to Parse NewView Message")
utils.Logger().Warn().Err(err).Msg("[onNewView] Unable to Parse NewView Message")
return
}
if err = verifyMessageSig(senderKey, msg); err != nil {
consensus.getLogger().Error().Err(err).Msg("[onNewView] Failed to Verify New Leader's Signature")
utils.Logger().Error().Err(err).Msg("[onNewView] Failed to Verify New Leader's Signature")
return
}
consensus.vcLock.Lock()
defer consensus.vcLock.Unlock()
if recvMsg.M3AggSig == nil || recvMsg.M3Bitmap == nil {
consensus.getLogger().Error().Msg("[onNewView] M3AggSig or M3Bitmap is nil")
utils.Logger().Error().Msg("[onNewView] M3AggSig or M3Bitmap is nil")
return
}
m3Sig := recvMsg.M3AggSig
@ -481,7 +481,7 @@ func (consensus *Consensus) onNewView(msg *msg_pb.Message) {
binary.LittleEndian.PutUint64(viewIDBytes, recvMsg.ViewID)
// check total number of sigs >= 2f+1
if count := utils.CountOneBits(m3Mask.Bitmap); count < consensus.Quorum() {
consensus.getLogger().Debug().
utils.Logger().Debug().
Int("need", consensus.Quorum()).
Int("have", count).
Msg("[onNewView] Not Have Enough M3 (ViewID) Signature")
@ -489,9 +489,9 @@ func (consensus *Consensus) onNewView(msg *msg_pb.Message) {
}
if !m3Sig.VerifyHash(m3Mask.AggregatePublic, viewIDBytes) {
consensus.getLogger().Warn().
utils.Logger().Warn().
Str("m3Sig", m3Sig.SerializeToHexStr()).
Bytes("m3Mask", m3Mask.Bitmap).
Hex("m3Mask", m3Mask.Bitmap).
Uint64("MsgViewID", recvMsg.ViewID).
Msg("[onNewView] Unable to Verify Aggregated Signature of M3 (ViewID) payload")
return
@ -499,10 +499,10 @@ func (consensus *Consensus) onNewView(msg *msg_pb.Message) {
m2Mask := recvMsg.M2Bitmap
if recvMsg.M2AggSig != nil {
consensus.getLogger().Debug().Msg("[onNewView] M2AggSig (NIL) is Not Empty")
utils.Logger().Debug().Msg("[onNewView] M2AggSig (NIL) is Not Empty")
m2Sig := recvMsg.M2AggSig
if !m2Sig.VerifyHash(m2Mask.AggregatePublic, NIL) {
consensus.getLogger().Warn().Msg("[onNewView] Unable to Verify Aggregated Signature of M2 (NIL) payload")
utils.Logger().Warn().Msg("[onNewView] Unable to Verify Aggregated Signature of M2 (NIL) payload")
return
}
}
@ -510,18 +510,18 @@ func (consensus *Consensus) onNewView(msg *msg_pb.Message) {
// check when M3 sigs > M2 sigs, then M1 (recvMsg.Payload) should not be empty
if m2Mask == nil || m2Mask.Bitmap == nil || (m2Mask != nil && m2Mask.Bitmap != nil && utils.CountOneBits(m3Mask.Bitmap) > utils.CountOneBits(m2Mask.Bitmap)) {
if len(recvMsg.Payload) <= 32 {
consensus.getLogger().Debug().Msg("[onNewView] M1 (prepared) Type Payload Not Have Enough Length")
utils.Logger().Debug().Msg("[onNewView] M1 (prepared) Type Payload Not Have Enough Length")
return
}
// m1 is not empty, check it's valid
blockHash := recvMsg.Payload[:32]
aggSig, mask, err := consensus.ReadSignatureBitmapPayload(recvMsg.Payload, 32)
if err != nil {
consensus.getLogger().Error().Err(err).Msg("[onNewView] ReadSignatureBitmapPayload Failed")
utils.Logger().Error().Err(err).Msg("[onNewView] ReadSignatureBitmapPayload Failed")
return
}
if !aggSig.VerifyHash(mask.AggregatePublic, blockHash) {
consensus.getLogger().Warn().Msg("[onNewView] Failed to Verify Signature for M1 (prepare) message")
utils.Logger().Warn().Msg("[onNewView] Failed to Verify Signature for M1 (prepare) message")
return
}
copy(consensus.blockHash[:], blockHash)
@ -546,7 +546,7 @@ func (consensus *Consensus) onNewView(msg *msg_pb.Message) {
// change view and leaderKey to keep in sync with network
if consensus.blockNum != recvMsg.BlockNum {
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("newLeaderKey", consensus.LeaderPubKey.SerializeToHexStr()).
Uint64("MsgBlockNum", recvMsg.BlockNum).
Msg("[onNewView] New Leader Changed")
@ -561,21 +561,21 @@ func (consensus *Consensus) onNewView(msg *msg_pb.Message) {
commitPayload := append(blockNumHash, consensus.blockHash[:]...)
msgToSend := consensus.constructCommitMessage(commitPayload)
consensus.getLogger().Info().Msg("onNewView === commit")
utils.Logger().Info().Msg("onNewView === commit")
consensus.host.SendMessageToGroups([]p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend))
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("From", consensus.phase.String()).
Str("To", Commit.String()).
Msg("[OnViewChange] Switching phase")
consensus.switchPhase(Commit, true)
} else {
consensus.ResetState()
consensus.getLogger().Info().Msg("onNewView === announce")
utils.Logger().Info().Msg("onNewView === announce")
}
consensus.getLogger().Debug().
utils.Logger().Debug().
Str("newLeaderKey", consensus.LeaderPubKey.SerializeToHexStr()).
Msg("new leader changed")
consensus.getLogger().Debug().Msg("validator start consensus timer and stop view change timer")
utils.Logger().Debug().Msg("validator start consensus timer and stop view change timer")
consensus.consensusTimeout[timeoutConsensus].Start()
consensus.consensusTimeout[timeoutViewChange].Stop()
}

@ -281,7 +281,7 @@ func (bc *BlockChain) loadLastState() error {
currentBlock := bc.GetBlockByHash(head)
if currentBlock == nil {
// Corrupt or empty database, init from scratch
utils.Logger().Warn().Bytes("hash", head.Bytes()).Msg("Head block missing, resetting chain")
utils.Logger().Warn().Str("hash", head.Hex()).Msg("Head block missing, resetting chain")
return bc.Reset()
}
// Make sure the state associated with the block is available

@ -472,6 +472,9 @@ func WriteLastCommits(
if err = db.Put(lastCommitsKey, data); err != nil {
return ctxerror.New("cannot write last commits").WithCause(err)
}
utils.Logger().Info().
Int("numShards", len(data)).
Msg("wrote last commits")
return nil
}

@ -33,7 +33,7 @@ func ReadTxLookupEntry(db DatabaseReader, hash common.Hash) (common.Hash, uint64
}
var entry TxLookupEntry
if err := rlp.DecodeBytes(data, &entry); err != nil {
utils.Logger().Error().Err(err).Bytes("hash", hash.Bytes()).Msg("Invalid transaction lookup entry RLP")
utils.Logger().Error().Err(err).Str("hash", hash.Hex()).Msg("Invalid transaction lookup entry RLP")
return common.Hash{}, 0, 0
}
return entry.BlockHash, entry.BlockIndex, entry.Index

@ -52,7 +52,7 @@ func ReadChainConfig(db DatabaseReader, hash common.Hash) *params.ChainConfig {
}
var config params.ChainConfig
if err := json.Unmarshal(data, &config); err != nil {
utils.Logger().Error().Err(err).Bytes("hash", hash.Bytes()).Msg("Invalid chain config JSON")
utils.Logger().Error().Err(err).Str("hash", hash.Hex()).Msg("Invalid chain config JSON")
return nil
}
return &config

@ -78,7 +78,7 @@ func (dRand *DRand) init(epochBlock *types.Block) {
(*dRand.vrfs)[dRand.SelfAddress] = append(rand[:], proof...)
utils.Logger().Info().
Bytes("msg", msgToSend).
Hex("msg", msgToSend).
Str("leader.PubKey", dRand.leader.ConsensusPubKey.SerializeToHexStr()).
Msg("[DRG] sent init")
dRand.host.SendMessageToGroups([]p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(dRand.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend))
@ -167,8 +167,8 @@ func (dRand *DRand) processCommitMessage(message *msg_pb.Message) {
utils.Logger().Error().
Err(err).
Str("validatorAddress", validatorAddress).
Bytes("expectedRand", expectedRand[:]).
Bytes("receivedRand", rand[:]).
Hex("expectedRand", expectedRand[:]).
Hex("receivedRand", rand[:]).
Msg("[DRAND] Failed to verify the VRF")
return
}

@ -76,7 +76,9 @@ func (attack *Model) NodeKilledByItSelf() {
}
if rand.Intn(HitRate) == 0 {
utils.GetLogInstance().Debug("******************Killing myself******************", "PID: ", os.Getpid())
utils.Logger().Debug().
Int("PID", os.Getpid()).
Msg("******************Killing myself******************")
os.Exit(1)
}
}
@ -87,7 +89,9 @@ func (attack *Model) DelayResponse() {
return
}
if rand.Intn(HitRate) == 0 {
utils.GetLogInstance().Debug("******************Model: DelayResponse******************", "PID: ", os.Getpid())
utils.Logger().Debug().
Int("PID", os.Getpid()).
Msg("******************Model: DelayResponse******************")
time.Sleep(DelayResponseDuration)
}
}
@ -98,7 +102,9 @@ func (attack *Model) IncorrectResponse() bool {
return false
}
if rand.Intn(HitRate) == 0 {
utils.GetLogInstance().Debug("******************Model: IncorrectResponse******************", "PID: ", os.Getpid())
utils.Logger().Debug().
Int("PID", os.Getpid()).
Msg("******************Model: IncorrectResponse******************")
return true
}
return false

@ -24,7 +24,7 @@ const (
mainnetV0_4Epoch = 10
mainnetV1Epoch = 12
mainnetV1_1Epoch = 19
mainnetV1_2Epoch = 21
mainnetV1_2Epoch = 25
mainnetMaxTxAmountLimit = 1e3 // unit is in One
mainnetMaxNumRecentTxsPerAccountLimit = 1e2
@ -34,9 +34,9 @@ const (
mainnetEnableTxnThrottling = true
// MainNetHTTPPattern is the http pattern for mainnet.
MainNetHTTPPattern = "http://s%d.t.hmny.io:9500"
MainNetHTTPPattern = "https://api.s%d.t.hmny.io"
// MainNetWSPattern is the websocket pattern for mainnet.
MainNetWSPattern = "ws://s%d.t.hmny.io:9800"
MainNetWSPattern = "wss://ws.s%d.t.hmny.io"
)
// MainnetSchedule is the mainnet sharding configuration schedule.
@ -47,7 +47,7 @@ type mainnetSchedule struct{}
func (mainnetSchedule) InstanceForEpoch(epoch *big.Int) Instance {
switch {
case epoch.Cmp(big.NewInt(mainnetV1_2Epoch)) >= 0:
// twenty-first resharding epoch around 08/30/2019 11:35pm PDT
// twenty-fifth resharding epoch around 09/06/2019 5:31am PDT
return mainnetV1_2
case epoch.Cmp(big.NewInt(mainnetV1_1Epoch)) >= 0:
// nineteenth resharding epoch around 08/27/2019 9:07pm PDT

@ -12,9 +12,9 @@ import (
const (
// PangaeaHTTPPattern is the http pattern for pangaea.
PangaeaHTTPPattern = "http://s%d.pga.hmny.io:9500"
PangaeaHTTPPattern = "https://api.s%d.pga.hmny.io"
// PangaeaWSPattern is the websocket pattern for pangaea.
PangaeaWSPattern = "ws://s%d.pga.hmny.io:9800"
PangaeaWSPattern = "wss://ws.s%d.pga.hmny.io"
// transaction throttling disabled on pangaea network
pangaeaEnableTxnThrottling = false
)

@ -24,13 +24,9 @@ func TestMainnetInstanceForEpoch(t *testing.T) {
mainnetV1_1,
},
{
big.NewInt(21),
big.NewInt(25),
mainnetV1_2,
},
{
big.NewInt(8),
mainnetV0_3,
},
}
for _, test := range tests {

@ -31,9 +31,9 @@ const (
testnetEnableTxnThrottling = true
// TestNetHTTPPattern is the http pattern for testnet.
TestNetHTTPPattern = "http://s%d.b.hmny.io:9500"
TestNetHTTPPattern = "https://api.s%d.b.hmny.io"
// TestNetWSPattern is the websocket pattern for testnet.
TestNetWSPattern = "ws://s%d.b.hmny.io:9800"
TestNetWSPattern = "wss://ws.s%d.b.hmny.io"
)
func (testnetSchedule) InstanceForEpoch(epoch *big.Int) Instance {

@ -1708,7 +1708,7 @@ var FoundationalNodeAccountsV1_1 = []DeployAccount{
{Index: "319", Address: "one19c4uqfzezuws7e4ka4kvc5r09suks2ghpyg6xw", BlsPublicKey: "51b2019b222df63fc99d202b03834dee09f1ef11e25a03592a96c1d01bca2bedfc25e0f26d88dcbb8a7176e30e1ec116"},
}
// FoundationalNodeAccountsV1_2 are the accounts for the foundational nodes from Epoch 21.
// FoundationalNodeAccountsV1_2 are the accounts for the foundational nodes from Epoch 25.
var FoundationalNodeAccountsV1_2 = []DeployAccount{
{Index: "0", Address: "one1y0xcf40fg65n2ehm8fx5vda4thrkymhpg45ecj", BlsPublicKey: "9e70e8d76851f6e8dc648255acdd57bb5c49cdae7571aed43f86e9f140a6343caed2ffa860919d03e0912411fee4850a"},
{Index: "1", Address: "one18lp2w7ghhuajdpzl8zqeddza97u92wtkfcwpjk", BlsPublicKey: "fce3097d9fc234d34d6eaef3eecd0365d435d1118f69f2da1ed2a69ba725270771572e40347c222aca784cb973307b11"},
@ -1830,8 +1830,8 @@ var FoundationalNodeAccountsV1_2 = []DeployAccount{
{Index: "117", Address: "one1wt5darzj8wd385xl8stccj4sv6553hgckaypfr", BlsPublicKey: "9622f8a5590d6ef8ca94e6c866d663aa0398caf00a88b2dd059dc7a63daa8600828a85737eca4e595caa382b5d407205"},
{Index: "118", Address: "one1k80wv3uvfw5r0qhzp9yxn94u4jxu8my2xwuk87", BlsPublicKey: "bcd24c722dc5dd3727bc3f027e3f681e4d1f5a552513d158645833eb8d8d39ec1076370b55e063aeed5a7825eb6aa20a"},
{Index: "119", Address: "one1kwqkyzq2pmhvufe9528g9nd966ur54v6auzruf", BlsPublicKey: "aaac4eb8260e6cee7f19fbcae721ce2d68f125461953a583adca44407194452e7ac41de0757e2921c8fed83469172f92"},
{Index: "120", Address: "one1kykp8kzgmrkvhsz60t4yx8a06s44wt6jvpuuzg", BlsPublicKey: "85d0761a56dd18a67ee71e38ff949d93301467e5fbf9ab3fcb6875a6778341b271ae6acfc94d3046f454aeda138a1b89"},
{Index: "121", Address: "one1zcgx6pncyxrv6t7cza0f6r0e0pnp0gqd6knvcw", BlsPublicKey: "f1787fff85d1d5a6d458e768b425d2d9a9aec09ad8ddd020fd7d2f275970a8bef1f9a0c4a705e8f8b8734f9daf39d604"},
{Index: "120", Address: "one1gjas4xurmc0rguafq63ql65rwuxayukm74w2mn", BlsPublicKey: "d6c8cf5553fa77257d26ba6b201294a2a497d070d420ab76c044efc0f4325f40b5664e7a7f973940ef1ea57530215886"},
{Index: "121", Address: "one1pkw7wnplp077fn6phv2kfejw3u7wvx0m9vppzc", BlsPublicKey: "92d5e3fb5d3f1e64af4be7c0acbd457b68a2ec59cf34aaaa0bac04d0e0346b283a65e0227378a60e1fe7af2407d9c50a"},
{Index: "122", Address: "one12mn87akec4dqpfzy8q0jtqz9epf2pme605vc0h", BlsPublicKey: "6dd84461d038ccf467bfc2413b23b8eced80534b4507c3cdffa6f68111a799dccf1d42fd552ffeb3d595d2ec9733ef99"},
{Index: "123", Address: "one18xnzxwn5hwuuh229cuv7mr0hnjcvgru7qt2wdv", BlsPublicKey: "83426cbc962bc18900540a3f886486f35c17d182f2e13c06a23d3242c5ca50138aa98ac842d2a3d03d8a273a8329ac82"},
{Index: "124", Address: "one1efat5elqnvttf7gm86q9kmt48z69njax464rhv", BlsPublicKey: "2066a0b39474b9cdaec88bb94953219a2690e1dd6008bb6dc31c7f76c08a9db54758ff3aaed3249fcb0029dfc1e92995"},
@ -1840,7 +1840,7 @@ var FoundationalNodeAccountsV1_2 = []DeployAccount{
{Index: "127", Address: "one18r7axamzxgn57s9xcsvhevv58ukxrhsrfug6vg", BlsPublicKey: "17cd5fbf29ff9d1e21fc0f1b22be0dbaa9b05b49e011cbedceb6584153669e1318c052d431d9e3faec82fff823a0fb90"},
{Index: "128", Address: "one1w3pvyg56gal0ajef487944gzjkg6sv68j26pey", BlsPublicKey: "dc2f315a1dbe67ed3cfd8925fbfed964203c11e5c77112643d53b51e3c23135c695d218233c8a6cccea4a5032488d28c"},
{Index: "129", Address: "one1r3mh2h7flr6sgcjvpaadlfjcnguwfk5z6mjuvu", BlsPublicKey: "2684b9b856e2b3f6ff0916b17137ba61e2495a9636859ff108defcca38f4dc49508c44ff16ad8f74e5182769c6a5a699"},
{Index: "130", Address: "one17kjexl6hyh0evgt7en7q04m9q7lwgym5grs9q7", BlsPublicKey: "6c793f4a4d09ac07fe6b8fb7fcc4796b08c37994f9b5ec3a68e30ff48c32bd3fed1e358fced864558054f5e11f7e1398"},
{Index: "130", Address: "one1u9g5g7qxx3rx802c4xfjre8vz7mwz87s0e8y4k", BlsPublicKey: "c34f6916cf06ed15a3771678c73941271253e14f8b21a16872d8e30bcdf241ef9a4f58453c3953a25e17b2387f6fd813"},
{Index: "131", Address: "one12kdc0fqxne5f3394wrwadxq9yfaquyxgtaed3q", BlsPublicKey: "826f98d1f8ce8751dac0441888f4b4f1190ec90691d5e40110c39068d0f95ea9cca8efe23d0f7a865bd9ed37ebf38d12"},
{Index: "132", Address: "one1h2dynptqmtgdfg9fgpd8dvmv8scupkgtzapx4l", BlsPublicKey: "814843ee8475adb2245027e9531e036c4135cf25a1051d5ea97f9f9cea506503e4a83a49bea1ee40e5b9a9c5f55f8014"},
{Index: "133", Address: "one1l4yrxjz8ugwjdcxrm5snwgmgk8few9v2q2xv8h", BlsPublicKey: "aa37a8ad8fb42a5b1413be9ae5b053e7a47d2f36a11a1e7ba74c4b338919c9c76eca3a6332c06fbbd161a6aef20e9a0b"},
@ -1854,11 +1854,11 @@ var FoundationalNodeAccountsV1_2 = []DeployAccount{
{Index: "141", Address: "one1ljvq9tkvfp583zzl85mgjh3qjvjufnuwmn7krv", BlsPublicKey: "37644619ae24ffa2ab63dde85f8116effddaa500c6e28c57a5f100cb8a97262ed97f671d903a9c32a53301eec1100b96"},
{Index: "142", Address: "one12c23ekslj469g0g0tu9jcvecfkla7rahmrhe37", BlsPublicKey: "19101de3d0578c3146a1904f25a3344a998dcb0a18433dc5cc977d05f378676b0652b4a64fa8dff6c819cfd52dc94c14"},
{Index: "143", Address: "one19l9equxmql4jkcah8g4f6qva732npajarffj6q", BlsPublicKey: "f531f442dc19d2967fbcdcab68e77b16bc49cd05698ed8a56b39f3704315915c2b2b0175be6982be7238290b3504d487"},
{Index: "144", Address: "one1nq5dglmw0vunsa34mve8sdyrkhfd0373v4xgtv", BlsPublicKey: "a48d7cd3f3004cf2cecd4d4eba14d257da046f13ff461fedc2c3daaf725776e9a96ef38098a9a04f968f9c2287fc220d"},
{Index: "145", Address: "one1qmgqawpflw4pu9ytryz69mrk0mhhsswdmjgfrj", BlsPublicKey: "013116498f069ab0b1e3bb1043e45283c9fb8641eed8470d77d59bc8a42fca53ce6d3714ceb5c37aa71e3d1839b25690"},
{Index: "146", Address: "one1h7c7pgwnht4nns40k6swdzwy8xn9uvl0e65e49", BlsPublicKey: "05f9655186ef16363e2ae14629bc0fef17781bac5eed7875e3b442e57e45340115f6d087035f626a84832479c9a54a03"},
{Index: "147", Address: "one1cwzleselrsq3x76vjzy7u65a9tqmsrcne2w83h", BlsPublicKey: "783841b0eaea497e2f894d482b6bde16b96359cb837a9f7dc3bf89abcb45b75e8ea597d60d2e89775c6fb37164fa3694"},
{Index: "148", Address: "one1kkcw2y5d9w9celf0vu025hflyxu33gekmntx9u", BlsPublicKey: "b12b30b10c9b002ec0832d061025b99a695052800ebf642299fd439be505322208543566efda12b524b284ce5ea06510"},
{Index: "144", Address: "one18ehxprgnrnjug4yqcwq48ynnw3am237hlwmwxv", BlsPublicKey: "3a4212d1fdeb837d5c7cb709298c596c71a4faf57a592dc8a675eba752c31f256f0a99478fc0f38f13790df087f9ff0f"},
{Index: "145", Address: "one1aydmy5m3ymurgac9yaea9nq7tg4fwu8dnetz02", BlsPublicKey: "87ec47a0ee00ea2b31640283644cf093f4ba6d63ddefc50a699994aedb29bc83727a32a7d238ff66683269a0230eb593"},
{Index: "146", Address: "one14kljysylyq0nypcxzndhfqxrkjk290l9kkhmnz", BlsPublicKey: "d6b1b753e1d3333fc3898f394edae8cf79d9168c6b8c07ac8109cb754086f1402e82cc6a52131e0399c2d7cac2bf260b"},
{Index: "147", Address: "one18ahgl50uyq29cuacn76ucmsgxppgmjx80zs2cj", BlsPublicKey: "6e72635ed8e416ebdc48c59d2642cc8b884b0a5281a36a31220bb9e1bb453f8db22ea7905cda29271bde4cf858244514"},
{Index: "148", Address: "one1n392jsedk9pfx7rkw2gpdpjptsgfrkxla5ve6t", BlsPublicKey: "3a9217c7d6574e3922c4c55a258a4ba70ce54872e097022807dc15a1999a99919f3afcc0301b1cb8feb7ffbbe42b8a16"},
{Index: "149", Address: "one16xzyq2zarperhxjzmk8dyp629v4vugdkzfx7r2", BlsPublicKey: "4ff265748fbc2e3f99e95fc2e1818d02a38bcb72e95efa8ffb50820919f543f98705b330522d51e5272a5427e98ad38b"},
{Index: "150", Address: "one175jcxcdk2xlmccndr2mux3c8se8gsmddesg5ed", BlsPublicKey: "74a8762803bfbe8893540480cc9f789914b56f86a1458c38d5bf1b6737cd149a28f761bec95f3e9cf4095fb55a6e7294"},
{Index: "151", Address: "one1lmqycl6wezcdf7nqxj34slstamt0hlhp4s0rj4", BlsPublicKey: "764cec13fff061afecd226e167ac0452e2f16e5e8faa9c2152ba060243e6c29220850c9acae6f13612c842d277d5118b"},
@ -1873,8 +1873,8 @@ var FoundationalNodeAccountsV1_2 = []DeployAccount{
{Index: "160", Address: "one1hdxnmfgkxtlmrym9ljjw2dpweke85pheu8g8zl", BlsPublicKey: "95f9d0f1a688b9c4d2a9d876580f7920625f0859846711203fb4f0f364290895daab76557d09f4acb6d3b562eed00782"},
{Index: "161", Address: "one1d0k2x6ue6yd0pvuumxgxjk3qqfjpn08mu7r253", BlsPublicKey: "254772532f47cd28c0ed95986c678e589661de36eeef7502c57a95f753e1c0c1ea6348f6f19e10729faf31ae37fe1a0c"},
{Index: "162", Address: "one10j0tswg6x4udqafvsetjj3fl0g4e52spwp0wsh", BlsPublicKey: "289eb7f7c6b601dc83534734f500365f4fcf2189a5813b806b9ef6a777183c697d5fb22c07a26d19b63f1e10fa88e50c"},
{Index: "163", Address: "one15fswyv4znc8mw5dqutgssdj35ut2ghpg0wqg7p", BlsPublicKey: "4f5e3f01edbfc48e6260a8557f2bafd7bdfe76e727055110882ccadbcbba2ddd96a7f4008f576c94977e488c35086c18"},
{Index: "164", Address: "one1txjkm9v7avmpy4u9ugktwwt4tkkfz2r40v6v7a", BlsPublicKey: "998bb2fc775f08bc748acded8f2b331edf3d01da8d7bf5f1c6cc7bb8afb3bd384bb2fb40351c245ba7b5027c2f1c0608"},
{Index: "163", Address: "one1hmmh8tfh8p7ad2szyn5wntfplt5mn79mlfemwn", BlsPublicKey: "224fac479a8c0554faf2ecf0227453831bdeae7d03e96c2b0c9e7b3a33ac8701f57431833c7898a3c885d89f850e5696"},
{Index: "164", Address: "one1fallaznqw6mv2wyzggpxvlk0xjeu8hry9u7v74", BlsPublicKey: "3c8c4ecf114e0d12516db91d9eb17a0dde8bc80f3d95df7b95d02b1efcc70098d3e15ac6d38f8cd389114f3247dde883"},
{Index: "165", Address: "one10jkak35au0w9lmsqvjvpnx7t0qc6svklup48k3", BlsPublicKey: "1acf941d944db3f7410f4bf4f2d541e17d50bfde0f853ddcb2630b7ecf701c7be454ec6577aabe9320e2a8524cc60715"},
{Index: "166", Address: "one1zy628rt2d87nlad0dlvlzrnnqsv5cnt4uaa05m", BlsPublicKey: "898c9ec1fa5073e96ff71b66a5353eb2d9bbbaf103adeac5625ccae8cc926e99165ca2afed4f22065e8becf7b012f20f"},
{Index: "167", Address: "one1zefrcfgjuaqxggql0syz28cq40cy6ujwq6zmpx", BlsPublicKey: "6510d39a8db76f952c9dfa34b16cd1324877dabef2f441095c8b1f9809b884c4628b16363349871350d4422f4c61bf0c"},
@ -1896,7 +1896,7 @@ var FoundationalNodeAccountsV1_2 = []DeployAccount{
{Index: "183", Address: "one1933f2dpa5a7ezh2q6ul5nakd26tynn4l75pt6h", BlsPublicKey: "7fa5a39d171f5ceb2c58571731d6273e55420722984caaf17607a6ebdc18e32350406f5e537474a26cb31b9c97504a04"},
{Index: "184", Address: "one1wrvlznh27fywscexnc2l9fxk5gjelcqdnw8pvw", BlsPublicKey: "84b6b842abc1c28aa505f22ac553443f919ff742f1b6cc2c1b9ab07518a649dafc4d5e1360ee20462168753361569293"},
{Index: "185", Address: "one1v6e7uc4jwamdcjw43042jt5fw5ck5mxzt8c4wx", BlsPublicKey: "eb0f1af6910302340af0c20f05c08654131bf7eb0e8118548e7dfe25cc2991d953bbe8ace2786f5aed77484ee0adeb95"},
{Index: "186", Address: "one1q50h43adwq85nd28pq5yhw3sjlcd63x6urhmnh", BlsPublicKey: "c9e1ff04c12679c5315d960ecd5c1ced35616381c33ab8bb612eb78088c601295535a2f43140d647e6d9095b6ef60380"},
{Index: "186", Address: "one1seaavwphcu5a3mm4p36fpl9r68f0hk24g289st", BlsPublicKey: "25279696affa165739d769c135f901a2008555426979be6c92fd94a0c5d0f33415f85bfc3308be0c64e3de34cc2b198d"},
{Index: "187", Address: "one1hyqkrvkad8kpttpglrl42rcthd6h0uqg2etgf4", BlsPublicKey: "1bc81f9fd333524032c5a8fbbd976b00ba8d5e0e2efe60fd8336abbaec1b5744ecef07b242efddf09bd42dbcbd2f5795"},
{Index: "188", Address: "one12xfkkmqm7cnv52tml8vpdawvax0kr8f4wrlp96", BlsPublicKey: "64745645766808aeee7eb00081d6d11119dcd3a925255960faccc0c6d22f17b6fd0814c061dd3ab39b5877df119f6711"},
{Index: "189", Address: "one1yuqws7le57naq90w2z42k3undccy8nh7wdmuhz", BlsPublicKey: "2611e59381f2f117746ea12cea76a09ba5895bc503d4bc5561b546d673ccc4fff854aea26e9c43c3682444c25396490a"},
@ -1914,7 +1914,7 @@ var FoundationalNodeAccountsV1_2 = []DeployAccount{
{Index: "201", Address: "one1fgsn4xrhtq4ljfd394gesj4aj6pkkf0lsytmnn", BlsPublicKey: "11e163fab1c890a81d224dc9d462d56b0cef5cd1645d2edd848067ff34f480b02ac8c920c4646618f235a7e3f9bdc505"},
{Index: "202", Address: "one1g6l7xj9w8z3uxjud8da69twzvyccqam6k5xymd", BlsPublicKey: "a11451a324fffc50f33e934bcee1bd673b7e285c24817285cea8207212aad9951aa9b6c40a5b78a7fea7ed047c48d188"},
{Index: "203", Address: "one1l476nnnhekrnnk5m78k8jr9uxvh6y4ql7xq9fq", BlsPublicKey: "4fd4f3a680528b60f6de7d944febe5426485f0ca0816a7c44fe3355bef2f48519f75235f42732678b2a5fab860e67304"},
{Index: "204", Address: "one134phdwr88wnpadw4x8p7r972f7wkqerd7kwxg9", BlsPublicKey: "7a37baf96f89557f4157d206af89ac343b17df147e44d6d94b76637ba16668854205a639f39d2c7f51b8d1e26205b296"},
{Index: "204", Address: "one1a962kd80qmqu6wwd2ywjnh2f0e95zmrur2qmde", BlsPublicKey: "11fc5e3e3d1b8671f4fa2da7eec3174d832450c4b45530ee24bde9f8e549600ddebe2c60631926f15f287fdc28e77619"},
{Index: "205", Address: "one1qfs3q0mwzmg3dsyzc6vf2telzx54t0t3zlc8a7", BlsPublicKey: "b3edc1d8778cae41255c06f2425347c7230de80e5f6a3b9e46a28a5223fb36d3fa6433cb5d589ee990ab73db3ea8a419"},
{Index: "206", Address: "one1kgu8jl9vtff4yklvxspg4whjwdvkqsx50zwy6g", BlsPublicKey: "94c13845fa5fb93967e1f4485255993e42cddc6cef75bd5709af76275fd9a6d6fc4e062a9f4d2b776348794fc25a2408"},
{Index: "207", Address: "one1zr57fc7txdwvcwd6w2rn4yg6tdy0j0tjhvyj8f", BlsPublicKey: "76ac6c7d29e5e47874619816d6a441645adb0138bd02aa71d5b654cfeb9bdc1d4dd66d4e493100d48aeb558824d63408"},
@ -2015,7 +2015,7 @@ var FoundationalNodeAccountsV1_2 = []DeployAccount{
{Index: "302", Address: "one10pptg3m2k4c7ntgyqqpdmcamwg9e29naq4a4gd", BlsPublicKey: "931c600eb04d9fe1fde45104a00e5da9bcdd94787e18b5867ab6c0c5d33834f9a1fb7f090f3eafcb26e368b43f82f213"},
{Index: "303", Address: "one1hcru2lx6nagvf49hfc5skhhxummq863nlmvvu5", BlsPublicKey: "9a2529a40c23b45e33058e4758e63d2c5285bc64c0f71563725767274e9d0c25b6a9e96cf37d8ddf3a7047a2f5f8778d"},
{Index: "304", Address: "one13zzrvzrdxz2pf4y6ewkgjy7h2f602esjzjx4zv", BlsPublicKey: "48c89306e6eda48a4bd55145a89dc55ef7fdee4f3938afd1a6a3c503be8678e9aee3636f63b0df08a7491597e793f492"},
{Index: "305", Address: "one1zkpxghkct52crp2tda7xkdpxga32j56jjevntf", BlsPublicKey: "8137e81449de355de1befa684f89c38f5769a3744af8f9066d517474476b20be930cfad7de30b73b26c2353480111d92"},
{Index: "305", Address: "one1apdxhkhen9svzkk4x7n475lx8lmckawet7fdrx", BlsPublicKey: "16975468c2cedd7ce9785e41b264d11fe7fa75ae5d4561ec8c088ec03c3a0b4518b3ffc4830e71ddfc55bf6098cc4186"},
{Index: "306", Address: "one1r8ywmftsqgew4sfx9808dtww4r6j3a3v98c8vt", BlsPublicKey: "a221b590df1f201b653c31c0df894471519c0601dac36189ddb9511b4b920d5a43e597842971e784d0e0cb17bda7e983"},
{Index: "307", Address: "one1w2t3d2g435az4cnyqf7rkuzq2mu08mfuwlqhcz", BlsPublicKey: "8520f6519d3bc40fbffc703248cdcb6222e24394238cc7e705048ea62dceb35c3a2d0151d863556e8f172ca54ab8fe87"},
{Index: "308", Address: "one1hz4n5vn4e6ucvppxf6n8zayhml8wsqken60ksg", BlsPublicKey: "9d5063f3d82ca4b6855f3b00a45bdff5dd309fd6c4e12488ef18fbe808c847c2ff89334e641b3850e025b9bb85f4538b"},

@ -28,7 +28,7 @@ func (d DeployAccount) String() string {
func BeaconAccountPriKey() *ecdsa.PrivateKey {
prikey, err := ecdsa.GenerateKey(crypto.S256(), strings.NewReader(genesisString))
if err != nil && prikey == nil {
utils.GetLogInstance().Error("Failed to generate beacon chain contract deployer account")
utils.Logger().Error().Msg("Failed to generate beacon chain contract deployer account")
os.Exit(111)
}
return prikey

@ -121,7 +121,9 @@ func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNr r
func doCall(ctx context.Context, b Backend, args CallArgs, blockNr rpc.BlockNumber, vmCfg vm.Config, timeout time.Duration, globalGasCap *big.Int) ([]byte, uint64, bool, error) {
defer func(start time.Time) {
utils.GetLogInstance().Debug("Executing EVM call finished", "runtime", time.Since(start))
utils.Logger().Debug().
Dur("runtime", time.Since(start)).
Msg("Executing EVM call finished")
}(time.Now())
state, header, err := b.StateAndHeaderByNumber(ctx, blockNr)
@ -152,7 +154,10 @@ func doCall(ctx context.Context, b Backend, args CallArgs, blockNr rpc.BlockNumb
gas = uint64(*args.Gas)
}
if globalGasCap != nil && globalGasCap.Uint64() < gas {
utils.GetLogInstance().Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap)
utils.Logger().Warn().
Uint64("requested", gas).
Uint64("cap", globalGasCap.Uint64()).
Msg("Caller gas above allowance, capping")
gas = globalGasCap.Uint64()
}
gasPrice := new(big.Int).SetUint64(defaultGasPrice)

@ -38,7 +38,12 @@ func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args SendTxArgs
}
signed, err := s.signTransaction(ctx, &args, passwd)
if err != nil {
utils.GetLogger().Warn("Failed transaction send attempt", "from", args.From, "to", args.To, "value", args.Value.ToInt(), "err", err)
utils.Logger().Warn().
Str("from", args.From.Hex()).
Str("to", args.To.Hex()).
Uint64("value", args.Value.ToInt().Uint64()).
AnErr("err", err).
Msg("Failed transaction send attempt")
return common.Hash{}, err
}
return SubmitTransaction(ctx, s.b, signed)

@ -23,9 +23,15 @@ func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c
return common.Hash{}, err
}
addr := crypto.CreateAddress(from, tx.Nonce())
utils.GetLogger().Info("Submitted contract creation", "fullhash", tx.Hash().Hex(), "contract", common2.MustAddressToBech32(addr))
utils.Logger().Info().
Str("fullhash", tx.Hash().Hex()).
Str("contract", common2.MustAddressToBech32(addr)).
Msg("Submitted contract creation")
} else {
utils.GetLogger().Info("Submitted transaction", "fullhash", tx.Hash().Hex(), "recipient", tx.To())
utils.Logger().Info().
Str("fullhash", tx.Hash().Hex()).
Str("recipient", tx.To().Hex()).
Msg("Submitted transaction")
}
return tx.Hash(), nil
}

@ -47,7 +47,9 @@ func (m *MemProfiling) Config() {
Addr: fmt.Sprintf("%s:%s", nodeconfig.GetDefaultConfig().IP, utils.GetPortFromDiff(nodeconfig.GetDefaultConfig().Port, MemProfilingPortDiff)),
Handler: m.h,
}
utils.GetLogInstance().Info("running mem profiling", "port", utils.GetPortFromDiff(nodeconfig.GetDefaultConfig().Port, MemProfilingPortDiff))
utils.Logger().Info().
Str("port", utils.GetPortFromDiff(nodeconfig.GetDefaultConfig().Port, MemProfilingPortDiff)).
Msgf("running mem profiling")
}
// Add adds variables to watch for profiling.
@ -67,7 +69,6 @@ func (m *MemProfiling) Add(name string, v interface{}) {
func (m *MemProfiling) Start() {
go m.s.ListenAndServe()
m.PeriodicallyScanMemSize()
utils.GetLogInstance().Info("Start memprofiling.")
}
// Stop stops mem profiling.
@ -86,7 +87,7 @@ func (m *MemProfiling) PeriodicallyScanMemSize() {
for k, v := range m.observedObject {
s := memsize.Scan(v)
r := s.Report()
utils.GetLogInstance().Info(fmt.Sprintf("memsize report for %s:\n %s", k, r))
utils.Logger().Info().Msgf("memsize report for %s:\n %s", k, r)
}
m.mu.Unlock()
}
@ -120,11 +121,11 @@ func MaybeCallGCPeriodically() {
func PrintMemUsage(msg string) {
var m runtime.MemStats
runtime.ReadMemStats(&m)
utils.GetLogInstance().Info(msg,
"alloc", bToMb(m.Alloc),
"totalalloc", bToMb(m.TotalAlloc),
"sys", bToMb(m.Sys),
"numgc", m.NumGC)
utils.Logger().Info().
Uint64("alloc", bToMb(m.Alloc)).
Uint64("totalalloc", bToMb(m.TotalAlloc)).
Uint64("sys", bToMb(m.Sys)).
Uint32("numgc", m.NumGC)
}
func bToMb(b uint64) uint64 {

@ -11,10 +11,10 @@ var (
// MainnetChainConfig is the chain parameters to run a node on the main network.
MainnetChainConfig = &ChainConfig{
ChainID: big.NewInt(1),
CrossTxEpoch: big.NewInt(29),
CrossTxEpoch: big.NewInt(28),
CrossLinkEpoch: big.NewInt(10000000), // Temporarily made very large until an exact number is decided.
EIP155Epoch: big.NewInt(30),
S3Epoch: big.NewInt(30),
EIP155Epoch: big.NewInt(28),
S3Epoch: big.NewInt(28),
}
// TestnetChainConfig contains the chain parameters to run a node on the harmony test network.

@ -3,6 +3,7 @@ package profiler
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"os"
"sync"
@ -46,7 +47,15 @@ func (profiler *Profiler) LogMemory() {
// log mem usage
info, _ := profiler.proc.MemoryInfo()
memMap, _ := profiler.proc.MemoryMaps(false)
utils.GetLogInstance().Info("Mem Report", "info", info, "map", memMap, "shardID", profiler.shardID)
loggedMemMap := ""
for _, mems := range *memMap {
loggedMemMap = fmt.Sprintf("%v; %v", loggedMemMap, mems)
}
utils.Logger().Info().
Str("info", info.String()).
Str("map", loggedMemMap).
Uint32("shardID", profiler.shardID).
Msg("Mem Report")
time.Sleep(3 * time.Second)
}
@ -58,7 +67,11 @@ func (profiler *Profiler) LogCPU() {
// log cpu usage
percent, _ := profiler.proc.CPUPercent()
times, _ := profiler.proc.Times()
utils.GetLogInstance().Info("CPU Report", "percent", percent, "times", times, "shardID", profiler.shardID)
utils.Logger().Info().
Float64("percent", percent).
Str("times", times.String()).
Uint32("shardID", profiler.shardID).
Msg("CPU Report")
time.Sleep(3 * time.Second)
}

@ -81,8 +81,9 @@ func (sc *CollectionImpl) ShardChain(shardID uint32) (*core.BlockChain, error) {
return nil, ctxerror.New("cannot open chain database").WithCause(err)
}
if rawdb.ReadCanonicalHash(db, 0) == (common.Hash{}) {
utils.GetLogger().Info("initializing a new chain database",
"shardID", shardID)
utils.Logger().Info().
Uint32("shardID", shardID).
Msg("initializing a new chain database")
if err := sc.dbInit.InitChainDB(db, shardID); err != nil {
return nil, ctxerror.New("cannot initialize a new chain database").
WithCause(err)
@ -119,11 +120,15 @@ func (sc *CollectionImpl) CloseShardChain(shardID uint32) error {
if !ok {
return ctxerror.New("shard chain not found", "shardID", shardID)
}
utils.GetLogger().Info("closing shard chain", "shardID", shardID)
utils.Logger().Info().
Uint32("shardID", shardID).
Msg("closing shard chain")
delete(sc.pool, shardID)
bc.Stop()
bc.ChainDb().Close()
utils.GetLogger().Info("closed shard chain", "shardID", shardID)
utils.Logger().Info().
Uint32("shardID", shardID).
Msg("closed shard chain")
return nil
}
@ -135,10 +140,14 @@ func (sc *CollectionImpl) Close() error {
sc.pool = newPool
sc.mtx.Unlock()
for shardID, bc := range oldPool {
utils.GetLogger().Info("closing shard chain", "shardID", shardID)
utils.Logger().Info().
Uint32("shardID", shardID).
Msg("closing shard chain")
bc.Stop()
bc.ChainDb().Close()
utils.GetLogger().Info("closed shard chain", "shardID", shardID)
utils.Logger().Info().
Uint32("shardID", shardID).
Msg("closed shard chain")
}
return nil
}

@ -63,7 +63,9 @@ func ConvertFixedDataIntoByteArray(data interface{}) []byte {
buff := new(bytes.Buffer)
err := binary.Write(buff, binary.BigEndian, data)
if err != nil {
GetLogger().Crit("Failed to convert fixed data into byte array", "err", err)
Logger().Error().
AnErr("err", err).
Msg("Failed to convert fixed data into byte array")
}
return buff.Bytes()
}
@ -195,16 +197,22 @@ func LoadKeyFromFile(keyfile string) (key p2p_crypto.PrivKey, pk p2p_crypto.PubK
var keyStruct PrivKeyStore
err = Load(keyfile, &keyStruct)
if err != nil {
GetLogger().Info("No priviate key can be loaded from file", "keyfile", keyfile)
GetLogger().Info("Using random private key")
Logger().Info().
Str("keyfile", keyfile).
Msg("No private key can be loaded from file")
Logger().Info().Msg("Using random private key")
key, pk, err = GenKeyP2PRand()
if err != nil {
GetLogger().Crit("LoadKeyFromFile", "GenKeyP2PRand Error", err)
Logger().Error().
AnErr("GenKeyP2PRand Error", err).
Msg("LoadedKeyFromFile")
panic(err)
}
err = SaveKeyToFile(keyfile, key)
if err != nil {
GetLogger().Error("failed to save key to keyfile", "keyfile", err)
Logger().Error().
AnErr("keyfile", err).
Msg("failed to save key to keyfile")
}
return key, pk, nil
}

@ -246,7 +246,7 @@ func (node *Node) reducePendingTransactions() {
// If the length of pendingTransactions is greater than twice the TxPoolLimit, greedily keep only the most recent TxPoolLimit transactions.
if curLen > txPoolLimit+txPoolLimit {
node.pendingTransactions = append(types.Transactions(nil), node.pendingTransactions[curLen-txPoolLimit:]...)
utils.GetLogger().Info("mem stat reduce pending transaction")
utils.Logger().Info().Msg("mem stat reduce pending transaction")
}
}

@ -457,8 +457,8 @@ func (node *Node) AddNewBlock(newBlock *types.Block) error {
utils.Logger().Error().
Err(err).
Uint64("blockNum", newBlock.NumberU64()).
Bytes("parentHash", newBlock.Header().ParentHash().Bytes()[:]).
Bytes("hash", newBlock.Header().Hash().Bytes()[:]).
Str("parentHash", newBlock.Header().ParentHash().Hex()).
Str("hash", newBlock.Header().Hash().Hex()).
Msg("Error Adding new block to blockchain")
} else {
utils.Logger().Info().

@ -0,0 +1,700 @@
#!/bin/bash
# This script tests the API functionality on both localnet and betanet.
# -l to run against localnet, -b to run against betanet (mutually exclusive)
# -v to see the response returned by each request
# Right now it only tests whether a response is received
# You must have cloned the dapp-examples repo alongside this one and have nodejs installed
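#
# Example usage (illustrative; assumes this script lives at scripts/api_test.sh,
# a localnet is already running, and jq is installed):
#   ./scripts/api_test.sh -l -v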
VERBOSE="FALSE"
TESTS_RAN=0
TESTS_PASSED=0
red=`tput setaf 1`
green=`tput setaf 2`
blue=`tput setaf 6`
white=`tput sgr0`
yellow=`tput setaf 11`
reset=`tput sgr0`
function response_test() {
if [ "$1" != "" ]; then
echo "${green}RESPONSE RECIEVED${reset}"
return 1
else
echo "${red}NO RESPONSE${reset}"
return 0
fi
}
function isHashTest() {
if [ "$TRANSACTION" != "null" ]; then
if [[ "$TRANSACTION_HASH" =~ ^0x[0-9a-f]{64}$ ]]; then
echo ${green}TRANSACTION HASH VALID${reset}
echo
return 1
fi
fi
echo ${red}TRANSACTION HASH INVALID${reset}
return 0
}
function isHexTest() {
if [ "$1" != "null" ]; then
if [[ "$1" =~ ^0x[0-9a-f]+$ ]]; then
echo ${green}VALID HEX RECEIVED${reset}
echo
return 1
fi
fi
echo ${red}INVALID HEX RECEIVED${reset}
return 0
}
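# Note: both regexes above are lowercase-only; e.g. "0xabc123" passes isHexTest while
# "0xABC123" does not, and isHashTest additionally requires exactly 64 hex characters
# after the "0x" prefix.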
### SETUP COMMANDLINE FLAGS ###
while getopts "lbvp" OPTION; do
case $OPTION in
b)
NETWORK="betanet"
declare -A PORT=( [POST]="http://s0.b.hmny.io:9500/" [GET]="http://e0.b.hmny.io:5000/" )
BLOCK_0_HASH=$(curl --location --request POST "http://l0.b.hmny.io:9500" \
--header "Content-Type: application/json" \
--data "{\"jsonrpc\":\"2.0\",\"method\":\"hmy_getBlockByNumber\",\"params\":[\"0x1\", true],\"id\":1}" | jq -r '.result.hash')
echo "BLOCK0HASH:"
echo "$BLOCK_0_HASH"
SIGNED_RAW_TRANSACTION=$(node ../dapp-examples/nodejs/apiTestSign.js)
echo "RAWTX"
echo "$SIGNED_RAW_TRANSACTION"
TRANSACTION_HASH=$(curl --location --request POST "http://l0.b.hmny.io:9500" \
--header "Content-Type: application/json" \
--data "{\"jsonrpc\":\"2.0\",\"method\":\"hmy_sendRawTransaction\",\"params\":[\""$SIGNED_RAW_TRANSACTION"\"],\"id\":1}" | jq -r '.result')
echo "TRANSACTION_HASH:"
echo $TRANSACTION_HASH
sleep 20s
TRANSACTION=$(curl --location --request POST "http://l0.b.hmny.io:9500" \
--header "Content-Type: application/json" \
--data "{\"jsonrpc\":\"2.0\",\"method\":\"hmy_getTransactionByHash\",\"params\":[\"$TRANSACTION_HASH\"],\"id\":1}")
echo "TRANSACTION:"
echo "$TRANSACTION"
TRANSACTION_BLOCK_HASH=$(echo $TRANSACTION | jq -r '.result.blockHash')
TRANSACTION_BLOCK_NUMBER=$(echo $TRANSACTION | jq -r '.result.blockNumber')
TRANSACTION_INDEX=$(echo $TRANSACTION | jq -r '.result.transactionIndex') # Needs to get the transaction index
TRANSACTION_BLOCK_ID=$(( $TRANSACTION_BLOCK_NUMBER ))
echo "TRANSACTION_BLOCK_ID:"
echo $TRANSACTION_BLOCK_ID
echo "TRANSACTION_BLOCK_HASH:"
echo $TRANSACTION_BLOCK_HASH
echo "TRANSACTION_BLOCK_NUMBER:"
echo "$TRANSACTION_BLOCK_NUMBER"
echo "TRANSACTION_INDEX:"
echo $TRANSACTION_INDEX
;;
l)
NETWORK="localnet"
declare -A PORT=( [POST]="localhost:9500/" [GET]="localhost:5099/" )
BLOCK_0_HASH=$(curl -s --location --request POST "localhost:9500" \
--header "Content-Type: application/json" \
--data "{\"jsonrpc\":\"2.0\",\"method\":\"hmy_getBlockByNumber\",\"params\":[\"0x1\", true],\"id\":1}" | jq -r '.result.hash')
echo "BLOCK0HASH:"
echo "$BLOCK_0_HASH"
SIGNED_RAW_TRANSACTION=$(node ../dapp-examples/nodejs/apiTestSign.js localnet)
echo "RAWTX"
echo "$SIGNED_RAW_TRANSACTION"
TRANSACTION_HASH=$(curl --location --request POST "localhost:9500" \
--header "Content-Type: application/json" \
--data "{\"jsonrpc\":\"2.0\",\"method\":\"hmy_sendRawTransaction\",\"params\":[\""$SIGNED_RAW_TRANSACTION"\"],\"id\":1}" | jq -r '.result')
echo "TRANSACTION_HASH:"
echo $TRANSACTION_HASH
sleep 20s
TRANSACTION=$(curl --location --request POST "http://localhost:9500" \
--header "Content-Type: application/json" \
--data "{\"jsonrpc\":\"2.0\",\"method\":\"hmy_getTransactionByHash\",\"params\":[\"$TRANSACTION_HASH\"],\"id\":1}")
echo "TRANSACTION:"
echo "$TRANSACTION"
TRANSACTION_BLOCK_HASH=$(echo $TRANSACTION | jq -r '.result.blockHash')
TRANSACTION_BLOCK_NUMBER=$(echo $TRANSACTION | jq -r '.result.blockNumber')
TRANSACTION_INDEX=$(echo $TRANSACTION | jq -r '.result.transactionIndex')
TRANSACTION_BLOCK_ID=$(( $TRANSACTION_BLOCK_NUMBER ))
echo "TRANSACTION_BLOCK_ID:"
echo $TRANSACTION_BLOCK_ID
echo "TRANSACTION_BLOCK_HASH:"
echo $TRANSACTION_BLOCK_HASH
echo "TRANSACTION_BLOCK_NUMBER:"
echo "$TRANSACTION_BLOCK_NUMBER"
echo "TRANSACTION_INDEX:"
echo $TRANSACTION_INDEX
;;
v)
VERBOSE="TRUE"
;;
p)
PRETTY="TRUE"
;;
esac
done
if [ $OPTIND -eq 1 ]; then echo "No options were passed, -l for localnet, -b for betanet, -v to view logs of either"; exit; fi
declare -A GETDATA=( [GET_blocks]="blocks?from=$TRANSACTION_BLOCK_ID&to=$TRANSACTION_BLOCK_ID" [GET_tx]="tx?id=0" [GET_address]="address?id=0" [GET_node-count]="node-count" [GET_shard]="shard?id=0" [GET_committee]="committee?shard_id=0&epoch=0" )
declare -A POSTDATA
if [ "$NETWORK" == "localnet" ]; then
POSTDATA[hmy_getBlockByHash]="hmy_getBlockByHash\",\"params\":[\"$TRANSACTION_BLOCK_HASH\", true]"
POSTDATA[hmy_getBlockByNumber]="hmy_getBlockByNumber\",\"params\":[\"$TRANSACTION_BLOCK_NUMBER\", true]"
POSTDATA[hmy_getBlockTransactionCountByHash]="hmy_getBlockTransactionCountByHash\",\"params\":[\"$TRANSACTION_BLOCK_HASH\"]"
POSTDATA[hmy_getBlockTransactionCountByNumber]="hmy_getBlockTransactionCountByNumber\",\"params\":[\"$TRANSACTION_BLOCK_NUMBER\"]"
POSTDATA[hmy_getCode]="hmy_getCode\",\"params\":[\"0x08AE1abFE01aEA60a47663bCe0794eCCD5763c19\", \"latest\"]"
POSTDATA[hmy_getTransactionByBlockHashAndIndex]="hmy_getTransactionByBlockHashAndIndex\",\"params\":[\"$TRANSACTION_BLOCK_HASH\", \"$TRANSACTION_INDEX\"]"
POSTDATA[hmy_getTransactionByBlockNumberAndIndex]="hmy_getTransactionByBlockNumberAndIndex\",\"params\":[\"$TRANSACTION_BLOCK_NUMBER\", \"$TRANSACTION_INDEX\"]"
POSTDATA[hmy_getTransactionByHash]="hmy_getTransactionByHash\",\"params\":[\"$TRANSACTION_HASH\"]"
POSTDATA[hmy_getTransactionReceipt]="hmy_getTransactionReceipt\",\"params\":[\"$TRANSACTION_HASH\"]"
POSTDATA[hmy_syncing]="hmy_syncing\",\"params\":[]"
POSTDATA[net_peerCount]="net_peerCount\",\"params\":[]"
POSTDATA[hmy_getBalance]="hmy_getBalance\",\"params\":[\"one18t4yj4fuutj83uwqckkvxp9gfa0568uc48ggj7\", \"latest\"]"
POSTDATA[hmy_getStorageAt]="hmy_getStorageAt\",\"params\":[\"0xD7Ff41CA29306122185A07d04293DdB35F24Cf2d\", \"0\", \"latest\"]"
POSTDATA[hmy_getTransactionCount]="hmy_getTransactionCount\",\"params\":[\"0x806171f95C5a74371a19e8a312c9e5Cb4E1D24f6\", \"latest\"]" # what is this
POSTDATA[hmy_sendRawTransaction]="hmy_sendRawTransaction\",\"params\":[\"$SIGNED_RAW_TRANSACTION\"]"
POSTDATA[hmy_getLogs]="hmy_getLogs\", \"params\":[{\"BlockHash\": \"$TRANSACTION_BLOCK_HASH\"}]"
POSTDATA[hmy_getFilterChanges]="hmy_getFilterChanges\", \"params\":[\"0x58010795a282878ed0d61da72a14b8b0\"]"
POSTDATA[hmy_newPendingTransactionFilter]="hmy_newPendingTransactionFilter\", \"params\":[]"
POSTDATA[hmy_newBlockFilter]="hmy_newBlockFilter\", \"params\":[]"
POSTDATA[hmy_newFilter]="hmy_newFilter\", \"params\":[{\"BlockHash\": \"0x5725b5b2ab28206e7256a78cda4f9050c2629fd85110ffa54eacd2a13ba68072\"}]"
POSTDATA[hmy_call]="hmy_call\", \"params\":[{\"to\": \"0x08AE1abFE01aEA60a47663bCe0794eCCD5763c19\"}, \"latest\"]"
POSTDATA[hmy_gasPrice]="hmy_gasPrice\",\"params\":[]"
POSTDATA[hmy_blockNumber]="hmy_blockNumber\",\"params\":[]"
POSTDATA[net_version]="net_version\",\"params\":[]"
POSTDATA[hmy_protocolVersion]="hmy_protocolVersion\",\"params\":[]"
fi
if [ "$NETWORK" == "betanet" ]; then
POSTDATA[hmy_getBlockByHash]="hmy_getBlockByHash\",\"params\":[\"$TRANSACTION_BLOCK_HASH\", true]"
POSTDATA[hmy_getBlockByNumber]="hmy_getBlockByNumber\",\"params\":[\"$TRANSACTION_BLOCK_NUMBER\", true]"
POSTDATA[hmy_getBlockTransactionCountByHash]="hmy_getBlockTransactionCountByHash\",\"params\":[\"$TRANSACTION_BLOCK_HASH\"]"
POSTDATA[hmy_getBlockTransactionCountByNumber]="hmy_getBlockTransactionCountByNumber\",\"params\":[\"$TRANSACTION_BLOCK_NUMBER\"]"
POSTDATA[hmy_getCode]="hmy_getCode\",\"params\":[\"0x08AE1abFE01aEA60a47663bCe0794eCCD5763c19\", \"latest\"]"
POSTDATA[hmy_getTransactionByBlockHashAndIndex]="hmy_getTransactionByBlockHashAndIndex\",\"params\":[\"$TRANSACTION_BLOCK_HASH\", \"$TRANSACTION_INDEX\"]"
POSTDATA[hmy_getTransactionByBlockNumberAndIndex]="hmy_getTransactionByBlockNumberAndIndex\",\"params\":[\"$TRANSACTION_BLOCK_NUMBER\", \"$TRANSACTION_INDEX\"]"
POSTDATA[hmy_getTransactionByHash]="hmy_getTransactionByHash\",\"params\":[\"$TRANSACTION_HASH\"]"
POSTDATA[hmy_getTransactionReceipt]="hmy_getTransactionReceipt\",\"params\":[\"$TRANSACTION_HASH\"]"
POSTDATA[hmy_syncing]="hmy_syncing\",\"params\":[]"
POSTDATA[net_peerCount]="net_peerCount\",\"params\":[]"
POSTDATA[hmy_getBalance]="hmy_getBalance\",\"params\":[\"one18t4yj4fuutj83uwqckkvxp9gfa0568uc48ggj7\", \"latest\"]"
POSTDATA[hmy_getStorageAt]="hmy_getStorageAt\",\"params\":[\"0xD7Ff41CA29306122185A07d04293DdB35F24Cf2d\", \"0\", \"latest\"]"
POSTDATA[hmy_getTransactionCount]="hmy_getTransactionCount\",\"params\":[\"0x806171f95C5a74371a19e8a312c9e5Cb4E1D24f6\", \"latest\"]" # what is this
POSTDATA[hmy_sendRawTransaction]="hmy_sendRawTransaction\",\"params\":[\"$SIGNED_RAW_TRANSACTION\"]"
POSTDATA[hmy_getLogs]="hmy_getLogs\", \"params\":[{\"BlockHash\": \"$TRANSACTION_BLOCK_HASH\"}]"
POSTDATA[hmy_getFilterChanges]="hmy_getFilterChanges\", \"params\":[\"0x58010795a282878ed0d61da72a14b8b0\"]"
POSTDATA[hmy_newPendingTransactionFilter]="hmy_newPendingTransactionFilter\", \"params\":[]"
POSTDATA[hmy_newBlockFilter]="hmy_newBlockFilter\", \"params\":[]"
POSTDATA[hmy_newFilter]="hmy_newFilter\", \"params\":[{\"BlockHash\": \"0x5725b5b2ab28206e7256a78cda4f9050c2629fd85110ffa54eacd2a13ba68072\"}]"
POSTDATA[hmy_call]="hmy_call\", \"params\":[{\"to\": \"0x08AE1abFE01aEA60a47663bCe0794eCCD5763c19\"}, \"latest\"]"
POSTDATA[hmy_gasPrice]="hmy_gasPrice\",\"params\":[]"
POSTDATA[hmy_blockNumber]="hmy_blockNumber\",\"params\":[]"
POSTDATA[net_version]="net_version\",\"params\":[]"
POSTDATA[hmy_protocolVersion]="hmy_protocolVersion\",\"params\":[]"
fi
declare -A RESPONSES
RESPONSES[GET_blocks]=""
RESPONSES[GET_tx]=""
RESPONSES[GET_address]=""
RESPONSES[GET_node-count]=""
RESPONSES[GET_shard]=""
RESPONSES[GET_committee]=""
RESPONSES[hmy_getBlockByHash]=""
RESPONSES[hmy_getBlockByNumber]=""
RESPONSES[hmy_getBlockTransactionCountByHash]=""
RESPONSES[hmy_getBlockTransactionCountByNumber]=""
RESPONSES[hmy_getCode]=""
RESPONSES[hmy_getTransactionByBlockHashAndIndex]=""
RESPONSES[hmy_getTransactionByBlockNumberAndIndex]=""
RESPONSES[hmy_getTransactionByHash]=""
RESPONSES[hmy_getTransactionReceipt]=""
RESPONSES[hmy_syncing]=""
RESPONSES[net_peerCount]=""
RESPONSES[hmy_getBalance]=""
RESPONSES[hmy_getStorageAt]=""
RESPONSES[hmy_getTransactionCount]=""
RESPONSES[hmy_sendRawTransaction]=""
RESPONSES[hmy_getLogs]=""
RESPONSES[hmy_getFilterChanges]=""
RESPONSES[hmy_newPendingTransactionFilter]=""
RESPONSES[hmy_newBlockFilter]=""
RESPONSES[hmy_newFilter]=""
RESPONSES[hmy_call]=""
RESPONSES[hmy_gasPrice]=""
RESPONSES[hmy_blockNumber]=""
RESPONSES[net_version]=""
RESPONSES[hmy_protocolVersion]=""
### Processes GET requests and stores responses in RESPONSES ###
function GET_requests() {
for K in "${!GETDATA[@]}";
do
RESPONSES[$K]=$(curl -s --location --request GET "${PORT[GET]}${GETDATA[$K]}" \
--header "Content-Type: application/json" \
--data "")
done
}
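# For illustration (values assumed from the localnet settings above), GET_requests issues
# requests of the form:
#   curl -s --location --request GET "localhost:5099/node-count" --header "Content-Type: application/json" --data ""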
### Processes POST requests and stores responses in RESPONSES ###
function POST_requests() {
for K in "${!POSTDATA[@]}";
do
RESPONSES[$K]="$(curl -s --location --request POST "${PORT[POST]}" \
--header "Content-Type: application/json" \
--data "{\"jsonrpc\":\"2.0\",\"method\":\"${POSTDATA[$K]},\"id\":1}")"
done
}
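# For illustration, with the POSTDATA entries above the hmy_blockNumber request body expands to:
#   {"jsonrpc":"2.0","method":"hmy_blockNumber","params":[],"id":1}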
function log_API_responses() {
for K in "${!GETDATA[@]}";
do
echo "${yellow}$K"
echo "${blue}REQUEST:"
echo "${white}${GETDATA[$K]}"
echo "${blue}RESPONSE:" ${white}
echo ${RESPONSES[$K]} #| jq .
echo
echo
done
for K in "${!POSTDATA[@]}";
do
echo "${yellow}$K"
echo "${blue}REQUEST:"
echo "${white}${POSTDATA[$K]}"
echo "${blue}RESPONSE: $white"
echo ${RESPONSES[$K]} #| jq .
echo
echo
done
}
GET_requests
POST_requests
### BASIC QUERY TESTS ###
function Explorer_getBlock_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "GET blocks(explorer) test:"
response_test ${RESPONSES[GET_blocks]}
if [ "$?" == "1" ]; then
BLOCKBYIDHASH=$(echo ${RESPONSES[GET_blocks]} | jq -r .[0].id)
if [ "$BLOCKBYIDHASH" != "null" ]; then
if [ "$BLOCKBYIDHASH" == "$TRANSACTION_BLOCK_HASH" ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}BLOCK HASH MATCHES TX${reset}
echo
return
fi
fi
echo ${red}BLOCK HASH DOES NOT MATCH TX OR IS NULL${reset}
fi
echo
}
# Needs updating - unclear what getTx does with no arguments
function Explorer_getTx_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "GET tx(explorer) test:"
response_test ${RESPONSES[GET_tx]}
if [ "$?" == "1" ]; then
TX_HASH=$(echo ${RESPONSES[GET_tx]} | jq -r .id) # fix args to jq
if [ "$TX_HASH" != "null" ]; then
if [ "$TX_HASH" == "$TX_HASH" ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}BLOCK HASH MATCHES TX${reset}
echo
return
fi
fi
echo ${red}BLOCK HASH DOES NOT MATCH TX OR IS NULL${reset}
fi
echo
}
function Explorer_getExplorerNodeAdress_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "GET address(explorer) test:"
response_test ${RESPONSES[GET_address]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function Explorer_getExplorerNode_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "GET node-count(explorer) test:"
response_test ${RESPONSES[GET_node-count]}
if [ "${RESPONSES[GET_node-count]}" == "2" ]; then
echo ${green}SANE VALUE, 2 explorer nodes reported $reset
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
else
echo ${red}explorer node count reported is not 2 $reset
fi
echo
}
function Explorer_getShard_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "GET shard(explorer) test:"
response_test ${RESPONSES[GET_shard]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function Explorer_getCommitte_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "GET committe(explorer) test:"
response_test ${RESPONSES[GET_committee]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
### API POST REQUESTS ###
function API_getBlockByNumber_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getBlockByNumber test:"
response_test ${RESPONSES[hmy_getBlockByNumber]}
BLOCKBYNUMBERHASH=$(echo ${RESPONSES[hmy_getBlockByNumber]} | jq -r '.result.hash')
if [ "$BLOCKBLOCKBYNUMBERHASH" != "null" ]; then
if [ "$BLOCKBYNUMBERHASH" == "$TRANSACTION_BLOCK_HASH" ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}BLOCK HASH MATCHES TX${reset}
echo
return
fi
fi
echo ${red}BLOCK HASH DOES NOT MATCH TX OR IS NULL${reset}
echo
}
function API_getBlockByHash_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getBlockByHash test:"
response_test ${RESPONSES[hmy_getBlockByHash]}
BLOCKBYHASHHASH=$(echo ${RESPONSES[hmy_getBlockByHash]} | jq -r '.result.hash')
if [ "$BLOCKBYHASHBYHASH" != "null" ]; then
if [ "$BLOCKBYHASHHASH" == "$TRANSACTION_BLOCK_HASH" ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}BLOCK HASH MATCHES TX${reset}
echo
return
fi
fi
echo ${red}BLOCK HASH DOES NOT MATCH TX OR IS NULL${reset}
echo
}
function API_getBlockTransactionCountByHash_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getBlockTransactionCountByHash test:"
response_test ${RESPONSES[hmy_getBlockTransactionCountByHash]}
TRANSACTIONCOUNTBYHASH=$(echo ${RESPONSES[hmy_getBlockTransactionCountByHash]} | jq -r '.result')
TRANSACTIONCOUNTBYHASH=$(( TRANSACTIONCOUNTBYHASH ))
if [ "$TRANSACTIONCOUNTBYHASH" != "null" ]; then
if [ $TRANSACTIONCOUNTBYHASH -gt 0 ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}NON ZERO TRANSACTION COUNT IN BLOCK${reset}
echo
return
fi
fi
echo ${red}INVALID TRANSACTION COUNT IN BLOCK${reset}
echo
}
function API_getBlockTransactionCountByNumber_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getBlockTransactionCountByNumber test:"
response_test ${RESPONSES[hmy_getBlockTransactionCountByNumber]}
TRANSACTIONCOUNTBYNUMBER=$(echo ${RESPONSES[hmy_getBlockTransactionCountByNumber]} | jq -r '.result')
TRANSACTIONCOUNTBYNUMBER=$(( TRANSACTIONCOUNTBYNUMBER ))
if [ "$BLOCKBYHASH" != "null" ]; then
if [ $TRANSACTIONCOUNTBYNUMBER -gt 0 ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}NON ZERO TRANSACTION COUNT IN BLOCK${reset}
echo
return
fi
fi
echo ${red}NON NATURAL TRANSACTION COUNT IN BLOCK${reset}
echo
}
function API_getCode_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getCode test:"
response_test ${RESPONSES[hmy_getCode]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_getTransactionByBlockHashAndIndex_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getTransactionByBlockHashAndIndex test:"
response_test ${RESPONSES[hmy_getTransactionByBlockHashAndIndex]}
TRANSACTIONHASHBYHASHANDINDEX=$(echo ${RESPONSES[hmy_getTransactionByBlockHashAndIndex]} | jq -r '.result.hash')
if [ "$TRANSACTIONHASHBYHASHANDINDEX" != "null" ]; then
if [ "$TRANSACTIONHASHBYHASHANDINDEX" == "$TRANSACTION_HASH" ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}TRANSACTION FROM BLOCKHASH AND INDEX MATCH${reset}
echo
return
fi
fi
echo ${red}TRANSACTION FROM BLOCKHASH AND INDEX MISMATCH${reset}
echo
}
function API_getTransactionByBlockNumberAndIndex_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getTransactionByBlockNumberAndIndex test:"
response_test ${RESPONSES[hmy_getTransactionByBlockNumberAndIndex]}
TRANSACTIONHASHBYNUMBERANDINDEX=$(echo ${RESPONSES[hmy_getTransactionByBlockNumberAndIndex]} | jq -r '.result.hash')
if [ "$TRANSACTIONHASHBYNUMBERANDINDEX" != "null" ]; then
if [ "$TRANSACTIONHASHBYNUMBERANDINDEX" == "$TRANSACTION_HASH" ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}TRANSACTION FROM BLOCKNUMBER AND INDEX MATCH${reset}
echo
return
fi
fi
  echo ${red}TRANSACTION FROM BLOCKNUMBER AND INDEX MISMATCH${reset}
echo
}
function API_getTransactionByHash_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getTransactionByHash test:"
TX_HASH=$(echo ${RESPONSES[hmy_getTransactionByHash]} | jq -r '.result.hash')
response_test ${RESPONSES[hmy_getTransactionByHash]}
if [ "$TX_HASH" != "null" ]; then
if [ "$TX_HASH" == "$TRANSACTION_HASH" ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}TRANSACTION HASH MATCH${reset}
echo
return
fi
fi
  echo ${red}TRANSACTION HASH MISMATCH${reset}
echo
}
function API_getTransactionReceipt_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getTransactionReceipt test:"
TX_HASH=$(echo ${RESPONSES[hmy_getTransactionReceipt]} | jq -r '.result.transactionHash')
response_test ${RESPONSES[hmy_getTransactionReceipt]}
if [ "$TX_HASH" != "null" ]; then
if [ "$TX_HASH" == "$TRANSACTION_HASH" ]; then
TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo ${green}TRANSACTION HASH MATCH${reset}
echo
return
fi
fi
  echo ${red}TRANSACTION HASH MISMATCH${reset}
echo
}
function API_syncing_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_syncing test:"
response_test ${RESPONSES[hmy_syncing]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_netPeerCount_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST net_peerCount test:"
response_test ${RESPONSES[net_peerCount]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_getBalance_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getBalance test:"
response_test ${RESPONSES[hmy_getBalance]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_getStorageAt_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getStorageAt test:"
response_test ${RESPONSES[hmy_getStorageAt]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_getTransactionCount_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getTransactionCount test:"
response_test ${RESPONSES[hmy_getTransactionCount]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_sendRawTransaction_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_sendRawTransaction test:"
response_test ${RESPONSES[hmy_sendRawTransaction]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_getLogs_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getLogs test:"
response_test ${RESPONSES[hmy_getLogs]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_getFilterChanges_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_getFilterChanges test:"
response_test ${RESPONSES[hmy_getFilterChanges]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_newPendingTransactionFilter_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_sendRawTransaction test:"
response_test ${RESPONSES[hmy_newPendingTransactionFilter]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_newBlockFilter_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_newBlockFilter test:"
response_test ${RESPONSES[hmy_newBlockFilter]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_newFilter_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_newFilter test:"
response_test ${RESPONSES[hmy_newFilter]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_call_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_call test:"
response_test ${RESPONSES[hmy_call]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_gasPrice_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_gasPrice test:"
response_test ${RESPONSES[hmy_gasPrice]}
if [ "$?" == "1" ]; then
RESULT=$(echo ${RESPONSES[hmy_gasPrice]} | jq -r '.result')
isHexTest $RESULT
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
fi
}
function API_blockNumber_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_blockNumber test:"
response_test ${RESPONSES[hmy_blockNumber]}
if [ "$?" == "1" ]; then
RESULT=$(echo ${RESPONSES[hmy_blockNumber]} | jq -r '.result')
isHexTest $RESULT
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
fi
}
function API_net_version_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST net_version test:"
response_test ${RESPONSES[net_version]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function API_protocolVersion_test() {
TESTS_RAN=$(( TESTS_RAN + 1 ))
echo "POST hmy_protocolVersion test:"
response_test ${RESPONSES[hmy_protocolVersion]}
[ "$?" == "1" ] && TESTS_PASSED=$(( TESTS_PASSED + 1 ))
echo
}
function run_tests() {
echo "### TESTING RPC CALLS ###"
echo
  ### Calls to the individual API method tests ###
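  # Explorer (GET) endpoints run first, then the hmy_/net_ JSON-RPC (POST)
  # methods, followed by the pass/fail summary at the end of this function.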
Explorer_getBlock_test
Explorer_getTx_test
Explorer_getExplorerNodeAdress_test
Explorer_getExplorerNode_test
Explorer_getShard_test
Explorer_getCommitte_test
API_getBlockByNumber_test
API_getBlockByHash_test
API_getBlockTransactionCountByHash_test
API_getBlockTransactionCountByNumber_test
API_getCode_test
API_getTransactionByBlockHashAndIndex_test
API_getTransactionByBlockNumberAndIndex_test
API_getTransactionByHash_test
API_getTransactionReceipt_test
API_syncing_test
API_netPeerCount_test
API_getBalance_test
API_getStorageAt_test
API_getTransactionCount_test
API_sendRawTransaction_test
API_getLogs_test
API_getFilterChanges_test
API_newPendingTransactionFilter_test
API_newBlockFilter_test
API_newFilter_test
API_call_test
API_gasPrice_test
API_blockNumber_test
API_net_version_test
API_protocolVersion_test
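  # Summary: every test not counted as passed is counted as failed; the tally
  # prints green only when all tests passed, red otherwise.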
TESTS_FAILED=$(( $TESTS_RAN - $TESTS_PASSED ))
echo -n ${red}
[ $TESTS_FAILED -eq 0 ] && echo -n ${green}
echo "PASSED $TESTS_PASSED/$TESTS_RAN: $TESTS_FAILED TESTS FAILED"${reset}
}
if [ "$VERBOSE" == "TRUE" ]; then
log_API_responses
fi
### BETANET TESTS ###
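# run_tests reads only the RESPONSES cache, so the endpoint it exercises
# (betanet here) is whichever one the requests earlier in this script were
# issued against.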
run_tests