Merge branch 'master' of github.com:harmony-one/harmony into blockheader_download_request

pull/1530/head
Dennis Won 5 years ago
commit 4535619f6e
  1. 24
      .hmy/wallet.ini
  2. 5
      Makefile
  3. 394
      api/service/explorer/service.go
  4. 8
      api/service/explorer/storage.go
  5. 8
      api/service/explorer/structs.go
  6. 3
      api/service/networkinfo/service.go
  7. 33
      api/service/syncing/syncing.go
  8. 1
      block/factory/factory.go
  9. 24
      cmd/client/wallet/generated_wallet.ini.go
  10. 13
      cmd/client/wallet/main.go
  11. 24
      cmd/client/wallet_stress_test/generated_wallet.ini.go
  12. 15
      cmd/client/wallet_stress_test/main.go
  13. 48
      cmd/harmony/main.go
  14. 4
      consensus/consensus_service.go
  15. 5
      consensus/consensus_v2.go
  16. 2
      consensus/engine/consensus_engine.go
  17. 14
      core/block_validator.go
  18. 37
      core/blockchain.go
  19. 58
      core/cx_pool.go
  20. 73
      core/rawdb/accessors_indexes.go
  21. 10
      core/rawdb/schema.go
  22. 18
      core/resharding.go
  23. 6
      core/state_processor.go
  24. 4
      core/types/block.go
  25. 7
      core/types/bodyv0.go
  26. 16
      core/types/bodyv1.go
  27. 37
      hmy/api_backend.go
  28. 7
      hmy/backend.go
  29. 50
      internal/chain/engine.go
  30. 17
      internal/configs/node/config.go
  31. 2
      internal/configs/sharding/localnet.go
  32. 2
      internal/configs/sharding/mainnet.go
  33. 2
      internal/genesis/localnodes.go
  34. 3
      internal/hmyapi/README.md
  35. 6
      internal/hmyapi/backend.go
  36. 51
      internal/hmyapi/blockchain.go
  37. 8
      internal/hmyapi/error.go
  38. 20
      internal/hmyapi/harmony.go
  39. 11
      internal/hmyapi/transactionpool.go
  40. 94
      internal/hmyapi/types.go
  41. 49
      internal/params/config.go
  42. 26
      internal/utils/configfile.go
  43. 3
      internal/utils/configfile_test.go
  44. 4
      node/contract.go
  45. 41
      node/node.go
  46. 56
      node/node_cross_shard.go
  47. 57
      node/node_explorer.go
  48. 8
      node/node_genesis.go
  49. 12
      node/node_handler.go
  50. 6
      node/node_newblock.go
  51. 148
      node/node_syncing.go
  52. 14
      node/rpc.go
  53. 38
      node/worker/worker.go
  54. 4
      node/worker/worker_test.go
  55. 2
      scripts/docker/run
  56. 68
      scripts/go_executable_build.sh
  57. 253
      scripts/node.sh
  58. 2
      scripts/wallet.sh
  59. 2
      test/chain/main.go
  60. 4
      test/configs/local-resharding.txt
  61. 26
      test/deploy.sh

@ -1,4 +1,5 @@
[main] [main]
chain_id = 1
bootnode = /ip4/100.26.90.187/tcp/9874/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv bootnode = /ip4/100.26.90.187/tcp/9874/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv
bootnode = /ip4/54.213.43.194/tcp/9874/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9 bootnode = /ip4/54.213.43.194/tcp/9874/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9
bootnode = /ip4/13.113.101.219/tcp/12019/p2p/QmQayinFSgMMw5cSpDUiD9pQ2WeP6WNmGxpZ6ou3mdVFJX bootnode = /ip4/13.113.101.219/tcp/12019/p2p/QmQayinFSgMMw5cSpDUiD9pQ2WeP6WNmGxpZ6ou3mdVFJX
@ -22,6 +23,7 @@ rpc = l3.t.hmny.io:14555
rpc = s3.t.hmny.io:14555 rpc = s3.t.hmny.io:14555
[local] [local]
chain_id = 2
bootnode = /ip4/127.0.0.1/tcp/19876/p2p/Qmc1V6W7BwX8Ugb42Ti8RnXF1rY5PF7nnZ6bKBryCgi6cv bootnode = /ip4/127.0.0.1/tcp/19876/p2p/Qmc1V6W7BwX8Ugb42Ti8RnXF1rY5PF7nnZ6bKBryCgi6cv
shards = 2 shards = 2
@ -36,6 +38,7 @@ rpc = 127.0.0.1:14558
rpc = 127.0.0.1:14560 rpc = 127.0.0.1:14560
[beta] [beta]
chain_id = 2
bootnode = /ip4/54.213.43.194/tcp/9868/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9 bootnode = /ip4/54.213.43.194/tcp/9868/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9
bootnode = /ip4/100.26.90.187/tcp/9868/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv bootnode = /ip4/100.26.90.187/tcp/9868/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv
bootnode = /ip4/13.113.101.219/tcp/12018/p2p/QmQayinFSgMMw5cSpDUiD9pQ2WeP6WNmGxpZ6ou3mdVFJX bootnode = /ip4/13.113.101.219/tcp/12018/p2p/QmQayinFSgMMw5cSpDUiD9pQ2WeP6WNmGxpZ6ou3mdVFJX
@ -50,22 +53,23 @@ rpc = l1.b.hmny.io:14555
rpc = s1.b.hmny.io:14555 rpc = s1.b.hmny.io:14555
[pangaea] [pangaea]
bootnode = /ip4/54.86.126.90/tcp/9867/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv chain_id = 3
bootnode = /ip4/52.40.84.2/tcp/9867/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9 bootnode = /ip4/54.86.126.90/tcp/9889/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv
bootnode = /ip4/52.40.84.2/tcp/9889/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9
shards = 4 shards = 4
[pangaea.shard0.rpc] [pangaea.shard0.rpc]
rpc = l0.n.hmny.io:14555 rpc = l0.p.hmny.io:14555
rpc = s0.n.hmny.io:14555 rpc = s0.p.hmny.io:14555
[pangaea.shard1.rpc] [pangaea.shard1.rpc]
rpc = l1.n.hmny.io:14555 rpc = l1.p.hmny.io:14555
rpc = s1.n.hmny.io:14555 rpc = s1.p.hmny.io:14555
[pangaea.shard2.rpc] [pangaea.shard2.rpc]
rpc = l2.n.hmny.io:14555 rpc = l2.p.hmny.io:14555
rpc = s2.n.hmny.io:14555 rpc = s2.p.hmny.io:14555
[pangaea.shard3.rpc] [pangaea.shard3.rpc]
rpc = l3.n.hmny.io:14555 rpc = l3.p.hmny.io:14555
rpc = s3.n.hmny.io:14555 rpc = s3.p.hmny.io:14555

@ -6,7 +6,7 @@ export LIBRARY_PATH:=$(LD_LIBRARY_PATH)
export DYLD_FALLBACK_LIBRARY_PATH:=$(LD_LIBRARY_PATH) export DYLD_FALLBACK_LIBRARY_PATH:=$(LD_LIBRARY_PATH)
export GO111MODULE:=on export GO111MODULE:=on
.PHONY: all libs exe .PHONY: all libs exe test
all: libs all: libs
./scripts/go_executable_build.sh ./scripts/go_executable_build.sh
@ -17,3 +17,6 @@ libs:
exe: exe:
./scripts/go_executable_build.sh ./scripts/go_executable_build.sh
test:
./test/debug.sh

@ -50,7 +50,7 @@ type Service struct {
Port string Port string
GetNodeIDs func() []libp2p_peer.ID GetNodeIDs func() []libp2p_peer.ID
ShardID uint32 ShardID uint32
storage *Storage Storage *Storage
server *http.Server server *http.Server
messageChan chan *msg_pb.Message messageChan chan *msg_pb.Message
GetAccountBalance func(common.Address) (*big.Int, error) GetAccountBalance func(common.Address) (*big.Int, error)
@ -67,6 +67,16 @@ func New(selfPeer *p2p.Peer, shardID uint32, GetNodeIDs func() []libp2p_peer.ID,
} }
} }
// ServiceAPI is rpc api.
type ServiceAPI struct {
Service *Service
}
// NewServiceAPI returns explorer service api.
func NewServiceAPI(explorerService *Service) *ServiceAPI {
return &ServiceAPI{Service: explorerService}
}
// StartService starts explorer service. // StartService starts explorer service.
func (s *Service) StartService() { func (s *Service) StartService() {
utils.Logger().Info().Msg("Starting explorer service.") utils.Logger().Info().Msg("Starting explorer service.")
@ -95,7 +105,7 @@ func GetExplorerPort(nodePort string) string {
// Init is to initialize for ExplorerService. // Init is to initialize for ExplorerService.
func (s *Service) Init(remove bool) { func (s *Service) Init(remove bool) {
s.storage = GetStorageInstance(s.IP, s.Port, remove) s.Storage = GetStorageInstance(s.IP, s.Port, remove)
} }
// Run is to run serving explorer. // Run is to run serving explorer.
@ -150,7 +160,7 @@ func (s *Service) ReadBlocksFromDB(from, to int) []*types.Block {
continue continue
} }
key := GetBlockKey(i) key := GetBlockKey(i)
data, err := storage.db.Get([]byte(key)) data, err := s.Storage.db.Get([]byte(key))
if err != nil { if err != nil {
blocks = append(blocks, nil) blocks = append(blocks, nil)
continue continue
@ -172,6 +182,12 @@ func (s *Service) GetExplorerBlocks(w http.ResponseWriter, r *http.Request) {
to := r.FormValue("to") to := r.FormValue("to")
pageParam := r.FormValue("page") pageParam := r.FormValue("page")
offsetParam := r.FormValue("offset") offsetParam := r.FormValue("offset")
withSignersParam := r.FormValue("with_signers")
withSigners := false
if withSignersParam == "true" {
withSigners = true
}
data := &Data{ data := &Data{
Blocks: []*Block{}, Blocks: []*Block{},
} }
@ -186,7 +202,7 @@ func (s *Service) GetExplorerBlocks(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusBadRequest) w.WriteHeader(http.StatusBadRequest)
return return
} }
db := s.storage.GetDB() db := s.Storage.GetDB()
fromInt, err := strconv.Atoi(from) fromInt, err := strconv.Atoi(from)
if err != nil { if err != nil {
utils.Logger().Warn().Err(err).Msg("invalid from parameter") utils.Logger().Warn().Err(err).Msg("invalid from parameter")
@ -236,19 +252,21 @@ func (s *Service) GetExplorerBlocks(w http.ResponseWriter, r *http.Request) {
accountBlocks := s.ReadBlocksFromDB(fromInt, toInt) accountBlocks := s.ReadBlocksFromDB(fromInt, toInt)
curEpoch := int64(-1) curEpoch := int64(-1)
committee := &shard.Committee{} committee := &shard.Committee{}
if withSigners {
if bytes, err := db.Get([]byte(GetCommitteeKey(uint32(s.ShardID), 0))); err == nil {
if err = rlp.DecodeBytes(bytes, committee); err != nil {
utils.Logger().Warn().Err(err).Msg("cannot read committee for new epoch")
}
}
}
for id, accountBlock := range accountBlocks { for id, accountBlock := range accountBlocks {
if id == 0 || id == len(accountBlocks)-1 || accountBlock == nil { if id == 0 || id == len(accountBlocks)-1 || accountBlock == nil {
continue continue
} }
block := NewBlock(accountBlock, id+fromInt-1) block := NewBlock(accountBlock, id+fromInt-1)
if int64(block.Epoch) > curEpoch { if withSigners && int64(block.Epoch) > curEpoch {
if bytes, err := db.Get([]byte(GetCommitteeKey(uint32(s.ShardID), block.Epoch))); err == nil { if accountBlocks[id-1] != nil {
committee = &shard.Committee{} state, err := accountBlocks[id-1].Header().GetShardState()
if err = rlp.DecodeBytes(bytes, committee); err != nil {
utils.Logger().Warn().Err(err).Msg("cannot read committee for new epoch")
}
} else {
state, err := accountBlock.Header().GetShardState()
if err == nil { if err == nil {
for _, shardCommittee := range state { for _, shardCommittee := range state {
if shardCommittee.ShardID == accountBlock.ShardID() { if shardCommittee.ShardID == accountBlock.ShardID() {
@ -256,28 +274,32 @@ func (s *Service) GetExplorerBlocks(w http.ResponseWriter, r *http.Request) {
break break
} }
} }
} else {
utils.Logger().Warn().Err(err).Msg("error parsing shard state")
} }
} }
curEpoch = int64(block.Epoch) curEpoch = int64(block.Epoch)
} }
pubkeys := make([]*bls.PublicKey, len(committee.NodeList)) if withSigners {
for i, validator := range committee.NodeList { pubkeys := make([]*bls.PublicKey, len(committee.NodeList))
pubkeys[i] = new(bls.PublicKey) for i, validator := range committee.NodeList {
validator.BlsPublicKey.ToLibBLSPublicKey(pubkeys[i]) pubkeys[i] = new(bls.PublicKey)
} validator.BlsPublicKey.ToLibBLSPublicKey(pubkeys[i])
mask, err := bls2.NewMask(pubkeys, nil) }
if err == nil && accountBlocks[id+1] != nil { mask, err := bls2.NewMask(pubkeys, nil)
err = mask.SetMask(accountBlocks[id+1].Header().LastCommitBitmap()) if err == nil && accountBlocks[id+1] != nil {
if err == nil { err = mask.SetMask(accountBlocks[id+1].Header().LastCommitBitmap())
for _, validator := range committee.NodeList { if err == nil {
oneAddress, err := common2.AddressToBech32(validator.EcdsaAddress) for _, validator := range committee.NodeList {
if err != nil { oneAddress, err := common2.AddressToBech32(validator.EcdsaAddress)
continue if err != nil {
} continue
blsPublicKey := new(bls.PublicKey) }
validator.BlsPublicKey.ToLibBLSPublicKey(blsPublicKey) blsPublicKey := new(bls.PublicKey)
if ok, err := mask.KeyEnabled(blsPublicKey); err == nil && ok { validator.BlsPublicKey.ToLibBLSPublicKey(blsPublicKey)
block.Signers = append(block.Signers, oneAddress) if ok, err := mask.KeyEnabled(blsPublicKey); err == nil && ok {
block.Signers = append(block.Signers, oneAddress)
}
} }
} }
} }
@ -315,12 +337,122 @@ func (s *Service) GetExplorerBlocks(w http.ResponseWriter, r *http.Request) {
} }
data.Blocks = append(data.Blocks, block) data.Blocks = append(data.Blocks, block)
} }
if offset*page+offset > len(data.Blocks) {
data.Blocks = data.Blocks[offset*page:]
} else {
data.Blocks = data.Blocks[offset*page : offset*page+offset]
}
}
paginatedBlocks := make([]*Block, 0) // GetExplorerBlocks rpc end-point.
for i := 0; i < offset && i+offset*page < len(data.Blocks); i++ { func (s *ServiceAPI) GetExplorerBlocks(ctx context.Context, from, to, page, offset int, withSigners bool) ([]*Block, error) {
paginatedBlocks = append(paginatedBlocks, data.Blocks[i+offset*page]) if offset == 0 {
offset = paginationOffset
}
db := s.Service.Storage.GetDB()
if to == 0 {
bytes, err := db.Get([]byte(BlockHeightKey))
if err == nil {
to, err = strconv.Atoi(string(bytes))
if err != nil {
utils.Logger().Info().Msg("failed to fetch block height")
return nil, err
}
}
} }
data.Blocks = paginatedBlocks blocks := make([]*Block, 0)
accountBlocks := s.Service.ReadBlocksFromDB(from, to)
curEpoch := int64(-1)
committee := &shard.Committee{}
if withSigners {
if bytes, err := db.Get([]byte(GetCommitteeKey(uint32(s.Service.ShardID), 0))); err == nil {
if err = rlp.DecodeBytes(bytes, committee); err != nil {
utils.Logger().Warn().Err(err).Msg("cannot read committee for new epoch")
}
}
}
for id, accountBlock := range accountBlocks {
if id == 0 || id == len(accountBlocks)-1 || accountBlock == nil {
continue
}
block := NewBlock(accountBlock, id+from-1)
if withSigners && int64(block.Epoch) > curEpoch {
if accountBlocks[id-1] != nil {
state, err := accountBlocks[id-1].Header().GetShardState()
if err == nil {
for _, shardCommittee := range state {
if shardCommittee.ShardID == accountBlock.ShardID() {
committee = &shardCommittee
break
}
}
}
}
curEpoch = int64(block.Epoch)
}
if withSigners {
pubkeys := make([]*bls.PublicKey, len(committee.NodeList))
for i, validator := range committee.NodeList {
pubkeys[i] = new(bls.PublicKey)
validator.BlsPublicKey.ToLibBLSPublicKey(pubkeys[i])
}
mask, err := bls2.NewMask(pubkeys, nil)
if err == nil && accountBlocks[id+1] != nil {
err = mask.SetMask(accountBlocks[id+1].Header().LastCommitBitmap())
if err == nil {
for _, validator := range committee.NodeList {
oneAddress, err := common2.AddressToBech32(validator.EcdsaAddress)
if err != nil {
continue
}
blsPublicKey := new(bls.PublicKey)
validator.BlsPublicKey.ToLibBLSPublicKey(blsPublicKey)
if ok, err := mask.KeyEnabled(blsPublicKey); err == nil && ok {
block.Signers = append(block.Signers, oneAddress)
}
}
}
}
}
// Populate transactions
for _, tx := range accountBlock.Transactions() {
transaction := GetTransaction(tx, accountBlock)
if transaction != nil {
block.TXs = append(block.TXs, transaction)
}
}
if accountBlocks[id-1] == nil {
block.BlockTime = int64(0)
block.PrevBlock = RefBlock{
ID: "",
Height: "",
}
} else {
block.BlockTime = accountBlock.Time().Int64() - accountBlocks[id-1].Time().Int64()
block.PrevBlock = RefBlock{
ID: accountBlocks[id-1].Hash().Hex(),
Height: strconv.Itoa(id + from - 2),
}
}
if accountBlocks[id+1] == nil {
block.NextBlock = RefBlock{
ID: "",
Height: "",
}
} else {
block.NextBlock = RefBlock{
ID: accountBlocks[id+1].Hash().Hex(),
Height: strconv.Itoa(id + from),
}
}
blocks = append(blocks, block)
}
if offset*page+offset > len(blocks) {
blocks = blocks[offset*page:]
} else {
blocks = blocks[offset*page : offset*page+offset]
}
return blocks, nil
} }
// GetExplorerTransaction servers /tx end-point. // GetExplorerTransaction servers /tx end-point.
@ -339,7 +471,7 @@ func (s *Service) GetExplorerTransaction(w http.ResponseWriter, r *http.Request)
w.WriteHeader(http.StatusBadRequest) w.WriteHeader(http.StatusBadRequest)
return return
} }
db := s.storage.GetDB() db := s.Storage.GetDB()
bytes, err := db.Get([]byte(GetTXKey(id))) bytes, err := db.Get([]byte(GetTXKey(id)))
if err != nil { if err != nil {
utils.Logger().Warn().Err(err).Str("id", id).Msg("cannot read TX") utils.Logger().Warn().Err(err).Str("id", id).Msg("cannot read TX")
@ -355,6 +487,26 @@ func (s *Service) GetExplorerTransaction(w http.ResponseWriter, r *http.Request)
data.TX = *tx data.TX = *tx
} }
// GetExplorerTransaction rpc end-point.
func (s *ServiceAPI) GetExplorerTransaction(ctx context.Context, id string) (*Transaction, error) {
if id == "" {
utils.Logger().Warn().Msg("invalid id parameter")
return nil, nil
}
db := s.Service.Storage.GetDB()
bytes, err := db.Get([]byte(GetTXKey(id)))
if err != nil {
utils.Logger().Warn().Err(err).Str("id", id).Msg("cannot read TX")
return nil, err
}
tx := new(Transaction)
if rlp.DecodeBytes(bytes, tx) != nil {
utils.Logger().Warn().Str("id", id).Msg("cannot convert data from DB")
return nil, err
}
return tx, nil
}
// GetExplorerCommittee servers /comittee end-point. // GetExplorerCommittee servers /comittee end-point.
func (s *Service) GetExplorerCommittee(w http.ResponseWriter, r *http.Request) { func (s *Service) GetExplorerCommittee(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
@ -385,7 +537,7 @@ func (s *Service) GetExplorerCommittee(w http.ResponseWriter, r *http.Request) {
return return
} }
// fetch current epoch if epoch is 0 // fetch current epoch if epoch is 0
db := s.storage.GetDB() db := s.Storage.GetDB()
if epoch == 0 { if epoch == 0 {
bytes, err := db.Get([]byte(BlockHeightKey)) bytes, err := db.Get([]byte(BlockHeightKey))
blockHeight, err := strconv.Atoi(string(bytes)) blockHeight, err := strconv.Atoi(string(bytes))
@ -435,6 +587,56 @@ func (s *Service) GetExplorerCommittee(w http.ResponseWriter, r *http.Request) {
} }
} }
// GetExplorerCommittee rpc end-point.
func (s *ServiceAPI) GetExplorerCommittee(ctx context.Context, shardID uint32, epoch uint64) (*Committee, error) {
if s.Service.ShardID != uint32(shardID) {
utils.Logger().Warn().Msg("incorrect shard id")
return nil, nil
}
// fetch current epoch if epoch is 0
db := s.Service.Storage.GetDB()
if epoch == 0 {
bytes, err := db.Get([]byte(BlockHeightKey))
blockHeight, err := strconv.Atoi(string(bytes))
if err != nil {
utils.Logger().Warn().Err(err).Msg("cannot decode block height from DB")
return nil, err
}
key := GetBlockKey(blockHeight)
data, err := db.Get([]byte(key))
block := new(types.Block)
if rlp.DecodeBytes(data, block) != nil {
utils.Logger().Warn().Err(err).Msg("cannot get block from db")
return nil, err
}
epoch = block.Epoch().Uint64()
}
bytes, err := db.Get([]byte(GetCommitteeKey(uint32(shardID), epoch)))
if err != nil {
utils.Logger().Warn().Err(err).Msg("cannot read committee")
return nil, err
}
committee := &shard.Committee{}
if err := rlp.DecodeBytes(bytes, committee); err != nil {
utils.Logger().Warn().Err(err).Msg("cannot decode committee data from DB")
return nil, err
}
validators := &Committee{}
for _, validator := range committee.NodeList {
validatorBalance := big.NewInt(0)
validatorBalance, err := s.Service.GetAccountBalance(validator.EcdsaAddress)
if err != nil {
continue
}
oneAddress, err := common2.AddressToBech32(validator.EcdsaAddress)
if err != nil {
continue
}
validators.Validators = append(validators.Validators, &Validator{Address: oneAddress, Balance: validatorBalance})
}
return validators, nil
}
// GetExplorerAddress serves /address end-point. // GetExplorerAddress serves /address end-point.
func (s *Service) GetExplorerAddress(w http.ResponseWriter, r *http.Request) { func (s *Service) GetExplorerAddress(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
@ -493,24 +695,20 @@ func (s *Service) GetExplorerAddress(w http.ResponseWriter, r *http.Request) {
balance, err := s.GetAccountBalance(address) balance, err := s.GetAccountBalance(address)
if err == nil { if err == nil {
balanceAddr = balance balanceAddr = balance
data.Address.Balance = balance
} }
} }
db := s.storage.GetDB() db := s.Storage.GetDB()
bytes, err := db.Get([]byte(key)) bytes, err := db.Get([]byte(key))
if err != nil {
utils.Logger().Warn().Err(err).Str("id", id).Msg("cannot read address from db")
return
}
if err = rlp.DecodeBytes(bytes, &data.Address); err != nil { if err = rlp.DecodeBytes(bytes, &data.Address); err != nil {
utils.Logger().Warn().Str("id", id).Msg("cannot convert data from DB") utils.Logger().Warn().Str("id", id).Msg("cannot convert data from DB")
w.WriteHeader(http.StatusInternalServerError) w.WriteHeader(http.StatusInternalServerError)
return return
} }
if balanceAddr.Cmp(big.NewInt(0)) != 0 {
data.Address.Balance = balanceAddr data.Address.Balance = balanceAddr
}
switch txView { switch txView {
case txViewNone: case txViewNone:
data.Address.TXs = nil data.Address.TXs = nil
@ -531,11 +729,80 @@ func (s *Service) GetExplorerAddress(w http.ResponseWriter, r *http.Request) {
} }
data.Address.TXs = sentTXs data.Address.TXs = sentTXs
} }
paginatedTXs := make([]*Transaction, 0) if offset*page+offset > len(data.Address.TXs) {
for i := 0; i < offset && i+offset*page < len(data.Address.TXs); i++ { data.Address.TXs = data.Address.TXs[offset*page:]
paginatedTXs = append(paginatedTXs, data.Address.TXs[i+offset*page]) } else {
data.Address.TXs = data.Address.TXs[offset*page : offset*page+offset]
}
}
// GetExplorerAddress rpc end-point.
func (s *ServiceAPI) GetExplorerAddress(ctx context.Context, id, txView string, page, offset int) (*Address, error) {
if offset == 0 {
offset = paginationOffset
}
if txView == "" {
txView = txViewNone
}
utils.Logger().Info().Str("Address", id).Msg("Querying address")
if id == "" {
utils.Logger().Warn().Msg("missing address id param")
return nil, nil
}
address := &Address{}
address.ID = id
// Try to populate the banace by directly calling get balance.
// Check the balance from blockchain rather than local DB dump
balanceAddr := big.NewInt(0)
if s.Service.GetAccountBalance != nil {
addr := common2.ParseAddr(id)
balance, err := s.Service.GetAccountBalance(addr)
if err == nil {
balanceAddr = balance
address.Balance = balance
}
}
key := GetAddressKey(id)
db := s.Service.Storage.GetDB()
bytes, err := db.Get([]byte(key))
if err != nil {
utils.Logger().Warn().Err(err).Str("id", id).Msg("cannot read address from db")
return address, nil
}
if err = rlp.DecodeBytes(bytes, &address); err != nil {
utils.Logger().Warn().Str("id", id).Msg("cannot convert data from DB")
return nil, err
}
address.Balance = balanceAddr
switch txView {
case txViewNone:
address.TXs = nil
case Received:
receivedTXs := make([]*Transaction, 0)
for _, tx := range address.TXs {
if tx.Type == Received {
receivedTXs = append(receivedTXs, tx)
}
}
address.TXs = receivedTXs
case Sent:
sentTXs := make([]*Transaction, 0)
for _, tx := range address.TXs {
if tx.Type == Sent {
sentTXs = append(sentTXs, tx)
}
}
address.TXs = sentTXs
}
if offset*page+offset > len(address.TXs) {
address.TXs = address.TXs[offset*page:]
} else {
address.TXs = address.TXs[offset*page : offset*page+offset]
} }
data.Address.TXs = paginatedTXs return address, nil
} }
// GetExplorerNodeCount serves /nodes end-point. // GetExplorerNodeCount serves /nodes end-point.
@ -547,7 +814,12 @@ func (s *Service) GetExplorerNodeCount(w http.ResponseWriter, r *http.Request) {
} }
} }
// GetExplorerShard serves /shard end-point // GetExplorerNodeCount rpc end-point.
func (s *ServiceAPI) GetExplorerNodeCount(ctx context.Context) int {
return len(s.Service.GetNodeIDs())
}
// GetExplorerShard serves /shard end-point.
func (s *Service) GetExplorerShard(w http.ResponseWriter, r *http.Request) { func (s *Service) GetExplorerShard(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
var nodes []Node var nodes []Node
@ -562,7 +834,18 @@ func (s *Service) GetExplorerShard(w http.ResponseWriter, r *http.Request) {
} }
} }
// NotifyService notify service // GetExplorerShard rpc end-point.
func (s *ServiceAPI) GetExplorerShard(ctx context.Context) *Shard {
var nodes []Node
for _, nodeID := range s.Service.GetNodeIDs() {
nodes = append(nodes, Node{
ID: libp2p_peer.IDB58Encode(nodeID),
})
}
return &Shard{Nodes: nodes}
}
// NotifyService notify service.
func (s *Service) NotifyService(params map[string]interface{}) { func (s *Service) NotifyService(params map[string]interface{}) {
return return
} }
@ -574,5 +857,12 @@ func (s *Service) SetMessageChan(messageChan chan *msg_pb.Message) {
// APIs for the services. // APIs for the services.
func (s *Service) APIs() []rpc.API { func (s *Service) APIs() []rpc.API {
return nil return []rpc.API{
{
Namespace: "explorer",
Version: "1.0",
Service: NewServiceAPI(s),
Public: true,
},
}
} }

@ -155,6 +155,7 @@ func (storage *Storage) UpdateTXStorage(batch ethdb.Batch, explorerTransaction *
} }
// UpdateAddress ... // UpdateAddress ...
// TODO: deprecate this logic
func (storage *Storage) UpdateAddress(batch ethdb.Batch, explorerTransaction *Transaction, tx *types.Transaction) { func (storage *Storage) UpdateAddress(batch ethdb.Batch, explorerTransaction *Transaction, tx *types.Transaction) {
explorerTransaction.Type = Received explorerTransaction.Type = Received
storage.UpdateAddressStorage(batch, explorerTransaction.To, explorerTransaction, tx) storage.UpdateAddressStorage(batch, explorerTransaction.To, explorerTransaction, tx)
@ -163,6 +164,7 @@ func (storage *Storage) UpdateAddress(batch ethdb.Batch, explorerTransaction *Tr
} }
// UpdateAddressStorage updates specific addr Address. // UpdateAddressStorage updates specific addr Address.
// TODO: deprecate this logic
func (storage *Storage) UpdateAddressStorage(batch ethdb.Batch, addr string, explorerTransaction *Transaction, tx *types.Transaction) { func (storage *Storage) UpdateAddressStorage(batch ethdb.Batch, addr string, explorerTransaction *Transaction, tx *types.Transaction) {
key := GetAddressKey(addr) key := GetAddressKey(addr)
@ -170,7 +172,11 @@ func (storage *Storage) UpdateAddressStorage(batch ethdb.Batch, addr string, exp
if data, err := storage.db.Get([]byte(key)); err == nil { if data, err := storage.db.Get([]byte(key)); err == nil {
err = rlp.DecodeBytes(data, &address) err = rlp.DecodeBytes(data, &address)
if err == nil { if err == nil {
address.Balance.Add(address.Balance, tx.Value()) if explorerTransaction.Type == Received {
address.Balance.Add(address.Balance, tx.Value())
} else {
address.Balance.Sub(address.Balance, tx.Value())
}
} else { } else {
utils.Logger().Error().Err(err).Msg("Failed to error") utils.Logger().Error().Err(err).Msg("Failed to error")
} }

@ -57,6 +57,9 @@ type Transaction struct {
Value *big.Int `json:"value"` Value *big.Int `json:"value"`
Bytes string `json:"bytes"` Bytes string `json:"bytes"`
Data string `json:"data"` Data string `json:"data"`
GasFee *big.Int `json:"gasFee"`
FromShard uint32 `json:"fromShard"`
ToShard uint32 `json:"toShard"`
Type string `json:"type"` Type string `json:"type"`
} }
@ -118,6 +121,8 @@ func GetTransaction(tx *types.Transaction, addressBlock *types.Block) *Transacti
if err != nil { if err != nil {
utils.Logger().Error().Err(err).Msg("Error when parsing tx into message") utils.Logger().Error().Err(err).Msg("Error when parsing tx into message")
} }
gasFee := big.NewInt(0)
gasFee = gasFee.Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas()))
return &Transaction{ return &Transaction{
ID: tx.Hash().Hex(), ID: tx.Hash().Hex(),
Timestamp: strconv.Itoa(int(addressBlock.Time().Int64() * 1000)), Timestamp: strconv.Itoa(int(addressBlock.Time().Int64() * 1000)),
@ -126,6 +131,9 @@ func GetTransaction(tx *types.Transaction, addressBlock *types.Block) *Transacti
Value: msg.Value(), Value: msg.Value(),
Bytes: strconv.Itoa(int(tx.Size())), Bytes: strconv.Itoa(int(tx.Size())),
Data: hex.EncodeToString(tx.Data()), Data: hex.EncodeToString(tx.Data()),
GasFee: gasFee,
FromShard: tx.ShardID(),
ToShard: tx.ToShardID(),
Type: "", Type: "",
} }
} }

@ -11,6 +11,7 @@ import (
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
msg_pb "github.com/harmony-one/harmony/api/proto/message" msg_pb "github.com/harmony-one/harmony/api/proto/message"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
"github.com/harmony-one/harmony/internal/utils" "github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/p2p" "github.com/harmony-one/harmony/p2p"
badger "github.com/ipfs/go-ds-badger" badger "github.com/ipfs/go-ds-badger"
@ -60,7 +61,7 @@ const (
func New(h p2p.Host, rendezvous p2p.GroupID, peerChan chan p2p.Peer, bootnodes utils.AddrList) *Service { func New(h p2p.Host, rendezvous p2p.GroupID, peerChan chan p2p.Peer, bootnodes utils.AddrList) *Service {
var cancel context.CancelFunc var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(context.Background(), connectionTimeout) ctx, cancel = context.WithTimeout(context.Background(), connectionTimeout)
dataStore, err := badger.NewDatastore(fmt.Sprintf(".dht-%s-%s", h.GetSelfPeer().IP, h.GetSelfPeer().Port), nil) dataStore, err := badger.NewDatastore(fmt.Sprintf("%s/.dht-%s-%s", nodeconfig.GetTempDir(), h.GetSelfPeer().IP, h.GetSelfPeer().Port), nil)
if err != nil { if err != nil {
panic(err) panic(err)
} }

@ -32,6 +32,7 @@ const (
inSyncThreshold = 0 // when peerBlockHeight - myBlockHeight <= inSyncThreshold, it's ready to join consensus inSyncThreshold = 0 // when peerBlockHeight - myBlockHeight <= inSyncThreshold, it's ready to join consensus
BatchSize uint32 = 1000 //maximum size for one query of block hashes BatchSize uint32 = 1000 //maximum size for one query of block hashes
SyncLoopFrequency = 1 // unit in second SyncLoopFrequency = 1 // unit in second
LastMileBlocksSize = 10
) )
// SyncPeerConfig is peer config to sync. // SyncPeerConfig is peer config to sync.
@ -105,6 +106,7 @@ type StateSync struct {
syncConfig *SyncConfig syncConfig *SyncConfig
stateSyncTaskQueue *queue.Queue stateSyncTaskQueue *queue.Queue
syncMux sync.Mutex syncMux sync.Mutex
lastMileMux sync.Mutex
} }
func (ss *StateSync) purgeAllBlocksFromCache() { func (ss *StateSync) purgeAllBlocksFromCache() {
@ -130,9 +132,13 @@ func (ss *StateSync) purgeOldBlocksFromCache() {
} }
// AddLastMileBlock add the lastest a few block into queue for syncing // AddLastMileBlock add the lastest a few block into queue for syncing
// only keep the latest blocks with size capped by LastMileBlocksSize
func (ss *StateSync) AddLastMileBlock(block *types.Block) { func (ss *StateSync) AddLastMileBlock(block *types.Block) {
ss.syncMux.Lock() ss.lastMileMux.Lock()
defer ss.syncMux.Unlock() defer ss.lastMileMux.Unlock()
if len(ss.lastMileBlocks) >= LastMileBlocksSize {
ss.lastMileBlocks = ss.lastMileBlocks[1:]
}
ss.lastMileBlocks = append(ss.lastMileBlocks, block) ss.lastMileBlocks = append(ss.lastMileBlocks, block)
} }
@ -340,8 +346,8 @@ func (ss *StateSync) GetConsensusHashes(startHash []byte, size uint32) bool {
response := peerConfig.client.GetBlockHashes(startHash, size, ss.selfip, ss.selfport) response := peerConfig.client.GetBlockHashes(startHash, size, ss.selfip, ss.selfport)
if response == nil { if response == nil {
utils.Logger().Warn(). utils.Logger().Warn().
Str("peer IP", peerConfig.ip). Str("peerIP", peerConfig.ip).
Str("peer Port", peerConfig.port). Str("peerPort", peerConfig.port).
Msg("[SYNC] GetConsensusHashes Nil Response") Msg("[SYNC] GetConsensusHashes Nil Response")
return return
} }
@ -540,9 +546,15 @@ func (ss *StateSync) updateBlockAndStatus(block *types.Block, bc *core.BlockChai
// Verify block signatures // Verify block signatures
// TODO chao: only when block is verified against last commit sigs, we can update the block and status // TODO chao: only when block is verified against last commit sigs, we can update the block and status
if block.NumberU64() > 1 { if block.NumberU64() > 1 {
err := bc.Engine().VerifyHeader(bc, block.Header(), true) // Verify signature every 100 blocks
verifySig := block.NumberU64()%100 == 0
err := bc.Engine().VerifyHeader(bc, block.Header(), verifySig)
if err != nil { if err != nil {
utils.Logger().Error().Err(err).Msgf("[SYNC] failed verifying signatures for new block %d", block.NumberU64()) utils.Logger().Error().Err(err).Msgf("[SYNC] failed verifying signatures for new block %d", block.NumberU64())
utils.Logger().Debug().Interface("block", bc.CurrentBlock()).Msg("[SYNC] Rolling back last 99 blocks!")
for i := 0; i < 99; i++ {
bc.Rollback([]common.Hash{bc.CurrentBlock().Hash()})
}
return false return false
} }
} }
@ -555,11 +567,6 @@ func (ss *StateSync) updateBlockAndStatus(block *types.Block, bc *core.BlockChai
bc.Rollback([]common.Hash{bc.CurrentBlock().Hash()}) bc.Rollback([]common.Hash{bc.CurrentBlock().Hash()})
return false return false
} }
ss.syncMux.Lock()
if err := worker.UpdateCurrent(block.Header().Coinbase()); err != nil {
utils.Logger().Warn().Err(err).Msg("[SYNC] (*Worker).UpdateCurrent failed")
}
ss.syncMux.Unlock()
utils.Logger().Info(). utils.Logger().Info().
Uint64("blockHeight", bc.CurrentBlock().NumberU64()). Uint64("blockHeight", bc.CurrentBlock().NumberU64()).
Str("blockHex", bc.CurrentBlock().Hash().Hex()). Str("blockHex", bc.CurrentBlock().Hash().Hex()).
@ -696,10 +703,10 @@ func (ss *StateSync) getMaxPeerHeight(isBeacon bool) uint64 {
go func() { go func() {
defer wg.Done() defer wg.Done()
//debug //debug
// utils.Logger().Debug().Bool("isBeacon", isBeacon).Str("IP", peerConfig.ip).Str("Port", peerConfig.port).Msg("[Sync]getMaxPeerHeight") // utils.Logger().Debug().Bool("isBeacon", isBeacon).Str("peerIP", peerConfig.ip).Str("peerPort", peerConfig.port).Msg("[Sync]getMaxPeerHeight")
response, err := peerConfig.client.GetBlockChainHeight() response, err := peerConfig.client.GetBlockChainHeight()
if err != nil { if err != nil {
utils.Logger().Warn().Err(err).Str("IP", peerConfig.ip).Str("Port", peerConfig.port).Msg("[Sync]GetBlockChainHeight failed") utils.Logger().Warn().Err(err).Str("peerIP", peerConfig.ip).Str("peerPort", peerConfig.port).Msg("[Sync]GetBlockChainHeight failed")
return return
} }
ss.syncMux.Lock() ss.syncMux.Lock()
@ -734,7 +741,7 @@ func (ss *StateSync) IsOutOfSync(bc *core.BlockChain) bool {
} }
// SyncLoop will keep syncing with peers until catches up // SyncLoop will keep syncing with peers until catches up
func (ss *StateSync) SyncLoop(bc *core.BlockChain, worker *worker.Worker, willJoinConsensus bool, isBeacon bool) { func (ss *StateSync) SyncLoop(bc *core.BlockChain, worker *worker.Worker, isBeacon bool) {
if !isBeacon { if !isBeacon {
ss.RegisterNodeInfo() ss.RegisterNodeInfo()
} }

@ -45,6 +45,7 @@ var (
ForTest = NewFactory(params.TestChainConfig) ForTest = NewFactory(params.TestChainConfig)
ForTestnet = NewFactory(params.TestnetChainConfig) ForTestnet = NewFactory(params.TestnetChainConfig)
ForMainnet = NewFactory(params.MainnetChainConfig) ForMainnet = NewFactory(params.MainnetChainConfig)
ForPangaea = NewFactory(params.PangaeaChainConfig)
) )
// NewTestHeader creates a new, empty header object for epoch 0 using the test // NewTestHeader creates a new, empty header object for epoch 0 using the test

@ -2,6 +2,7 @@ package main
const ( const (
defaultWalletIni = `[main] defaultWalletIni = `[main]
chain_id = 1
bootnode = /ip4/100.26.90.187/tcp/9874/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv bootnode = /ip4/100.26.90.187/tcp/9874/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv
bootnode = /ip4/54.213.43.194/tcp/9874/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9 bootnode = /ip4/54.213.43.194/tcp/9874/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9
bootnode = /ip4/13.113.101.219/tcp/12019/p2p/QmQayinFSgMMw5cSpDUiD9pQ2WeP6WNmGxpZ6ou3mdVFJX bootnode = /ip4/13.113.101.219/tcp/12019/p2p/QmQayinFSgMMw5cSpDUiD9pQ2WeP6WNmGxpZ6ou3mdVFJX
@ -25,6 +26,7 @@ rpc = l3.t.hmny.io:14555
rpc = s3.t.hmny.io:14555 rpc = s3.t.hmny.io:14555
[local] [local]
chain_id = 2
bootnode = /ip4/127.0.0.1/tcp/19876/p2p/Qmc1V6W7BwX8Ugb42Ti8RnXF1rY5PF7nnZ6bKBryCgi6cv bootnode = /ip4/127.0.0.1/tcp/19876/p2p/Qmc1V6W7BwX8Ugb42Ti8RnXF1rY5PF7nnZ6bKBryCgi6cv
shards = 2 shards = 2
@ -39,6 +41,7 @@ rpc = 127.0.0.1:14558
rpc = 127.0.0.1:14560 rpc = 127.0.0.1:14560
[beta] [beta]
chain_id = 2
bootnode = /ip4/54.213.43.194/tcp/9868/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9 bootnode = /ip4/54.213.43.194/tcp/9868/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9
bootnode = /ip4/100.26.90.187/tcp/9868/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv bootnode = /ip4/100.26.90.187/tcp/9868/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv
bootnode = /ip4/13.113.101.219/tcp/12018/p2p/QmQayinFSgMMw5cSpDUiD9pQ2WeP6WNmGxpZ6ou3mdVFJX bootnode = /ip4/13.113.101.219/tcp/12018/p2p/QmQayinFSgMMw5cSpDUiD9pQ2WeP6WNmGxpZ6ou3mdVFJX
@ -53,24 +56,25 @@ rpc = l1.b.hmny.io:14555
rpc = s1.b.hmny.io:14555 rpc = s1.b.hmny.io:14555
[pangaea] [pangaea]
bootnode = /ip4/54.86.126.90/tcp/9867/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv chain_id = 3
bootnode = /ip4/52.40.84.2/tcp/9867/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9 bootnode = /ip4/54.86.126.90/tcp/9889/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv
bootnode = /ip4/52.40.84.2/tcp/9889/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9
shards = 4 shards = 4
[pangaea.shard0.rpc] [pangaea.shard0.rpc]
rpc = l0.n.hmny.io:14555 rpc = l0.p.hmny.io:14555
rpc = s0.n.hmny.io:14555 rpc = s0.p.hmny.io:14555
[pangaea.shard1.rpc] [pangaea.shard1.rpc]
rpc = l1.n.hmny.io:14555 rpc = l1.p.hmny.io:14555
rpc = s1.n.hmny.io:14555 rpc = s1.p.hmny.io:14555
[pangaea.shard2.rpc] [pangaea.shard2.rpc]
rpc = l2.n.hmny.io:14555 rpc = l2.p.hmny.io:14555
rpc = s2.n.hmny.io:14555 rpc = s2.p.hmny.io:14555
[pangaea.shard3.rpc] [pangaea.shard3.rpc]
rpc = l3.n.hmny.io:14555 rpc = l3.p.hmny.io:14555
rpc = s3.n.hmny.io:14555 rpc = s3.p.hmny.io:14555
` `
) )

@ -15,8 +15,9 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
color "github.com/fatih/color" "github.com/fatih/color"
ffi_bls "github.com/harmony-one/bls/ffi/go/bls" ffi_bls "github.com/harmony-one/bls/ffi/go/bls"
"github.com/harmony-one/harmony/accounts" "github.com/harmony-one/harmony/accounts"
"github.com/harmony-one/harmony/accounts/keystore" "github.com/harmony-one/harmony/accounts/keystore"
"github.com/harmony-one/harmony/api/client" "github.com/harmony-one/harmony/api/client"
@ -29,7 +30,6 @@ import (
common2 "github.com/harmony-one/harmony/internal/common" common2 "github.com/harmony-one/harmony/internal/common"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node" nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
"github.com/harmony-one/harmony/internal/ctxerror" "github.com/harmony-one/harmony/internal/ctxerror"
"github.com/harmony-one/harmony/internal/params"
"github.com/harmony-one/harmony/internal/shardchain" "github.com/harmony-one/harmony/internal/shardchain"
"github.com/harmony-one/harmony/internal/utils" "github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/node" "github.com/harmony-one/harmony/node"
@ -767,11 +767,12 @@ func processTransferCommand() {
fmt.Printf("Unlock account succeeded! '%v'\n", senderPass) fmt.Printf("Unlock account succeeded! '%v'\n", senderPass)
chainConfig := params.MainnetChainConfig chainID, ok := new(big.Int).SetString(walletProfile.ChainID, 0)
if walletProfile.Profile != defaultProfile { if !ok {
chainConfig = params.TestnetChainConfig fmt.Printf("invalid chain ID %#v in config", walletProfile.ChainID)
return
} }
tx, err = ks.SignTx(account, tx, chainConfig.ChainID) tx, err = ks.SignTx(account, tx, chainID)
if err != nil { if err != nil {
fmt.Printf("SignTx Error: %v\n", err) fmt.Printf("SignTx Error: %v\n", err)
return return

@ -2,6 +2,7 @@ package main
const ( const (
defaultWalletIni = `[main] defaultWalletIni = `[main]
chain_id = 1
bootnode = /ip4/100.26.90.187/tcp/9874/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv bootnode = /ip4/100.26.90.187/tcp/9874/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv
bootnode = /ip4/54.213.43.194/tcp/9874/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9 bootnode = /ip4/54.213.43.194/tcp/9874/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9
bootnode = /ip4/13.113.101.219/tcp/12019/p2p/QmQayinFSgMMw5cSpDUiD9pQ2WeP6WNmGxpZ6ou3mdVFJX bootnode = /ip4/13.113.101.219/tcp/12019/p2p/QmQayinFSgMMw5cSpDUiD9pQ2WeP6WNmGxpZ6ou3mdVFJX
@ -25,6 +26,7 @@ rpc = l3.t.hmny.io:14555
rpc = s3.t.hmny.io:14555 rpc = s3.t.hmny.io:14555
[local] [local]
chain_id = 2
bootnode = /ip4/127.0.0.1/tcp/19876/p2p/Qmc1V6W7BwX8Ugb42Ti8RnXF1rY5PF7nnZ6bKBryCgi6cv bootnode = /ip4/127.0.0.1/tcp/19876/p2p/Qmc1V6W7BwX8Ugb42Ti8RnXF1rY5PF7nnZ6bKBryCgi6cv
shards = 2 shards = 2
@ -39,6 +41,7 @@ rpc = 127.0.0.1:14558
rpc = 127.0.0.1:14560 rpc = 127.0.0.1:14560
[beta] [beta]
chain_id = 2
bootnode = /ip4/54.213.43.194/tcp/9868/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9 bootnode = /ip4/54.213.43.194/tcp/9868/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9
bootnode = /ip4/100.26.90.187/tcp/9868/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv bootnode = /ip4/100.26.90.187/tcp/9868/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv
bootnode = /ip4/13.113.101.219/tcp/12018/p2p/QmQayinFSgMMw5cSpDUiD9pQ2WeP6WNmGxpZ6ou3mdVFJX bootnode = /ip4/13.113.101.219/tcp/12018/p2p/QmQayinFSgMMw5cSpDUiD9pQ2WeP6WNmGxpZ6ou3mdVFJX
@ -53,24 +56,25 @@ rpc = l1.b.hmny.io:14555
rpc = s1.b.hmny.io:14555 rpc = s1.b.hmny.io:14555
[pangaea] [pangaea]
bootnode = /ip4/54.86.126.90/tcp/9867/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv chain_id = 3
bootnode = /ip4/52.40.84.2/tcp/9867/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9 bootnode = /ip4/54.86.126.90/tcp/9889/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv
bootnode = /ip4/52.40.84.2/tcp/9889/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9
shards = 4 shards = 4
[pangaea.shard0.rpc] [pangaea.shard0.rpc]
rpc = l0.n.hmny.io:14555 rpc = l0.p.hmny.io:14555
rpc = s0.n.hmny.io:14555 rpc = s0.p.hmny.io:14555
[pangaea.shard1.rpc] [pangaea.shard1.rpc]
rpc = l1.n.hmny.io:14555 rpc = l1.p.hmny.io:14555
rpc = s1.n.hmny.io:14555 rpc = s1.p.hmny.io:14555
[pangaea.shard2.rpc] [pangaea.shard2.rpc]
rpc = l2.n.hmny.io:14555 rpc = l2.p.hmny.io:14555
rpc = s2.n.hmny.io:14555 rpc = s2.p.hmny.io:14555
[pangaea.shard3.rpc] [pangaea.shard3.rpc]
rpc = l3.n.hmny.io:14555 rpc = l3.p.hmny.io:14555
rpc = s3.n.hmny.io:14555 rpc = s3.p.hmny.io:14555
` `
) )

@ -11,10 +11,9 @@ import (
"sync" "sync"
"time" "time"
"github.com/harmony-one/harmony/internal/params"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/harmony-one/harmony/accounts" "github.com/harmony-one/harmony/accounts"
"github.com/harmony-one/harmony/accounts/keystore" "github.com/harmony-one/harmony/accounts/keystore"
"github.com/harmony-one/harmony/api/client" "github.com/harmony-one/harmony/api/client"
@ -216,6 +215,12 @@ func processStressTestCommand() {
*/ */
chainID, ok := new(big.Int).SetString(walletProfile.ChainID, 0)
if !ok {
fmt.Printf("invalid chain ID %#v in config", walletProfile.ChainID)
return
}
fmt.Println("Creating wallet node") fmt.Println("Creating wallet node")
walletNode := createWalletNode() walletNode := createWalletNode()
@ -288,11 +293,7 @@ func processStressTestCommand() {
ks.Unlock(account, senderPass) ks.Unlock(account, senderPass)
chainConfig := params.MainnetChainConfig tx, _ = ks.SignTx(account, tx, chainID)
if walletProfile.Profile != defaultProfile {
chainConfig = params.TestnetChainConfig
}
tx, _ = ks.SignTx(account, tx, chainConfig.ChainID)
if err := submitTransaction(tx, walletNode, uint32(shardID)); err != nil { if err := submitTransaction(tx, walletNode, uint32(shardID)); err != nil {
fmt.Println(ctxerror.New("submitTransaction failed", fmt.Println(ctxerror.New("submitTransaction failed",

@ -42,7 +42,7 @@ var (
commit string commit string
) )
// InitLDBDatabase initializes a LDBDatabase. isGenesis=true will return the beacon chain database for normal shard nodes // InitLDBDatabase initializes a LDBDatabase. will return the beacon chain database for normal shard nodes
func InitLDBDatabase(ip string, port string, freshDB bool, isBeacon bool) (*ethdb.LDBDatabase, error) { func InitLDBDatabase(ip string, port string, freshDB bool, isBeacon bool) (*ethdb.LDBDatabase, error) {
var dbFileName string var dbFileName string
if isBeacon { if isBeacon {
@ -80,14 +80,12 @@ var (
minPeers = flag.Int("min_peers", 32, "Minimal number of Peers in shard") minPeers = flag.Int("min_peers", 32, "Minimal number of Peers in shard")
// Key file to store the private key // Key file to store the private key
keyFile = flag.String("key", "./.hmykey", "the p2p key file of the harmony node") keyFile = flag.String("key", "./.hmykey", "the p2p key file of the harmony node")
// isGenesis indicates this node is a genesis node
isGenesis = flag.Bool("is_genesis", true, "true means this node is a genesis node")
// isArchival indicates this node is an archival node that will save and archive current blockchain // isArchival indicates this node is an archival node that will save and archive current blockchain
isArchival = flag.Bool("is_archival", true, "false makes node faster by turning caching off") isArchival = flag.Bool("is_archival", true, "false makes node faster by turning caching off")
// delayCommit is the commit-delay timer, used by Harmony nodes // delayCommit is the commit-delay timer, used by Harmony nodes
delayCommit = flag.String("delay_commit", "0ms", "how long to delay sending commit messages in consensus, ex: 500ms, 1s") delayCommit = flag.String("delay_commit", "0ms", "how long to delay sending commit messages in consensus, ex: 500ms, 1s")
// isExplorer indicates this node is a node to serve explorer // nodeType indicates the type of the node: validator, explorer
isExplorer = flag.Bool("is_explorer", false, "true means this node is a node to serve explorer") nodeType = flag.String("node_type", "validator", "node type: validator, explorer")
// networkType indicates the type of the network // networkType indicates the type of the network
networkType = flag.String("network_type", "mainnet", "type of the network: mainnet, testnet, devnet, localnet") networkType = flag.String("network_type", "mainnet", "type of the network: mainnet, testnet, devnet, localnet")
// blockPeriod indicates the how long the leader waits to propose a new block. // blockPeriod indicates the how long the leader waits to propose a new block.
@ -126,6 +124,8 @@ var (
metricsFlag = flag.Bool("metrics", false, "Collect and upload node metrics") metricsFlag = flag.Bool("metrics", false, "Collect and upload node metrics")
pushgatewayIP = flag.String("pushgateway_ip", "grafana.harmony.one", "Metrics view ip") pushgatewayIP = flag.String("pushgateway_ip", "grafana.harmony.one", "Metrics view ip")
pushgatewayPort = flag.String("pushgateway_port", "9091", "Metrics view port") pushgatewayPort = flag.String("pushgateway_port", "9091", "Metrics view port")
publicRPC = flag.Bool("public_rpc", false, "Enable Public RPC Access (default: false)")
) )
func initSetup() { func initSetup() {
@ -170,9 +170,11 @@ func initSetup() {
func passphraseForBls() { func passphraseForBls() {
// If FN node running, they should either specify blsPrivateKey or the file with passphrase // If FN node running, they should either specify blsPrivateKey or the file with passphrase
if *isExplorer { // However, explorer or non-validator nodes need no blskey
if *nodeType != "validator" {
return return
} }
if *blsKeyFile == "" || *blsPass == "" { if *blsKeyFile == "" || *blsPass == "" {
fmt.Println("Internal nodes need to have pass to decrypt blskey") fmt.Println("Internal nodes need to have pass to decrypt blskey")
os.Exit(101) os.Exit(101)
@ -233,7 +235,7 @@ func createGlobalConfig() *nodeconfig.ConfigType {
var err error var err error
nodeConfig := nodeconfig.GetShardConfig(initialAccount.ShardID) nodeConfig := nodeconfig.GetShardConfig(initialAccount.ShardID)
if !*isExplorer { if *nodeType == "validator" {
// Set up consensus keys. // Set up consensus keys.
setupConsensusKey(nodeConfig) setupConsensusKey(nodeConfig)
} else { } else {
@ -336,17 +338,17 @@ func setupConsensusAndNode(nodeConfig *nodeconfig.ConfigType) *node.Node {
currentNode.NodeConfig.SetBeaconGroupID(p2p.NewGroupIDByShardID(0)) currentNode.NodeConfig.SetBeaconGroupID(p2p.NewGroupIDByShardID(0))
if *isExplorer { switch *nodeType {
case "explorer":
currentNode.NodeConfig.SetRole(nodeconfig.ExplorerNode) currentNode.NodeConfig.SetRole(nodeconfig.ExplorerNode)
currentNode.NodeConfig.SetShardGroupID(p2p.NewGroupIDByShardID(p2p.ShardID(*shardID))) currentNode.NodeConfig.SetShardGroupID(p2p.NewGroupIDByShardID(p2p.ShardID(*shardID)))
currentNode.NodeConfig.SetClientGroupID(p2p.NewClientGroupIDByShardID(p2p.ShardID(*shardID))) currentNode.NodeConfig.SetClientGroupID(p2p.NewClientGroupIDByShardID(p2p.ShardID(*shardID)))
} else { case "validator":
currentNode.NodeConfig.SetRole(nodeconfig.Validator)
if nodeConfig.ShardID == 0 { if nodeConfig.ShardID == 0 {
currentNode.NodeConfig.SetRole(nodeconfig.Validator)
currentNode.NodeConfig.SetShardGroupID(p2p.GroupIDBeacon) currentNode.NodeConfig.SetShardGroupID(p2p.GroupIDBeacon)
currentNode.NodeConfig.SetClientGroupID(p2p.GroupIDBeaconClient) currentNode.NodeConfig.SetClientGroupID(p2p.GroupIDBeaconClient)
} else { } else {
currentNode.NodeConfig.SetRole(nodeconfig.Validator)
currentNode.NodeConfig.SetShardGroupID(p2p.NewGroupIDByShardID(p2p.ShardID(nodeConfig.ShardID))) currentNode.NodeConfig.SetShardGroupID(p2p.NewGroupIDByShardID(p2p.ShardID(nodeConfig.ShardID)))
currentNode.NodeConfig.SetClientGroupID(p2p.NewClientGroupIDByShardID(p2p.ShardID(nodeConfig.ShardID))) currentNode.NodeConfig.SetClientGroupID(p2p.NewClientGroupIDByShardID(p2p.ShardID(nodeConfig.ShardID)))
} }
@ -365,8 +367,8 @@ func setupConsensusAndNode(nodeConfig *nodeconfig.ConfigType) *node.Node {
// currentNode.DRand = dRand // currentNode.DRand = dRand
// This needs to be executed after consensus and drand are setup // This needs to be executed after consensus and drand are setup
if err := currentNode.GetInitShardState(); err != nil { if err := currentNode.CalculateInitShardState(); err != nil {
ctxerror.Crit(utils.GetLogger(), err, "GetInitShardState failed", ctxerror.Crit(utils.GetLogger(), err, "CalculateInitShardState failed",
"shardID", *shardID) "shardID", *shardID)
} }
@ -397,6 +399,16 @@ func main() {
flag.Var(&utils.BootNodes, "bootnodes", "a list of bootnode multiaddress (delimited by ,)") flag.Var(&utils.BootNodes, "bootnodes", "a list of bootnode multiaddress (delimited by ,)")
flag.Parse() flag.Parse()
switch *nodeType {
case "validator":
case "explorer":
break
default:
fmt.Fprintf(os.Stderr, "Unknown node type: %s\n", *nodeType)
os.Exit(1)
}
nodeconfig.SetPublicRPC(*publicRPC)
nodeconfig.SetVersion(fmt.Sprintf("Harmony (C) 2019. %v, version %v-%v (%v %v)", path.Base(os.Args[0]), version, commit, builtBy, builtAt)) nodeconfig.SetVersion(fmt.Sprintf("Harmony (C) 2019. %v, version %v-%v (%v %v)", path.Base(os.Args[0]), version, commit, builtBy, builtAt))
if *versionFlag { if *versionFlag {
printVersion() printVersion()
@ -433,7 +445,7 @@ func main() {
memprofiling.MaybeCallGCPeriodically() memprofiling.MaybeCallGCPeriodically()
} }
if !*isExplorer { if *nodeType == "validator" {
setupInitialAccount() setupInitialAccount()
} }
@ -448,13 +460,13 @@ func main() {
nodeConfig := createGlobalConfig() nodeConfig := createGlobalConfig()
currentNode := setupConsensusAndNode(nodeConfig) currentNode := setupConsensusAndNode(nodeConfig)
if nodeConfig.ShardID != 0 { if nodeConfig.ShardID != 0 && currentNode.NodeConfig.Role() != nodeconfig.ExplorerNode {
utils.GetLogInstance().Info("SupportBeaconSyncing", "shardID", currentNode.Blockchain().ShardID(), "shardID1", nodeConfig.ShardID) utils.GetLogInstance().Info("SupportBeaconSyncing", "shardID", currentNode.Blockchain().ShardID(), "shardID", nodeConfig.ShardID)
go currentNode.SupportBeaconSyncing() go currentNode.SupportBeaconSyncing()
} }
startMsg := "==== New Harmony Node ====" startMsg := "==== New Harmony Node ===="
if *isExplorer { if *nodeType == "explorer" {
startMsg = "==== New Explorer Node ====" startMsg = "==== New Explorer Node ===="
} }
@ -474,11 +486,11 @@ func main() {
go currentNode.SupportSyncing() go currentNode.SupportSyncing()
currentNode.ServiceManagerSetup() currentNode.ServiceManagerSetup()
currentNode.RunServices()
// RPC for SDK not supported for mainnet. // RPC for SDK not supported for mainnet.
if err := currentNode.StartRPC(*port); err != nil { if err := currentNode.StartRPC(*port); err != nil {
ctxerror.Warn(utils.GetLogger(), err, "StartRPC failed") ctxerror.Warn(utils.GetLogger(), err, "StartRPC failed")
} }
currentNode.RunServices()
// Run additional node collectors // Run additional node collectors
// Collect node metrics if metrics flag is set // Collect node metrics if metrics flag is set

@ -505,7 +505,7 @@ func (consensus *Consensus) UpdateConsensusInformation() Mode {
header := consensus.ChainReader.CurrentHeader() header := consensus.ChainReader.CurrentHeader()
epoch := header.Epoch() epoch := header.Epoch()
curPubKeys := core.GetPublicKeys(epoch, header.ShardID()) curPubKeys := core.CalculatePublicKeys(epoch, header.ShardID())
consensus.numPrevPubKeys = len(curPubKeys) consensus.numPrevPubKeys = len(curPubKeys)
consensus.getLogger().Info().Msg("[UpdateConsensusInformation] Updating.....") consensus.getLogger().Info().Msg("[UpdateConsensusInformation] Updating.....")
@ -515,7 +515,7 @@ func (consensus *Consensus) UpdateConsensusInformation() Mode {
consensus.SetEpochNum(epoch.Uint64() + 1) consensus.SetEpochNum(epoch.Uint64() + 1)
consensus.getLogger().Info().Uint64("headerNum", header.Number().Uint64()).Msg("[UpdateConsensusInformation] Epoch updated for next epoch") consensus.getLogger().Info().Uint64("headerNum", header.Number().Uint64()).Msg("[UpdateConsensusInformation] Epoch updated for next epoch")
nextEpoch := new(big.Int).Add(epoch, common.Big1) nextEpoch := new(big.Int).Add(epoch, common.Big1)
pubKeys = core.GetPublicKeys(nextEpoch, header.ShardID()) pubKeys = core.CalculatePublicKeys(nextEpoch, header.ShardID())
} else { } else {
consensus.SetEpochNum(epoch.Uint64()) consensus.SetEpochNum(epoch.Uint64())
pubKeys = curPubKeys pubKeys = curPubKeys

@ -107,6 +107,7 @@ func (consensus *Consensus) announce(block *types.Block) {
// save announce message to PbftLog // save announce message to PbftLog
msgPayload, _ := proto.GetConsensusMessagePayload(msgToSend) msgPayload, _ := proto.GetConsensusMessagePayload(msgToSend)
// TODO(chao): don't unmarshall the message here and direclty pass the original object.
msg := &msg_pb.Message{} msg := &msg_pb.Message{}
_ = protobuf.Unmarshal(msgPayload, msg) _ = protobuf.Unmarshal(msgPayload, msg)
pbftMsg, err := ParsePbftMessage(msg) pbftMsg, err := ParsePbftMessage(msg)
@ -115,6 +116,7 @@ func (consensus *Consensus) announce(block *types.Block) {
return return
} }
// TODO(chao): review pbft log data structure
consensus.PbftLog.AddMessage(pbftMsg) consensus.PbftLog.AddMessage(pbftMsg)
utils.Logger().Debug(). utils.Logger().Debug().
Str("MsgBlockHash", pbftMsg.BlockHash.Hex()). Str("MsgBlockHash", pbftMsg.BlockHash.Hex()).
@ -183,6 +185,7 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) {
} }
// verify validity of block header object // verify validity of block header object
// TODO: think about just sending the block hash instead of the header.
encodedHeader := recvMsg.Payload encodedHeader := recvMsg.Payload
header := new(block.Header) header := new(block.Header)
err = rlp.DecodeBytes(encodedHeader, header) err = rlp.DecodeBytes(encodedHeader, header)
@ -384,6 +387,7 @@ func (consensus *Consensus) onPrepare(msg *msg_pb.Message) {
consensus.aggregatedPrepareSig = aggSig consensus.aggregatedPrepareSig = aggSig
//leader adds prepared message to log //leader adds prepared message to log
// TODO(chao): don't unmarshall the payload again
msgPayload, _ := proto.GetConsensusMessagePayload(msgToSend) msgPayload, _ := proto.GetConsensusMessagePayload(msgToSend)
msg := &msg_pb.Message{} msg := &msg_pb.Message{}
_ = protobuf.Unmarshal(msgPayload, msg) _ = protobuf.Unmarshal(msgPayload, msg)
@ -678,6 +682,7 @@ func (consensus *Consensus) onCommit(msg *msg_pb.Message) {
return return
} }
// has to be called before verifying signature
quorumWasMet := len(commitSigs) >= consensus.Quorum() quorumWasMet := len(commitSigs) >= consensus.Quorum()
// Verify the signature on commitPayload is correct // Verify the signature on commitPayload is correct

@ -56,7 +56,7 @@ type Engine interface {
// is used for verifying "incoming" block header against commit signature and bitmap sent from the other chain cross-shard via libp2p. // is used for verifying "incoming" block header against commit signature and bitmap sent from the other chain cross-shard via libp2p.
// i.e. this header verification api is more flexible since the caller specifies which commit signature and bitmap to use // i.e. this header verification api is more flexible since the caller specifies which commit signature and bitmap to use
// for verifying the block header, which is necessary for cross-shard block header verification. Example of such is cross-shard transaction. // for verifying the block header, which is necessary for cross-shard block header verification. Example of such is cross-shard transaction.
VerifyHeaderWithSignature(header *block.Header, commitSig []byte, commitBitmap []byte) error VerifyHeaderWithSignature(chain ChainReader, header *block.Header, commitSig []byte, commitBitmap []byte) error
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and // concurrently. The method returns a quit channel to abort the operations and

@ -98,9 +98,11 @@ func (v *BlockValidator) ValidateState(block, parent *types.Block, statedb *stat
return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash(), receiptSha) return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash(), receiptSha)
} }
cxsSha := types.DeriveMultipleShardsSha(cxReceipts) if v.config.IsCrossTx(block.Epoch()) {
if cxsSha != header.OutgoingReceiptHash() { cxsSha := types.DeriveMultipleShardsSha(cxReceipts)
return fmt.Errorf("invalid cross shard receipt root hash (remote: %x local: %x)", header.OutgoingReceiptHash(), cxsSha) if cxsSha != header.OutgoingReceiptHash() {
return fmt.Errorf("invalid cross shard receipt root hash (remote: %x local: %x)", header.OutgoingReceiptHash(), cxsSha)
}
} }
// Validate the state root against the received state root and throw // Validate the state root against the received state root and throw
@ -171,6 +173,10 @@ func CalcGasLimit(parent *types.Block, gasFloor, gasCeil uint64) uint64 {
// ValidateCXReceiptsProof checks whether the given CXReceiptsProof is consistency with itself // ValidateCXReceiptsProof checks whether the given CXReceiptsProof is consistency with itself
func (v *BlockValidator) ValidateCXReceiptsProof(cxp *types.CXReceiptsProof) error { func (v *BlockValidator) ValidateCXReceiptsProof(cxp *types.CXReceiptsProof) error {
if !v.config.IsCrossTx(cxp.Header.Epoch()) {
return ctxerror.New("[ValidateCXReceiptsProof] cross shard receipt received before cx fork")
}
toShardID, err := cxp.GetToShardID() toShardID, err := cxp.GetToShardID()
if err != nil { if err != nil {
return ctxerror.New("[ValidateCXReceiptsProof] invalid shardID").WithCause(err) return ctxerror.New("[ValidateCXReceiptsProof] invalid shardID").WithCause(err)
@ -219,5 +225,5 @@ func (v *BlockValidator) ValidateCXReceiptsProof(cxp *types.CXReceiptsProof) err
} }
// (4) verify blockHeader with seal // (4) verify blockHeader with seal
return v.engine.VerifyHeaderWithSignature(cxp.Header, cxp.CommitSig, cxp.CommitBitmap) return v.engine.VerifyHeaderWithSignature(v.bc, cxp.Header, cxp.CommitSig, cxp.CommitBitmap)
} }

@ -1076,22 +1076,23 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts) rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
epoch := block.Header().Epoch() epoch := block.Header().Epoch()
shardingConfig := ShardingSchedule.InstanceForEpoch(epoch) if bc.chainConfig.IsCrossTx(block.Epoch()) {
shardNum := int(shardingConfig.NumShards()) shardingConfig := ShardingSchedule.InstanceForEpoch(epoch)
for i := 0; i < shardNum; i++ { shardNum := int(shardingConfig.NumShards())
if i == int(block.ShardID()) { for i := 0; i < shardNum; i++ {
continue if i == int(block.ShardID()) {
} continue
shardReceipts := GetToShardReceipts(cxReceipts, uint32(i)) }
err := rawdb.WriteCXReceipts(batch, uint32(i), block.NumberU64(), block.Hash(), shardReceipts, false) shardReceipts := GetToShardReceipts(cxReceipts, uint32(i))
if err != nil { err := rawdb.WriteCXReceipts(batch, uint32(i), block.NumberU64(), block.Hash(), shardReceipts, false)
utils.Logger().Debug().Err(err).Interface("shardReceipts", shardReceipts).Int("toShardID", i).Msg("WriteCXReceipts cannot write into database") if err != nil {
utils.Logger().Debug().Err(err).Interface("shardReceipts", shardReceipts).Int("toShardID", i).Msg("WriteCXReceipts cannot write into database")
}
} }
// Mark incomingReceipts in the block as spent
bc.WriteCXReceiptsProofSpent(block.IncomingReceipts())
} }
// Mark incomingReceipts in the block as spent
bc.WriteCXReceiptsProofSpent(block.IncomingReceipts())
// If the total difficulty is higher than our known, add it to the canonical chain // If the total difficulty is higher than our known, add it to the canonical chain
// Second clause in the if statement reduces the vulnerability to selfish mining. // Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
@ -1108,6 +1109,9 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
rawdb.WriteTxLookupEntries(batch, block) rawdb.WriteTxLookupEntries(batch, block)
rawdb.WritePreimages(batch, block.NumberU64(), state.Preimages()) rawdb.WritePreimages(batch, block.NumberU64(), state.Preimages())
// write the positional metadata for CXReceipts lookups
rawdb.WriteCxLookupEntries(batch, block)
status = CanonStatTy status = CanonStatTy
} else { } else {
status = SideStatTy status = SideStatTy
@ -2237,3 +2241,10 @@ func (bc *BlockChain) UpdateCXReceiptsCheckpointsByBlock(block *types.Block) {
bc.updateCXReceiptsCheckpoints(k, v) bc.updateCXReceiptsCheckpoints(k, v)
} }
} }
// ReadTxLookupEntry returns where the given transaction resides in the chain,
// as a (block hash, block number, index in transaction list) triple.
// returns 0, 0 if not found
func (bc *BlockChain) ReadTxLookupEntry(txID common.Hash) (common.Hash, uint64, uint64) {
return rawdb.ReadTxLookupEntry(bc.db, txID)
}

@ -0,0 +1,58 @@
package core
import (
mapset "github.com/deckarep/golang-set"
"github.com/ethereum/go-ethereum/common"
)
const (
// CxPoolSize is the maximum size of the pool
CxPoolSize = 50
)
// CxEntry represents the egress receipt's blockHash and ToShardID
type CxEntry struct {
BlockHash common.Hash
ToShardID uint32
}
// CxPool is to hold a pool of block outgoing receipts to be resend in next round broadcast
// When a user/client doesn't find the destination shard get the money from cross shard tx
// it can send RPC call along with txID to allow the any validator to
// add the corresponding block's receipts to be resent
type CxPool struct {
pool mapset.Set
maxSize int
}
// NewCxPool creates a new CxPool
func NewCxPool(limit int) *CxPool {
pool := mapset.NewSet()
cxPool := CxPool{pool: pool, maxSize: limit}
return &cxPool
}
// Pool returns the pool of blockHashes of missing receipts
func (cxPool *CxPool) Pool() mapset.Set {
return cxPool.pool
}
// Size return size of the pool
func (cxPool *CxPool) Size() int {
return cxPool.pool.Cardinality()
}
// Add inserts entry into the pool and reports whether it was accepted.
// The entry is rejected once the pool has reached maxSize; duplicates are
// absorbed by the underlying set.
func (cxPool *CxPool) Add(entry CxEntry) bool {
	// Reject at capacity (>=) so the pool never exceeds maxSize; a strict >
	// comparison would let it grow to maxSize+1 elements.
	if cxPool.Size() >= cxPool.maxSize {
		return false
	}
	cxPool.pool.Add(entry)
	return true
}
// Clear removes every entry from the pool.
func (cxPool *CxPool) Clear() {
	cxPool.pool.Clear()
}

@ -123,3 +123,76 @@ func WriteBloomBits(db DatabaseWriter, bit uint, section uint64, head common.Has
utils.Logger().Error().Err(err).Msg("Failed to store bloom bits") utils.Logger().Error().Err(err).Msg("Failed to store bloom bits")
} }
} }
// ReadCxLookupEntry retrieves the positional metadata stored for a cross-shard
// receipt hash — the (block hash, block number, receipt index) triple locating
// the CXReceipt in the destination shard, not the original transaction in the
// source shard. Zero values are returned when no entry exists.
func ReadCxLookupEntry(db DatabaseReader, hash common.Hash) (common.Hash, uint64, uint64) {
	raw, _ := db.Get(cxLookupKey(hash))
	if len(raw) == 0 {
		return common.Hash{}, 0, 0
	}
	var lookup TxLookupEntry
	if err := rlp.DecodeBytes(raw, &lookup); err != nil {
		utils.Logger().Error().Err(err).Str("hash", hash.Hex()).Msg("Invalid transaction lookup entry RLP")
		return common.Hash{}, 0, 0
	}
	return lookup.BlockHash, lookup.BlockIndex, lookup.Index
}
// WriteCxLookupEntries stores positional metadata for every incoming
// cross-shard receipt of a block, enabling hash-based receipt lookups.
// Receipts are indexed by their flat position across all incoming receipt
// groups (matching the traversal order of Body.CXReceiptAt).
func WriteCxLookupEntries(db DatabaseWriter, block *types.Block) {
	previousSum := 0
	for _, cxp := range block.IncomingReceipts() {
		for j, cx := range cxp.Receipts {
			entry := TxLookupEntry{
				BlockHash:  block.Hash(),
				BlockIndex: block.NumberU64(),
				Index:      uint64(j + previousSum),
			}
			data, err := rlp.EncodeToBytes(entry)
			if err != nil {
				utils.Logger().Error().Err(err).Msg("Failed to encode transaction lookup entry")
				// Do not write empty/corrupt data for this hash.
				continue
			}
			if err := db.Put(cxLookupKey(cx.TxHash), data); err != nil {
				utils.Logger().Error().Err(err).Msg("Failed to store transaction lookup entry")
			}
		}
		previousSum += len(cxp.Receipts)
	}
}
// DeleteCxLookupEntry removes the cross-shard receipt lookup metadata
// associated with the given hash. Failures are logged (consistent with
// WriteCxLookupEntries) rather than silently ignored.
func DeleteCxLookupEntry(db DatabaseDeleter, hash common.Hash) {
	if err := db.Delete(cxLookupKey(hash)); err != nil {
		utils.Logger().Error().Err(err).Msg("Failed to delete transaction lookup entry")
	}
}
// ReadCXReceipt looks up a cross-shard receipt by its transaction hash via the
// positional metadata written by WriteCxLookupEntries, returning the receipt
// plus the hash, number, and in-block index of the containing block. Zero
// values are returned if the lookup entry, block body, or receipt is missing.
func ReadCXReceipt(db DatabaseReader, hash common.Hash) (*types.CXReceipt, common.Hash, uint64, uint64) {
	blockHash, blockNumber, cxIndex := ReadCxLookupEntry(db, hash)
	if blockHash == (common.Hash{}) {
		return nil, common.Hash{}, 0, 0
	}
	blockBody := ReadBody(db, blockHash, blockNumber)
	if blockBody == nil {
		utils.Logger().Error().
			Uint64("number", blockNumber).
			Str("hash", blockHash.Hex()).
			Uint64("index", cxIndex).
			Msg("block Body referenced missing")
		return nil, common.Hash{}, 0, 0
	}
	receipt := blockBody.CXReceiptAt(int(cxIndex))
	if receipt == nil {
		utils.Logger().Error().
			Uint64("number", blockNumber).
			Str("hash", blockHash.Hex()).
			Uint64("index", cxIndex).
			Msg("CXReceipt referenced missing")
		return nil, common.Hash{}, 0, 0
	}
	return receipt, blockHash, blockNumber, cxIndex
}

@ -51,8 +51,9 @@ var (
blockBodyPrefix = []byte("b") // blockBodyPrefix + num (uint64 big endian) + hash -> block body blockBodyPrefix = []byte("b") // blockBodyPrefix + num (uint64 big endian) + hash -> block body
blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
txLookupPrefix = []byte("l") // txLookupPrefix + hash -> transaction/receipt lookup metadata txLookupPrefix = []byte("l") // txLookupPrefix + hash -> transaction/receipt lookup metadata
bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits cxLookupPrefix = []byte("cx") // cxLookupPrefix + hash -> cxReceipt lookup metadata
bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits
shardStatePrefix = []byte("ss") // shardStatePrefix + num (uint64 big endian) + hash -> shardState shardStatePrefix = []byte("ss") // shardStatePrefix + num (uint64 big endian) + hash -> shardState
lastCommitsKey = []byte("LastCommits") lastCommitsKey = []byte("LastCommits")
@ -137,6 +138,11 @@ func txLookupKey(hash common.Hash) []byte {
return append(txLookupPrefix, hash.Bytes()...) return append(txLookupPrefix, hash.Bytes()...)
} }
// cxLookupKey builds the database key for a cross-shard receipt lookup entry:
// cxLookupPrefix + hash.
func cxLookupKey(hash common.Hash) []byte {
	key := make([]byte, 0, len(cxLookupPrefix)+len(hash.Bytes()))
	key = append(key, cxLookupPrefix...)
	key = append(key, hash.Bytes()...)
	return key
}
// bloomBitsKey = bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash // bloomBitsKey = bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash
func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte { func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte {
key := append(append(bloomBitsPrefix, make([]byte, 10)...), hash.Bytes()...) key := append(append(bloomBitsPrefix, make([]byte, 10)...), hash.Bytes()...)

@ -163,7 +163,7 @@ func CalculateNewShardState(
stakeInfo *map[common.Address]*structs.StakeInfo, stakeInfo *map[common.Address]*structs.StakeInfo,
) (shard.State, error) { ) (shard.State, error) {
if epoch.Cmp(big.NewInt(GenesisEpoch)) == 0 { if epoch.Cmp(big.NewInt(GenesisEpoch)) == 0 {
return GetInitShardState(), nil return CalculateInitShardState(), nil
} }
prevEpoch := new(big.Int).Sub(epoch, common.Big1) prevEpoch := new(big.Int).Sub(epoch, common.Big1)
ss, err := GetShardingStateFromBlockChain(bc, prevEpoch) ss, err := GetShardingStateFromBlockChain(bc, prevEpoch)
@ -215,15 +215,15 @@ func (ss *ShardingState) UpdateShardingState(stakeInfo *map[common.Address]*stru
// Depends on the type of the network. Defaults to the mainnet schedule. // Depends on the type of the network. Defaults to the mainnet schedule.
var ShardingSchedule shardingconfig.Schedule = shardingconfig.MainnetSchedule var ShardingSchedule shardingconfig.Schedule = shardingconfig.MainnetSchedule
// GetInitShardState returns the initial shard state at genesis. // CalculateInitShardState returns the initial shard state at genesis.
func GetInitShardState() shard.State { func CalculateInitShardState() shard.State {
return GetShardState(big.NewInt(GenesisEpoch)) return CalculateShardState(big.NewInt(GenesisEpoch))
} }
// GetShardState returns the shard state based on epoch number // CalculateShardState returns the shard state based on epoch number
// This api for getting shard state is what should be used to get shard state regardless of // This api for getting shard state is what should be used to get shard state regardless of
// current chain dependency (ex. getting shard state from block header received during cross-shard transaction) // current chain dependency (ex. getting shard state from block header received during cross-shard transaction)
func GetShardState(epoch *big.Int) shard.State { func CalculateShardState(epoch *big.Int) shard.State {
utils.Logger().Info().Int64("epoch", epoch.Int64()).Msg("Get Shard State of Epoch.") utils.Logger().Info().Int64("epoch", epoch.Int64()).Msg("Get Shard State of Epoch.")
shardingConfig := ShardingSchedule.InstanceForEpoch(epoch) shardingConfig := ShardingSchedule.InstanceForEpoch(epoch)
shardNum := int(shardingConfig.NumShards()) shardNum := int(shardingConfig.NumShards())
@ -271,9 +271,9 @@ func GetShardState(epoch *big.Int) shard.State {
return shardState return shardState
} }
// GetPublicKeys returns the publickeys given epoch and shardID // CalculatePublicKeys returns the publickeys given epoch and shardID
func GetPublicKeys(epoch *big.Int, shardID uint32) []*bls.PublicKey { func CalculatePublicKeys(epoch *big.Int, shardID uint32) []*bls.PublicKey {
shardState := GetShardState(epoch) shardState := CalculateShardState(epoch)
// Update validator public keys // Update validator public keys
committee := shardState.FindCommitteeByID(shardID) committee := shardState.FindCommitteeByID(shardID)

@ -103,8 +103,8 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.DB, cfg vm.C
} }
// return true if it is valid // return true if it is valid
func getTransactionType(header *block.Header, tx *types.Transaction) types.TransactionType { func getTransactionType(config *params.ChainConfig, header *block.Header, tx *types.Transaction) types.TransactionType {
if tx.ShardID() == tx.ToShardID() && header.ShardID() == tx.ShardID() { if header.ShardID() == tx.ShardID() && (!config.IsCrossTx(header.Epoch()) || tx.ShardID() == tx.ToShardID()) {
return types.SameShardTx return types.SameShardTx
} }
numShards := ShardingSchedule.InstanceForEpoch(header.Epoch()).NumShards() numShards := ShardingSchedule.InstanceForEpoch(header.Epoch()).NumShards()
@ -120,7 +120,7 @@ func getTransactionType(header *block.Header, tx *types.Transaction) types.Trans
// for the transaction, gas used and an error if the transaction failed, // for the transaction, gas used and an error if the transaction failed,
// indicating the block was invalid. // indicating the block was invalid.
func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.DB, header *block.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, *types.CXReceipt, uint64, error) { func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.DB, header *block.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, *types.CXReceipt, uint64, error) {
txType := getTransactionType(header, tx) txType := getTransactionType(config, header, tx)
if txType == types.InvalidTx { if txType == types.InvalidTx {
return nil, nil, 0, fmt.Errorf("Invalid Transaction Type") return nil, nil, 0, fmt.Errorf("Invalid Transaction Type")
} }

@ -86,6 +86,10 @@ type BodyInterface interface {
// It returns nil if index is out of bounds. // It returns nil if index is out of bounds.
TransactionAt(index int) *Transaction TransactionAt(index int) *Transaction
// CXReceiptAt returns the CXReceipt given index (calculated from IncomingReceipts)
// It returns nil if index is out of bounds
CXReceiptAt(index int) *CXReceipt
// SetTransactions sets the list of transactions with a deep copy of the // SetTransactions sets the list of transactions with a deep copy of the
// given list. // given list.
SetTransactions(newTransactions []*Transaction) SetTransactions(newTransactions []*Transaction)

@ -39,6 +39,13 @@ func (b *BodyV0) TransactionAt(index int) *Transaction {
return b.f.Transactions[index].Copy() return b.f.Transactions[index].Copy()
} }
// CXReceiptAt returns the CXReceipt at given index in this block.
// It returns nil if index is out of bounds.
// V0 bodies predate cross-shard receipts and carry none, so every index is
// out of bounds and this always returns nil.
func (b *BodyV0) CXReceiptAt(index int) *CXReceipt {
	return nil
}
// SetTransactions sets the list of transactions with a deep copy of the given // SetTransactions sets the list of transactions with a deep copy of the given
// list. // list.
func (b *BodyV0) SetTransactions(newTransactions []*Transaction) { func (b *BodyV0) SetTransactions(newTransactions []*Transaction) {

@ -39,6 +39,22 @@ func (b *BodyV1) TransactionAt(index int) *Transaction {
return b.f.Transactions[index].Copy() return b.f.Transactions[index].Copy()
} }
// CXReceiptAt returns a copy of the CXReceipt at the given flat index, where
// the index counts receipts across all incoming receipt groups in order.
// It returns nil if index is negative or out of bounds.
func (b *BodyV1) CXReceiptAt(index int) *CXReceipt {
	if index < 0 {
		return nil
	}
	remaining := index
	for _, group := range b.f.IncomingReceipts {
		if remaining < len(group.Receipts) {
			return group.Receipts[remaining].Copy()
		}
		remaining -= len(group.Receipts)
	}
	// Index exceeds the total number of incoming receipts.
	return nil
}
// SetTransactions sets the list of transactions with a deep copy of the given // SetTransactions sets the list of transactions with a deep copy of the given
// list. // list.
func (b *BodyV1) SetTransactions(newTransactions []*Transaction) { func (b *BodyV1) SetTransactions(newTransactions []*Transaction) {

@ -20,6 +20,7 @@ import (
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/core/vm" "github.com/harmony-one/harmony/core/vm"
"github.com/harmony-one/harmony/internal/params" "github.com/harmony-one/harmony/internal/params"
"github.com/harmony-one/harmony/shard"
) )
// APIBackend An implementation of internal/hmyapi/Backend. Full client. // APIBackend An implementation of internal/hmyapi/Backend. Full client.
@ -232,3 +233,39 @@ func (b *APIBackend) RPCGasCap() *big.Int {
func (b *APIBackend) GetShardID() uint32 { func (b *APIBackend) GetShardID() uint32 {
return b.hmy.shardID return b.hmy.shardID
} }
// GetCommittee returns committee for a particular epoch, or (nil, nil) when
// the shard state has no committee matching this node's shard ID.
func (b *APIBackend) GetCommittee(epoch *big.Int) (*shard.Committee, error) {
	state, err := b.hmy.BlockChain().ReadShardState(epoch)
	if err != nil {
		return nil, err
	}
	shardID := b.GetShardID()
	for i := range state {
		if state[i].ShardID == shardID {
			// Return a pointer to a copy so callers cannot mutate the
			// shard state slice through it.
			committee := state[i]
			return &committee, nil
		}
	}
	return nil, nil
}
// ResendCx retrieves the blockHash from txID and adds it to the CxPool for
// resending. It returns the block number of the transaction and whether the
// entry was accepted by the pool; (0, false) is returned when the block or
// transaction cannot be found or the transaction is not a cross-shard one.
func (b *APIBackend) ResendCx(ctx context.Context, txID common.Hash) (uint64, bool) {
	blockHash, blockNum, index := b.hmy.BlockChain().ReadTxLookupEntry(txID)
	blk := b.hmy.BlockChain().GetBlockByHash(blockHash)
	if blk == nil {
		return 0, false
	}
	txs := blk.Transactions()
	// A valid index is in [0, len(txs)); this also covers the empty-block case.
	if int(index) >= len(txs) {
		return 0, false
	}
	tx := txs[int(index)]
	// Reject same-shard transactions and lookup entries pointing at a block
	// from a different shard: only genuine cross-shard txs can be resent.
	if tx.ShardID() == tx.ToShardID() || blk.Header().ShardID() != tx.ShardID() {
		return 0, false
	}
	// Keyed literal keeps this robust against CxEntry field reordering
	// (and satisfies go vet's composites check).
	entry := core.CxEntry{BlockHash: blockHash, ToShardID: tx.ToShardID()}
	success := b.hmy.CxPool().Add(entry)
	return blockNum, success
}

@ -21,6 +21,7 @@ type Harmony struct {
blockchain *core.BlockChain blockchain *core.BlockChain
txPool *core.TxPool txPool *core.TxPool
cxPool *core.CxPool
accountManager *accounts.Manager accountManager *accounts.Manager
eventMux *event.TypeMux eventMux *event.TypeMux
// DB interfaces // DB interfaces
@ -51,13 +52,14 @@ type NodeAPI interface {
// New creates a new Harmony object (including the // New creates a new Harmony object (including the
// initialisation of the common Harmony object) // initialisation of the common Harmony object)
func New(nodeAPI NodeAPI, txPool *core.TxPool, eventMux *event.TypeMux, shardID uint32) (*Harmony, error) { func New(nodeAPI NodeAPI, txPool *core.TxPool, cxPool *core.CxPool, eventMux *event.TypeMux, shardID uint32) (*Harmony, error) {
chainDb := nodeAPI.Blockchain().ChainDB() chainDb := nodeAPI.Blockchain().ChainDB()
hmy := &Harmony{ hmy := &Harmony{
shutdownChan: make(chan bool), shutdownChan: make(chan bool),
bloomRequests: make(chan chan *bloombits.Retrieval), bloomRequests: make(chan chan *bloombits.Retrieval),
blockchain: nodeAPI.Blockchain(), blockchain: nodeAPI.Blockchain(),
txPool: txPool, txPool: txPool,
cxPool: cxPool,
accountManager: nodeAPI.AccountManager(), accountManager: nodeAPI.AccountManager(),
eventMux: eventMux, eventMux: eventMux,
chainDb: chainDb, chainDb: chainDb,
@ -75,6 +77,9 @@ func New(nodeAPI NodeAPI, txPool *core.TxPool, eventMux *event.TypeMux, shardID
// TxPool ... // TxPool ...
func (s *Harmony) TxPool() *core.TxPool { return s.txPool } func (s *Harmony) TxPool() *core.TxPool { return s.txPool }
// CxPool is used to store the blockHashes, where the corresponding block contains the cross shard receipts to be sent
func (s *Harmony) CxPool() *core.CxPool { return s.cxPool }
// BlockChain ... // BlockChain ...
func (s *Harmony) BlockChain() *core.BlockChain { return s.blockchain } func (s *Harmony) BlockChain() *core.BlockChain { return s.blockchain }

@ -11,7 +11,6 @@ import (
"github.com/harmony-one/harmony/block" "github.com/harmony-one/harmony/block"
"github.com/harmony-one/harmony/consensus/engine" "github.com/harmony-one/harmony/consensus/engine"
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/state" "github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/ctxerror" "github.com/harmony-one/harmony/internal/ctxerror"
@ -104,29 +103,7 @@ func (e *engineImpl) VerifyHeaders(chain engine.ChainReader, headers []*block.He
// ReadPublicKeysFromLastBlock finds the public keys of last block's committee // ReadPublicKeysFromLastBlock finds the public keys of last block's committee
func ReadPublicKeysFromLastBlock(bc engine.ChainReader, header *block.Header) ([]*bls.PublicKey, error) { func ReadPublicKeysFromLastBlock(bc engine.ChainReader, header *block.Header) ([]*bls.PublicKey, error) {
parentHeader := bc.GetHeaderByHash(header.ParentHash()) parentHeader := bc.GetHeaderByHash(header.ParentHash())
if parentHeader == nil { return GetPublicKeys(bc, parentHeader)
return nil, ctxerror.New("cannot find parent block header in DB",
"parentHash", header.ParentHash())
}
parentShardState := core.GetShardState(parentHeader.Epoch())
parentCommittee := parentShardState.FindCommitteeByID(parentHeader.ShardID())
if parentCommittee == nil {
return nil, ctxerror.New("cannot find shard in the shard state",
"parentBlockNumber", parentHeader.Number(),
"shardID", parentHeader.ShardID(),
)
}
var committerKeys []*bls.PublicKey
for _, member := range parentCommittee.NodeList {
committerKey := new(bls.PublicKey)
err := member.BlsPublicKey.ToLibBLSPublicKey(committerKey)
if err != nil {
return nil, ctxerror.New("cannot convert BLS public key",
"blsPublicKey", member.BlsPublicKey).WithCause(err)
}
committerKeys = append(committerKeys, committerKey)
}
return committerKeys, nil
} }
// VerifySeal implements Engine, checking whether the given block's parent block satisfies // VerifySeal implements Engine, checking whether the given block's parent block satisfies
@ -148,7 +125,7 @@ func (e *engineImpl) VerifySeal(chain engine.ChainReader, header *block.Header)
} }
parentHash := header.ParentHash() parentHash := header.ParentHash()
parentHeader := chain.GetHeader(parentHash, header.Number().Uint64()-1) parentHeader := chain.GetHeader(parentHash, header.Number().Uint64()-1)
parentQuorum, err := QuorumForBlock(parentHeader) parentQuorum, err := QuorumForBlock(chain, parentHeader)
if err != nil { if err != nil {
return errors.Wrapf(err, return errors.Wrapf(err,
"cannot calculate quorum for block %s", header.Number()) "cannot calculate quorum for block %s", header.Number())
@ -181,8 +158,12 @@ func (e *engineImpl) Finalize(chain engine.ChainReader, header *block.Header, st
} }
// QuorumForBlock returns the quorum for the given block header. // QuorumForBlock returns the quorum for the given block header.
func QuorumForBlock(h *block.Header) (quorum int, err error) { func QuorumForBlock(chain engine.ChainReader, h *block.Header) (quorum int, err error) {
ss := core.GetShardState(h.Epoch()) ss, err := chain.ReadShardState(h.Epoch())
if err != nil {
return 0, ctxerror.New("failed to read shard state of epoch",
"epoch", h.Epoch().Uint64())
}
c := ss.FindCommitteeByID(h.ShardID()) c := ss.FindCommitteeByID(h.ShardID())
if c == nil { if c == nil {
return 0, errors.Errorf( return 0, errors.Errorf(
@ -195,8 +176,8 @@ func QuorumForBlock(h *block.Header) (quorum int, err error) {
// is used for verifying "incoming" block header against commit signature and bitmap sent from the other chain cross-shard via libp2p. // is used for verifying "incoming" block header against commit signature and bitmap sent from the other chain cross-shard via libp2p.
// i.e. this header verification api is more flexible since the caller specifies which commit signature and bitmap to use // i.e. this header verification api is more flexible since the caller specifies which commit signature and bitmap to use
// for verifying the block header, which is necessary for cross-shard block header verification. Example of such is cross-shard transaction. // for verifying the block header, which is necessary for cross-shard block header verification. Example of such is cross-shard transaction.
func (e *engineImpl) VerifyHeaderWithSignature(header *block.Header, commitSig []byte, commitBitmap []byte) error { func (e *engineImpl) VerifyHeaderWithSignature(chain engine.ChainReader, header *block.Header, commitSig []byte, commitBitmap []byte) error {
publicKeys, err := GetPublicKeys(header) publicKeys, err := GetPublicKeys(chain, header)
if err != nil { if err != nil {
return ctxerror.New("[VerifyHeaderWithSignature] Cannot get publickeys for block header").WithCause(err) return ctxerror.New("[VerifyHeaderWithSignature] Cannot get publickeys for block header").WithCause(err)
} }
@ -208,7 +189,7 @@ func (e *engineImpl) VerifyHeaderWithSignature(header *block.Header, commitSig [
} }
hash := header.Hash() hash := header.Hash()
quorum, err := QuorumForBlock(header) quorum, err := QuorumForBlock(chain, header)
if err != nil { if err != nil {
return errors.Wrapf(err, return errors.Wrapf(err,
"cannot calculate quorum for block %s", header.Number()) "cannot calculate quorum for block %s", header.Number())
@ -229,8 +210,13 @@ func (e *engineImpl) VerifyHeaderWithSignature(header *block.Header, commitSig [
} }
// GetPublicKeys finds the public keys of the committee that signed the block header // GetPublicKeys finds the public keys of the committee that signed the block header
func GetPublicKeys(header *block.Header) ([]*bls.PublicKey, error) { func GetPublicKeys(chain engine.ChainReader, header *block.Header) ([]*bls.PublicKey, error) {
shardState := core.GetShardState(header.Epoch()) shardState, err := chain.ReadShardState(header.Epoch())
if err != nil {
return nil, ctxerror.New("failed to read shard state of epoch",
"epoch", header.Epoch().Uint64())
}
committee := shardState.FindCommitteeByID(header.ShardID()) committee := shardState.FindCommitteeByID(header.ShardID())
if committee == nil { if committee == nil {
return nil, ctxerror.New("cannot find shard in the shard state", return nil, ctxerror.New("cannot find shard in the shard state",

@ -7,6 +7,7 @@ import (
"crypto/ecdsa" "crypto/ecdsa"
"errors" "errors"
"fmt" "fmt"
"os"
"sync" "sync"
"github.com/harmony-one/bls/ffi/go/bls" "github.com/harmony-one/bls/ffi/go/bls"
@ -62,6 +63,7 @@ const (
) )
var version string var version string
var publicRPC bool // enable public RPC access
// ConfigType is the structure of all node related configuration variables // ConfigType is the structure of all node related configuration variables
type ConfigType struct { type ConfigType struct {
@ -246,3 +248,18 @@ func SetVersion(ver string) {
func GetVersion() string { func GetVersion() string {
return version return version
} }
// GetTempDir returns the operating system's temporary directory, delegating
// to os.TempDir.
func GetTempDir() string {
	dir := os.TempDir()
	return dir
}
// SetPublicRPC sets whether public RPC access is enabled.
// NOTE(review): writes an unguarded package-level flag; presumably only
// called during startup before RPC serving begins — confirm with callers.
func SetPublicRPC(v bool) {
	publicRPC = v
}
// GetPublicRPC reports whether public RPC access is enabled.
func GetPublicRPC() bool {
	return publicRPC
}

@ -155,5 +155,5 @@ func (ls localnetSchedule) GetShardingStructure(numShard, shardID int) []map[str
var localnetReshardingEpoch = []*big.Int{big.NewInt(0), big.NewInt(localnetV1Epoch), big.NewInt(localnetV2Epoch)} var localnetReshardingEpoch = []*big.Int{big.NewInt(0), big.NewInt(localnetV1Epoch), big.NewInt(localnetV2Epoch)}
var localnetV0 = MustNewInstance(2, 7, 5, genesis.LocalHarmonyAccounts, genesis.LocalFnAccounts, localnetReshardingEpoch) var localnetV0 = MustNewInstance(2, 7, 5, genesis.LocalHarmonyAccounts, genesis.LocalFnAccounts, localnetReshardingEpoch)
var localnetV1 = MustNewInstance(2, 7, 5, genesis.LocalHarmonyAccountsV1, genesis.LocalFnAccountsV1, localnetReshardingEpoch) var localnetV1 = MustNewInstance(2, 8, 5, genesis.LocalHarmonyAccountsV1, genesis.LocalFnAccountsV1, localnetReshardingEpoch)
var localnetV2 = MustNewInstance(2, 9, 6, genesis.LocalHarmonyAccountsV2, genesis.LocalFnAccountsV2, localnetReshardingEpoch) var localnetV2 = MustNewInstance(2, 9, 6, genesis.LocalHarmonyAccountsV2, genesis.LocalFnAccountsV2, localnetReshardingEpoch)

@ -13,7 +13,7 @@ const (
blocksPerShard = 16384 // 2^14 blocksPerShard = 16384 // 2^14
mainnetVdfDifficulty = 50000 // This takes about 100s to finish the vdf mainnetVdfDifficulty = 50000 // This takes about 100s to finish the vdf
mainnetConsensusRatio = float64(0.66) mainnetConsensusRatio = float64(0.1)
// TODO: remove it after randomness feature turned on mainnet // TODO: remove it after randomness feature turned on mainnet
mainnetRandomnessStartingEpoch = 100000 mainnetRandomnessStartingEpoch = 100000

@ -42,6 +42,8 @@ var LocalFnAccountsV1 = []DeployAccount{
{Index: " 1 ", Address: "one1uyshu2jgv8w465yc8kkny36thlt2wvel89tcmg", BlsPublicKey: "a547a9bf6fdde4f4934cde21473748861a3cc0fe8bbb5e57225a29f483b05b72531f002f8187675743d819c955a86100"}, {Index: " 1 ", Address: "one1uyshu2jgv8w465yc8kkny36thlt2wvel89tcmg", BlsPublicKey: "a547a9bf6fdde4f4934cde21473748861a3cc0fe8bbb5e57225a29f483b05b72531f002f8187675743d819c955a86100"},
{Index: " 2 ", Address: "one103q7qe5t2505lypvltkqtddaef5tzfxwsse4z7", BlsPublicKey: "678ec9670899bf6af85b877058bea4fc1301a5a3a376987e826e3ca150b80e3eaadffedad0fedfa111576fa76ded980c"}, {Index: " 2 ", Address: "one103q7qe5t2505lypvltkqtddaef5tzfxwsse4z7", BlsPublicKey: "678ec9670899bf6af85b877058bea4fc1301a5a3a376987e826e3ca150b80e3eaadffedad0fedfa111576fa76ded980c"},
{Index: " 3 ", Address: "one129r9pj3sk0re76f7zs3qz92rggmdgjhtwge62k", BlsPublicKey: "63f479f249c59f0486fda8caa2ffb247209489dae009dfde6144ff38c370230963d360dffd318cfb26c213320e89a512"}, {Index: " 3 ", Address: "one129r9pj3sk0re76f7zs3qz92rggmdgjhtwge62k", BlsPublicKey: "63f479f249c59f0486fda8caa2ffb247209489dae009dfde6144ff38c370230963d360dffd318cfb26c213320e89a512"},
{Index: " 4 ", Address: "one1d2rngmem4x2c6zxsjjz29dlah0jzkr0k2n88wc", BlsPublicKey: "16513c487a6bb76f37219f3c2927a4f281f9dd3fd6ed2e3a64e500de6545cf391dd973cc228d24f9bd01efe94912e714"},
{Index: " 5 ", Address: "one1658znfwf40epvy7e46cqrmzyy54h4n0qa73nep", BlsPublicKey: "576d3c48294e00d6be4a22b07b66a870ddee03052fe48a5abbd180222e5d5a1f8946a78d55b025de21635fd743bbad90"},
} }
// LocalHarmonyAccountsV2 are the accounts for the initial genesis nodes used for local test. // LocalHarmonyAccountsV2 are the accounts for the initial genesis nodes used for local test.

@ -10,6 +10,7 @@
* [x] hmy_protocolVersion - check protocol version * [x] hmy_protocolVersion - check protocol version
* [ ] net_version - get network id * [ ] net_version - get network id
* [ ] net_peerCount - peer count * [ ] net_peerCount - peer count
* [x] hmy_getNodeMetadata - get node's version, bls key
### BlockChain info related ### BlockChain info related
* [ ] hmy_gasPrice - return min-gas-price * [ ] hmy_gasPrice - return min-gas-price
@ -87,4 +88,4 @@ The ``shh`` is for the whisper protocol to communicate p2p and broadcast
* [ ] shh_newFilter * [ ] shh_newFilter
* [ ] shh_uninstallFilter * [ ] shh_uninstallFilter
* [ ] shh_getFilterChanges * [ ] shh_getFilterChanges
* [ ] shh_getMessages * [ ] shh_getMessages

@ -17,6 +17,7 @@ import (
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/core/vm" "github.com/harmony-one/harmony/core/vm"
"github.com/harmony-one/harmony/internal/params" "github.com/harmony-one/harmony/internal/params"
"github.com/harmony-one/harmony/shard"
) )
// Backend interface provides the common API services (that are provided by // Backend interface provides the common API services (that are provided by
@ -63,7 +64,12 @@ type Backend interface {
CurrentBlock() *types.Block CurrentBlock() *types.Block
// Get balance // Get balance
GetBalance(address common.Address) (*hexutil.Big, error) GetBalance(address common.Address) (*hexutil.Big, error)
// Get committee for a particular epoch
GetCommittee(epoch *big.Int) (*shard.Committee, error)
GetShardID() uint32 GetShardID() uint32
// retrieve the blockHash using txID and add blockHash to CxPool for resending
ResendCx(ctx context.Context, txID common.Hash) (uint64, bool)
} }
// GetAPIs returns all the APIs. // GetAPIs returns all the APIs.

@ -63,6 +63,36 @@ func (s *PublicBlockChainAPI) GetBlockByHash(ctx context.Context, blockHash comm
return nil, err return nil, err
} }
// GetCommittee returns committee for a particular epoch: the shard ID plus
// each validator's bech32 address and current balance.
func (s *PublicBlockChainAPI) GetCommittee(ctx context.Context, epoch int64) (map[string]interface{}, error) {
	committee, err := s.b.GetCommittee(big.NewInt(epoch))
	if err != nil {
		return nil, err
	}
	if committee == nil {
		// The backend returns (nil, nil) when no committee matches this
		// node's shard for the epoch; guard against a nil dereference below.
		return nil, nil
	}
	validators := make([]map[string]interface{}, 0, len(committee.NodeList))
	for _, validator := range committee.NodeList {
		validatorBalance, err := s.b.GetBalance(validator.EcdsaAddress)
		if err != nil {
			return nil, err
		}
		oneAddress, err := internal_common.AddressToBech32(validator.EcdsaAddress)
		if err != nil {
			return nil, err
		}
		validators = append(validators, map[string]interface{}{
			"address": oneAddress,
			"balance": validatorBalance,
		})
	}
	return map[string]interface{}{
		"shardID":    committee.ShardID,
		"validators": validators,
	}, nil
}
// GetShardingStructure returns an array of sharding structures. // GetShardingStructure returns an array of sharding structures.
func (s *PublicBlockChainAPI) GetShardingStructure(ctx context.Context) ([]map[string]interface{}, error) { func (s *PublicBlockChainAPI) GetShardingStructure(ctx context.Context) ([]map[string]interface{}, error) {
// Get header and number of shards. // Get header and number of shards.
@ -73,6 +103,11 @@ func (s *PublicBlockChainAPI) GetShardingStructure(ctx context.Context) ([]map[s
return core.ShardingSchedule.GetShardingStructure(int(numShard), int(s.b.GetShardID())), nil return core.ShardingSchedule.GetShardingStructure(int(numShard), int(s.b.GetShardID())), nil
} }
// GetShardID returns shard ID of the requested node.
func (s *PublicBlockChainAPI) GetShardID(ctx context.Context) (int, error) {
	shardID := s.b.GetShardID()
	return int(shardID), nil
}
// GetCode returns the code stored at the given address in the state for the given block number. // GetCode returns the code stored at the given address in the state for the given block number.
func (s *PublicBlockChainAPI) GetCode(ctx context.Context, addr string, blockNr rpc.BlockNumber) (hexutil.Bytes, error) { func (s *PublicBlockChainAPI) GetCode(ctx context.Context, addr string, blockNr rpc.BlockNumber) (hexutil.Bytes, error) {
address := internal_common.ParseAddr(addr) address := internal_common.ParseAddr(addr)
@ -112,6 +147,16 @@ func (s *PublicBlockChainAPI) BlockNumber() hexutil.Uint64 {
return hexutil.Uint64(header.Number().Uint64()) return hexutil.Uint64(header.Number().Uint64())
} }
// ResendCx requests that the egress receipt for the given cross-shard
// transaction be re-sent to the destination shard for credit. It unblocks a
// half-complete cross-shard transaction whose funds left the source shard but
// were never credited to the destination account due to transient failures.
// The returned bool reports whether the resend request was queued.
func (s *PublicBlockChainAPI) ResendCx(ctx context.Context, txID common.Hash) (bool, error) {
	_, queued := s.b.ResendCx(ctx, txID)
	return queued, nil
}
// Call executes the given transaction on the state for the given block number. // Call executes the given transaction on the state for the given block number.
// It doesn't make and changes in the state/blockchain and is useful to execute and retrieve values. // It doesn't make and changes in the state/blockchain and is useful to execute and retrieve values.
func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (hexutil.Bytes, error) { func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (hexutil.Bytes, error) {
@ -215,3 +260,9 @@ func doCall(ctx context.Context, b Backend, args CallArgs, blockNr rpc.BlockNumb
} }
return res, gas, failed, err return res, gas, failed, err
} }
// LatestHeader returns the latest header information.
func (s *PublicBlockChainAPI) LatestHeader(ctx context.Context) *HeaderInformation {
	// Honor the caller's context instead of context.Background(); the latest
	// header should always be available, so the error is deliberately ignored.
	header, _ := s.b.HeaderByNumber(ctx, rpc.LatestBlockNumber)
	return newHeaderInformation(header)
}

@ -0,0 +1,8 @@
package hmyapi
import "errors"
var (
	// ErrIncorrectChainID is returned when a submitted transaction's chain
	// ID does not match the chain ID of this node's chain config.
	// The message is lowercase per the Go error-string convention
	// (staticcheck ST1005).
	ErrIncorrectChainID = errors.New("incorrect chain ID")
)

@ -6,6 +6,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/harmony-one/harmony/api/proto" "github.com/harmony-one/harmony/api/proto"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
) )
// PublicHarmonyAPI provides an API to access Harmony related information. // PublicHarmonyAPI provides an API to access Harmony related information.
@ -41,3 +42,22 @@ func (s *PublicHarmonyAPI) GasPrice(ctx context.Context) (*hexutil.Big, error) {
// TODO(ricl): add SuggestPrice API // TODO(ricl): add SuggestPrice API
return (*hexutil.Big)(big.NewInt(1)), nil return (*hexutil.Big)(big.NewInt(1)), nil
} }
// NodeMetadata captures select metadata of the RPC answering node.
// The values describe the node that served the RPC call, not the network
// as a whole. Field order matters: GetNodeMetadata constructs this struct
// with an unkeyed composite literal.
type NodeMetadata struct {
	BLSPublicKey string `json:"blskey"`  // node's BLS consensus public key, hex-serialized
	Version      string `json:"version"` // node software version string
	NetworkType  string `json:"network"` // network type from the node's config (e.g. mainnet/testnet)
	ChainID      string `json:"chainid"` // chain ID of the node's chain config, as a decimal string
}
// GetNodeMetadata produces a NodeMetadata record. Note the data is from the
// answering RPC node, not from any network-wide view.
func (s *PublicHarmonyAPI) GetNodeMetadata() NodeMetadata {
	cfg := nodeconfig.GetDefaultConfig()
	// Keyed fields guard against silent mis-assignment should NodeMetadata
	// ever gain or reorder fields (go vet's composites check).
	return NodeMetadata{
		BLSPublicKey: cfg.ConsensusPubKey.SerializeToHexStr(),
		Version:      nodeconfig.GetVersion(),
		NetworkType:  string(cfg.GetNetworkType()),
		ChainID:      s.b.ChainConfig().ChainID.String(),
	}
}

@ -131,6 +131,9 @@ func (s *PublicTransactionPoolAPI) SendRawTransaction(ctx context.Context, encod
if err := rlp.DecodeBytes(encodedTx, tx); err != nil { if err := rlp.DecodeBytes(encodedTx, tx); err != nil {
return common.Hash{}, err return common.Hash{}, err
} }
// Reject transactions signed for a different chain. NOTE: *big.Int values
// must be compared with Cmp; `!=` on the pointers compares identities, which
// are never equal here, so the original check rejected every transaction.
if tx.ChainID().Cmp(s.b.ChainConfig().ChainID) != 0 {
	return common.Hash{}, ErrIncorrectChainID
}
return SubmitTransaction(ctx, s.b, tx) return SubmitTransaction(ctx, s.b, tx)
} }
@ -212,3 +215,11 @@ func (s *PublicTransactionPoolAPI) PendingTransactions() ([]*RPCTransaction, err
} }
return transactions, nil return transactions, nil
} }
// GetCXReceiptByHash returns the cross-shard receipt recorded for the given
// hash, or nil when no such receipt exists in the chain database.
func (s *PublicTransactionPoolAPI) GetCXReceiptByHash(ctx context.Context, hash common.Hash) *RPCCXReceipt {
	cx, blockHash, blockNumber, _ := rawdb.ReadCXReceipt(s.b.ChainDb(), hash)
	if cx == nil {
		return nil
	}
	return newRPCCXReceipt(cx, blockHash, blockNumber)
}

@ -1,12 +1,16 @@
package hmyapi package hmyapi
import ( import (
"encoding/hex"
"math/big" "math/big"
"time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
ethtypes "github.com/ethereum/go-ethereum/core/types" ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/harmony-one/harmony/block"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
common2 "github.com/harmony-one/harmony/internal/common"
) )
// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction // RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction
@ -22,11 +26,77 @@ type RPCTransaction struct {
To *common.Address `json:"to"` To *common.Address `json:"to"`
TransactionIndex hexutil.Uint `json:"transactionIndex"` TransactionIndex hexutil.Uint `json:"transactionIndex"`
Value *hexutil.Big `json:"value"` Value *hexutil.Big `json:"value"`
ShardID uint32 `json:"shardID"`
ToShardID uint32 `json:"toShardID"`
V *hexutil.Big `json:"v"` V *hexutil.Big `json:"v"`
R *hexutil.Big `json:"r"` R *hexutil.Big `json:"r"`
S *hexutil.Big `json:"s"` S *hexutil.Big `json:"s"`
} }
// RPCCXReceipt represents a CXReceipt (cross-shard transfer receipt) that
// will serialize to the RPC representation of a CXReceipt.
type RPCCXReceipt struct {
	BlockHash   common.Hash    `json:"blockHash"`   // hash of the block containing the receipt (zero hash if unknown)
	BlockNumber *hexutil.Big   `json:"blockNumber"` // number of that block; nil when the block is unknown
	TxHash      common.Hash    `json:"hash"`        // hash of the originating cross-shard transaction
	From        common.Address `json:"from"`        // sender address
	To          *common.Address `json:"to"`         // recipient address (pointer: may be absent)
	ShardID     uint32         `json:"shardID"`     // source shard ID
	ToShardID   uint32         `json:"toShardID"`   // destination shard ID
	Amount      *hexutil.Big   `json:"value"`       // transferred amount
}
// HeaderInformation represents the latest consensus information derived
// from a block header (see newHeaderInformation).
type HeaderInformation struct {
	BlockHash        common.Hash `json:"blockHash"`        // hash of the header's block
	BlockNumber      uint64      `json:"blockNumber"`      // block height
	ShardID          uint32      `json:"shardID"`          // shard the block belongs to
	Leader           string      `json:"leader"`           // leader (coinbase) address in bech32 form
	ViewID           uint64      `json:"viewID"`           // consensus view ID
	Epoch            uint64      `json:"epoch"`            // epoch number
	Timestamp        string      `json:"timestamp"`        // block time rendered as a UTC time string
	UnixTime         uint64      `json:"unixtime"`         // block time as a Unix timestamp
	LastCommitSig    string      `json:"lastCommitSig"`    // hex-encoded last-commit aggregate signature
	LastCommitBitmap string      `json:"lastCommitBitmap"` // hex-encoded signer bitmap for that signature
}
// newHeaderInformation builds the RPC view of a consensus header.
// It returns nil when no header is supplied.
func newHeaderInformation(header *block.Header) *HeaderInformation {
	if header == nil {
		return nil
	}
	// Copy the signature into a local so it can be sliced for hex encoding.
	lastSig := header.LastCommitSignature()
	return &HeaderInformation{
		BlockHash:        header.Hash(),
		BlockNumber:      header.Number().Uint64(),
		ShardID:          header.ShardID(),
		Leader:           common2.MustAddressToBech32(header.Coinbase()),
		ViewID:           header.ViewID().Uint64(),
		Epoch:            header.Epoch().Uint64(),
		UnixTime:         header.Time().Uint64(),
		Timestamp:        time.Unix(header.Time().Int64(), 0).UTC().String(),
		LastCommitBitmap: hex.EncodeToString(header.LastCommitBitmap()),
		LastCommitSig:    hex.EncodeToString(lastSig[:]),
	}
}
// newRPCCXReceipt returns a CXReceipt that will serialize to the RPC
// representation. blockHash/blockNumber locate the block containing the
// receipt; when blockHash is the zero hash the location is unknown and
// BlockNumber is left nil.
func newRPCCXReceipt(cx *types.CXReceipt, blockHash common.Hash, blockNumber uint64) *RPCCXReceipt {
	result := &RPCCXReceipt{
		BlockHash: blockHash,
		TxHash:    cx.TxHash,
		From:      cx.From,
		To:        cx.To,
		Amount:    (*hexutil.Big)(cx.Amount),
		ShardID:   cx.ShardID,
		ToShardID: cx.ToShardID,
	}
	// BlockHash is already set in the literal above (the original code
	// redundantly re-assigned it here); only the block number needs the
	// "known block" guard.
	if blockHash != (common.Hash{}) {
		result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber))
	}
	return result
}
// newRPCTransaction returns a transaction that will serialize to the RPC // newRPCTransaction returns a transaction that will serialize to the RPC
// representation, with the given location metadata set (if available). // representation, with the given location metadata set (if available).
func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64) *RPCTransaction { func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64) *RPCTransaction {
@ -38,17 +108,19 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber
v, r, s := tx.RawSignatureValues() v, r, s := tx.RawSignatureValues()
result := &RPCTransaction{ result := &RPCTransaction{
From: from, From: from,
Gas: hexutil.Uint64(tx.Gas()), Gas: hexutil.Uint64(tx.Gas()),
GasPrice: (*hexutil.Big)(tx.GasPrice()), GasPrice: (*hexutil.Big)(tx.GasPrice()),
Hash: tx.Hash(), Hash: tx.Hash(),
Input: hexutil.Bytes(tx.Data()), Input: hexutil.Bytes(tx.Data()),
Nonce: hexutil.Uint64(tx.Nonce()), Nonce: hexutil.Uint64(tx.Nonce()),
To: tx.To(), To: tx.To(),
Value: (*hexutil.Big)(tx.Value()), Value: (*hexutil.Big)(tx.Value()),
V: (*hexutil.Big)(v), ShardID: tx.ShardID(),
R: (*hexutil.Big)(r), ToShardID: tx.ToShardID(),
S: (*hexutil.Big)(s), V: (*hexutil.Big)(v),
R: (*hexutil.Big)(r),
S: (*hexutil.Big)(s),
} }
if blockHash != (common.Hash{}) { if blockHash != (common.Hash{}) {
result.BlockHash = blockHash result.BlockHash = blockHash

@ -7,45 +7,68 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
) )
// Well-known chain IDs.
var (
	MainnetChainID            = big.NewInt(1)
	TestnetChainID            = big.NewInt(2)
	PangaeaChainID            = big.NewInt(3)
	TestChainID               = big.NewInt(99)  // not a real network
	AllProtocolChangesChainID = big.NewInt(100) // not a real network
)

// EpochTBD is a large, "not anytime soon" epoch. It is used as a
// placeholder until the exact epoch is decided.
var EpochTBD = big.NewInt(10000000)
var ( var (
// MainnetChainConfig is the chain parameters to run a node on the main network. // MainnetChainConfig is the chain parameters to run a node on the main network.
MainnetChainConfig = &ChainConfig{ MainnetChainConfig = &ChainConfig{
ChainID: big.NewInt(1), ChainID: MainnetChainID,
CrossTxEpoch: big.NewInt(28), CrossTxEpoch: big.NewInt(28),
CrossLinkEpoch: big.NewInt(10000000), // Temporarily made very large until a exact number is decided. CrossLinkEpoch: EpochTBD,
EIP155Epoch: big.NewInt(28), EIP155Epoch: big.NewInt(28),
S3Epoch: big.NewInt(28), S3Epoch: big.NewInt(28),
} }
// TestnetChainConfig contains the chain parameters to run a node on the harmony test network. // TestnetChainConfig contains the chain parameters to run a node on the harmony test network.
TestnetChainConfig = &ChainConfig{ TestnetChainConfig = &ChainConfig{
ChainID: big.NewInt(2), ChainID: TestnetChainID,
CrossTxEpoch: big.NewInt(1), CrossTxEpoch: big.NewInt(1),
CrossLinkEpoch: big.NewInt(2), CrossLinkEpoch: big.NewInt(2),
EIP155Epoch: big.NewInt(0), EIP155Epoch: big.NewInt(0),
S3Epoch: big.NewInt(0), S3Epoch: big.NewInt(0),
} }
// PangaeaChainConfig contains the chain parameters for the Pangaea network.
// All features except for CrossLink are enabled at launch.
PangaeaChainConfig = &ChainConfig{
ChainID: PangaeaChainID,
CrossTxEpoch: big.NewInt(0),
CrossLinkEpoch: EpochTBD,
EIP155Epoch: big.NewInt(0),
S3Epoch: big.NewInt(0),
}
// AllProtocolChanges ... // AllProtocolChanges ...
// This configuration is intentionally not using keyed fields to force anyone // This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields. // adding flags to the config to also have to set these fields.
AllProtocolChanges = &ChainConfig{ AllProtocolChanges = &ChainConfig{
big.NewInt(100), // ChainID AllProtocolChangesChainID, // ChainID
big.NewInt(0), // CrossTxEpoch big.NewInt(0), // CrossTxEpoch
big.NewInt(0), // CrossLinkEpoch big.NewInt(0), // CrossLinkEpoch
big.NewInt(0), // EIP155Epoch big.NewInt(0), // EIP155Epoch
big.NewInt(0), // S3Epoch big.NewInt(0), // S3Epoch
} }
// TestChainConfig ... // TestChainConfig ...
// This configuration is intentionally not using keyed fields to force anyone // This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields. // adding flags to the config to also have to set these fields.
TestChainConfig = &ChainConfig{ TestChainConfig = &ChainConfig{
big.NewInt(99), // ChainID TestChainID, // ChainID
big.NewInt(0), // CrossTxEpoch big.NewInt(0), // CrossTxEpoch
big.NewInt(0), // CrossLinkEpoch big.NewInt(0), // CrossLinkEpoch
big.NewInt(0), // EIP155Epoch big.NewInt(0), // EIP155Epoch
big.NewInt(0), // S3Epoch big.NewInt(0), // S3Epoch
} }
// TestRules ... // TestRules ...

@ -3,15 +3,19 @@ package utils
// this module in utils handles the ini file read/write // this module in utils handles the ini file read/write
import ( import (
"fmt" "fmt"
"os"
"strings" "strings"
"gopkg.in/ini.v1"
"github.com/harmony-one/harmony/internal/params"
"github.com/harmony-one/harmony/p2p" "github.com/harmony-one/harmony/p2p"
ini "gopkg.in/ini.v1"
) )
// WalletProfile contains a section and key value pair map // WalletProfile contains a section and key value pair map
type WalletProfile struct { type WalletProfile struct {
Profile string Profile string
ChainID string
Bootnodes []string Bootnodes []string
Shards int Shards int
RPCServer [][]p2p.Peer RPCServer [][]p2p.Peer
@ -31,12 +35,32 @@ func ReadWalletProfile(iniBytes []byte, profile string) (*WalletProfile, error)
if err != nil { if err != nil {
return nil, err return nil, err
} }
profile = sec.Name() // sanitized name
if sec.HasKey("bootnode") { if sec.HasKey("bootnode") {
config.Bootnodes = sec.Key("bootnode").ValueWithShadows() config.Bootnodes = sec.Key("bootnode").ValueWithShadows()
} else { } else {
return nil, fmt.Errorf("can't find bootnode key") return nil, fmt.Errorf("can't find bootnode key")
} }
if sec.HasKey("chain_id") {
	config.ChainID = sec.Key("chain_id").String()
} else {
	// backward compatibility; use profile name to determine the chain ID
	// (deprecated; chain_id will be required after 2020-01 — the original
	// comment said "2010-01", a typo; the notice below says 2020-01).
	switch profile {
	case "main", "default":
		config.ChainID = params.MainnetChainID.String()
	case "pangaea":
		config.ChainID = params.PangaeaChainID.String()
	default:
		config.ChainID = params.TestnetChainID.String()
	}
	// Warn on stderr; write errors are deliberately ignored (best effort).
	_, _ = fmt.Fprintf(os.Stderr,
		"NOTICE: Chain ID not found in config profile, assuming %s; "+
			"please add \"chain_id = %s\" to section [%s] of wallet.ini "+
			"before 2020-01\n",
		config.ChainID, config.ChainID, profile)
}
if sec.HasKey("shards") { if sec.HasKey("shards") {
config.Shards = sec.Key("shards").MustInt() config.Shards = sec.Key("shards").MustInt()

@ -5,6 +5,7 @@ import (
"reflect" "reflect"
"testing" "testing"
"github.com/harmony-one/harmony/internal/params"
"github.com/harmony-one/harmony/p2p" "github.com/harmony-one/harmony/p2p"
) )
@ -12,6 +13,7 @@ func TestReadWalletProfile(t *testing.T) {
config := []*WalletProfile{ config := []*WalletProfile{
{ {
Profile: "default", Profile: "default",
ChainID: params.MainnetChainID.String(),
Bootnodes: []string{"127.0.0.1:9000/abcd", "127.0.0.1:9999/daeg"}, Bootnodes: []string{"127.0.0.1:9000/abcd", "127.0.0.1:9999/daeg"},
Shards: 4, Shards: 4,
RPCServer: [][]p2p.Peer{ RPCServer: [][]p2p.Peer{
@ -59,6 +61,7 @@ func TestReadWalletProfile(t *testing.T) {
}, },
{ {
Profile: "testnet", Profile: "testnet",
ChainID: params.TestnetChainID.String(),
Bootnodes: []string{"192.168.0.1:9990/abcd", "127.0.0.1:8888/daeg"}, Bootnodes: []string{"192.168.0.1:9990/abcd", "127.0.0.1:8888/daeg"},
Shards: 3, Shards: 3,
RPCServer: [][]p2p.Peer{ RPCServer: [][]p2p.Peer{

@ -150,7 +150,9 @@ func (node *Node) GetBalanceOfAddress(address common.Address) (*big.Int, error)
utils.Logger().Error().Err(err).Msg("Failed to get chain state") utils.Logger().Error().Err(err).Msg("Failed to get chain state")
return nil, err return nil, err
} }
return state.GetBalance(address), nil balance := big.NewInt(0)
balance.SetBytes(state.GetBalance(address).Bytes())
return balance, nil
} }
// AddFaucetContractToPendingTransactions adds the faucet contract the genesis block. // AddFaucetContractToPendingTransactions adds the faucet contract the genesis block.

@ -120,6 +120,8 @@ type Node struct {
TxPool *core.TxPool // TODO migrate to TxPool from pendingTransactions list below TxPool *core.TxPool // TODO migrate to TxPool from pendingTransactions list below
CxPool *core.CxPool // pool for missing cross shard receipts resend
pendingTransactions types.Transactions // All the transactions received but not yet processed for Consensus pendingTransactions types.Transactions // All the transactions received but not yet processed for Consensus
pendingTxMutex sync.Mutex pendingTxMutex sync.Mutex
recentTxsStats types.RecentTxsStats recentTxsStats types.RecentTxsStats
@ -288,12 +290,10 @@ func (node *Node) AddPendingTransaction(newTx *types.Transaction) {
// AddPendingReceipts adds one receipt message to pending list. // AddPendingReceipts adds one receipt message to pending list.
func (node *Node) AddPendingReceipts(receipts *types.CXReceiptsProof) { func (node *Node) AddPendingReceipts(receipts *types.CXReceiptsProof) {
if node.NodeConfig.GetNetworkType() != nodeconfig.Mainnet { node.pendingCXMutex.Lock()
node.pendingCXMutex.Lock() node.pendingCXReceipts = append(node.pendingCXReceipts, receipts)
node.pendingCXReceipts = append(node.pendingCXReceipts, receipts) node.pendingCXMutex.Unlock()
node.pendingCXMutex.Unlock() utils.Logger().Error().Int("totalPendingReceipts", len(node.pendingCXReceipts)).Msg("Got ONE more receipt message")
utils.Logger().Error().Int("totalPendingReceipts", len(node.pendingCXReceipts)).Msg("Got ONE more receipt message")
}
} }
// Take out a subset of valid transactions from the pending transaction list // Take out a subset of valid transactions from the pending transaction list
@ -367,8 +367,11 @@ func New(host p2p.Host, consensusObj *consensus.Consensus, chainDBFactory shardc
} }
chainConfig := *params.TestnetChainConfig chainConfig := *params.TestnetChainConfig
if node.NodeConfig.GetNetworkType() == nodeconfig.Mainnet { switch node.NodeConfig.GetNetworkType() {
case nodeconfig.Mainnet:
chainConfig = *params.MainnetChainConfig chainConfig = *params.MainnetChainConfig
case nodeconfig.Pangaea:
chainConfig = *params.PangaeaChainConfig
} }
collection := shardchain.NewCollection( collection := shardchain.NewCollection(
@ -391,9 +394,10 @@ func New(host p2p.Host, consensusObj *consensus.Consensus, chainDBFactory shardc
node.BeaconBlockChannel = make(chan *types.Block) node.BeaconBlockChannel = make(chan *types.Block)
node.recentTxsStats = make(types.RecentTxsStats) node.recentTxsStats = make(types.RecentTxsStats)
node.TxPool = core.NewTxPool(core.DefaultTxPoolConfig, node.Blockchain().Config(), blockchain) node.TxPool = core.NewTxPool(core.DefaultTxPoolConfig, node.Blockchain().Config(), blockchain)
node.Worker = worker.New(node.Blockchain().Config(), blockchain, chain.Engine, node.Consensus.ShardID) node.CxPool = core.NewCxPool(core.CxPoolSize)
node.Worker = worker.New(node.Blockchain().Config(), blockchain, chain.Engine)
if node.Blockchain().ShardID() != 0 { if node.Blockchain().ShardID() != 0 {
node.BeaconWorker = worker.New(node.Beaconchain().Config(), beaconChain, chain.Engine, node.Consensus.ShardID) node.BeaconWorker = worker.New(node.Beaconchain().Config(), beaconChain, chain.Engine)
} }
node.Consensus.VerifiedNewBlock = make(chan *types.Block) node.Consensus.VerifiedNewBlock = make(chan *types.Block)
@ -457,10 +461,10 @@ func New(host p2p.Host, consensusObj *consensus.Consensus, chainDBFactory shardc
return &node return &node
} }
// GetInitShardState initialize shard state from latest epoch and update committee pub keys for consensus and drand // CalculateInitShardState initialize shard state from latest epoch and update committee pub keys for consensus and drand
func (node *Node) GetInitShardState() (err error) { func (node *Node) CalculateInitShardState() (err error) {
if node.Consensus == nil { if node.Consensus == nil {
return ctxerror.New("[GetInitShardState] consenus is nil; Cannot figure out shardID") return ctxerror.New("[CalculateInitShardState] consenus is nil; Cannot figure out shardID")
} }
shardID := node.Consensus.ShardID shardID := node.Consensus.ShardID
@ -472,11 +476,11 @@ func (node *Node) GetInitShardState() (err error) {
Uint64("blockNum", blockNum). Uint64("blockNum", blockNum).
Uint32("shardID", shardID). Uint32("shardID", shardID).
Uint64("epoch", epoch.Uint64()). Uint64("epoch", epoch.Uint64()).
Msg("[GetInitShardState] Try To Get PublicKeys from database") Msg("[CalculateInitShardState] Try To Get PublicKeys from database")
pubKeys := core.GetPublicKeys(epoch, shardID) pubKeys := core.CalculatePublicKeys(epoch, shardID)
if len(pubKeys) == 0 { if len(pubKeys) == 0 {
return ctxerror.New( return ctxerror.New(
"[GetInitShardState] PublicKeys is Empty, Cannot update public keys", "[CalculateInitShardState] PublicKeys is Empty, Cannot update public keys",
"shardID", shardID, "shardID", shardID,
"blockNum", blockNum) "blockNum", blockNum)
} }
@ -486,7 +490,7 @@ func (node *Node) GetInitShardState() (err error) {
utils.Logger().Info(). utils.Logger().Info().
Uint64("blockNum", blockNum). Uint64("blockNum", blockNum).
Int("numPubKeys", len(pubKeys)). Int("numPubKeys", len(pubKeys)).
Msg("[GetInitShardState] Successfully updated public keys") Msg("[CalculateInitShardState] Successfully updated public keys")
node.Consensus.UpdatePublicKeys(pubKeys) node.Consensus.UpdatePublicKeys(pubKeys)
node.Consensus.SetMode(consensus.Normal) node.Consensus.SetMode(consensus.Normal)
return nil return nil
@ -566,3 +570,8 @@ func (node *Node) initNodeConfiguration() (service.NodeConfig, chan p2p.Peer) {
func (node *Node) AccountManager() *accounts.Manager { func (node *Node) AccountManager() *accounts.Manager {
return node.accountManager return node.accountManager
} }
// ServiceManager returns the node's service manager.
func (node *Node) ServiceManager() *service.Manager {
	return node.serviceManager
}

@ -47,20 +47,56 @@ func (node *Node) BroadcastCXReceipts(newBlock *types.Block, lastCommits []byte)
if i == int(myShardID) { if i == int(myShardID) {
continue continue
} }
cxReceipts, err := node.Blockchain().ReadCXReceipts(uint32(i), newBlock.NumberU64(), newBlock.Hash(), false) go node.BroadcastCXReceiptsWithShardID(newBlock, commitSig, commitBitmap, uint32(i))
if err != nil || len(cxReceipts) == 0 { }
utils.Logger().Warn().Err(err).Uint32("ToShardID", uint32(i)).Int("numCXReceipts", len(cxReceipts)).Msg("[BroadcastCXReceipts] No ReadCXReceipts found") }
// BroadcastCXReceiptsWithShardID sends this block's cross-shard receipts for
// the destination shard toShardID, together with a Merkle proof and the
// block's commit signature/bitmap, to that shard's p2p group.
func (node *Node) BroadcastCXReceiptsWithShardID(block *types.Block, commitSig []byte, commitBitmap []byte, toShardID uint32) {
	myShardID := node.Consensus.ShardID
	utils.Logger().Info().Uint32("toShardID", toShardID).Uint32("myShardID", myShardID).Uint64("blockNum", block.NumberU64()).Msg("[BroadcastCXReceiptsWithShardID]")

	receipts, err := node.Blockchain().ReadCXReceipts(toShardID, block.NumberU64(), block.Hash(), false)
	if err != nil || len(receipts) == 0 {
		// Nothing recorded for this destination shard (or the read failed).
		utils.Logger().Info().Err(err).Uint32("ToShardID", toShardID).Int("numCXReceipts", len(receipts)).Msg("[BroadcastCXReceiptsWithShardID] No ReadCXReceipts found")
		return
	}

	proof, err := node.Blockchain().CXMerkleProof(toShardID, block)
	if err != nil {
		utils.Logger().Warn().Uint32("ToShardID", toShardID).Msg("[BroadcastCXReceiptsWithShardID] Unable to get merkleProof")
		return
	}
	utils.Logger().Info().Uint32("ToShardID", toShardID).Msg("[BroadcastCXReceiptsWithShardID] ReadCXReceipts and MerkleProof Found")

	// go-statement arguments are evaluated in the calling goroutine, so
	// constructing the message here first does not change execution order.
	msg := host.ConstructP2pMessage(byte(0), proto_node.ConstructCXReceiptsProof(receipts, proof, block.Header(), commitSig, commitBitmap))
	go node.host.SendMessageToGroups([]p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(toShardID))}, msg)
}
// BroadcastMissingCXReceipts broadcasts missing cross shard receipts per request
func (node *Node) BroadcastMissingCXReceipts() {
sendNextTime := []core.CxEntry{}
it := node.CxPool.Pool().Iterator()
for entry := range it.C {
cxEntry := entry.(core.CxEntry)
toShardID := cxEntry.ToShardID
blk := node.Blockchain().GetBlockByHash(cxEntry.BlockHash)
if blk == nil {
continue continue
} }
merkleProof, err := node.Blockchain().CXMerkleProof(uint32(i), newBlock) blockNum := blk.NumberU64()
if err != nil { nextHeader := node.Blockchain().GetHeaderByNumber(blockNum + 1)
utils.Logger().Warn().Uint32("ToShardID", uint32(i)).Msg("[BroadcastCXReceipts] Unable to get merkleProof") if nextHeader == nil {
sendNextTime = append(sendNextTime, cxEntry)
continue continue
} }
utils.Logger().Info().Uint32("ToShardID", uint32(i)).Msg("[BroadcastCXReceipts] ReadCXReceipts and MerkleProof Found") sig := nextHeader.LastCommitSignature()
bitmap := nextHeader.LastCommitBitmap()
groupID := p2p.ShardID(i) node.BroadcastCXReceiptsWithShardID(blk, sig[:], bitmap, toShardID)
go node.host.SendMessageToGroups([]p2p.GroupID{p2p.NewGroupIDByShardID(groupID)}, host.ConstructP2pMessage(byte(0), proto_node.ConstructCXReceiptsProof(cxReceipts, merkleProof, newBlock.Header(), commitSig, commitBitmap))) }
node.CxPool.Clear()
// this should not happen or maybe happen for impatient user
for _, entry := range sendNextTime {
node.CxPool.Add(entry)
} }
} }

@ -72,7 +72,7 @@ func (node *Node) ExplorerMessageHandler(payload []byte) {
return return
} }
node.AddNewBlockForExplorer() node.AddNewBlockForExplorer(block)
node.commitBlockForExplorer(block) node.commitBlockForExplorer(block)
} else if msg.Type == msg_pb.MessageType_PREPARED { } else if msg.Type == msg_pb.MessageType_PREPARED {
@ -91,7 +91,7 @@ func (node *Node) ExplorerMessageHandler(payload []byte) {
msgs := node.Consensus.PbftLog.GetMessagesByTypeSeqHash(msg_pb.MessageType_COMMITTED, blockObj.NumberU64(), blockObj.Hash()) msgs := node.Consensus.PbftLog.GetMessagesByTypeSeqHash(msg_pb.MessageType_COMMITTED, blockObj.NumberU64(), blockObj.Hash())
// If found, then add the new block into blockchain db. // If found, then add the new block into blockchain db.
if len(msgs) > 0 { if len(msgs) > 0 {
node.AddNewBlockForExplorer() node.AddNewBlockForExplorer(blockObj)
node.commitBlockForExplorer(blockObj) node.commitBlockForExplorer(blockObj)
} }
} }
@ -99,38 +99,29 @@ func (node *Node) ExplorerMessageHandler(payload []byte) {
} }
// AddNewBlockForExplorer add new block for explorer. // AddNewBlockForExplorer add new block for explorer.
func (node *Node) AddNewBlockForExplorer() { func (node *Node) AddNewBlockForExplorer(block *types.Block) {
utils.Logger().Debug().Msg("[Explorer] Add new block for explorer") utils.Logger().Debug().Uint64("blockHeight", block.NumberU64()).Msg("[Explorer] Adding new block for explorer node")
// Search for the next block in PbftLog and commit the block into blockchain for explorer node. if err := node.AddNewBlock(block); err == nil {
for { if core.IsEpochLastBlock(block) {
blocks := node.Consensus.PbftLog.GetBlocksByNumber(node.Blockchain().CurrentBlock().NumberU64() + 1) node.Consensus.UpdateConsensusInformation()
if len(blocks) == 0 {
break
} else {
if len(blocks) > 1 {
utils.Logger().Error().Msg("[Explorer] We should have not received more than one block with the same block height.")
}
utils.Logger().Debug().Uint64("blockHeight", blocks[0].NumberU64()).Msg("Adding new block for explorer node")
if err := node.AddNewBlock(blocks[0]); err == nil {
// Clean up the blocks to avoid OOM.
node.Consensus.PbftLog.DeleteBlockByNumber(blocks[0].NumberU64())
// Do dump all blocks from state syncing for explorer one time
// TODO: some blocks can be dumped before state syncing finished.
// And they would be dumped again here. Please fix it.
once.Do(func() {
utils.Logger().Info().Int64("starting height", int64(blocks[0].NumberU64())-1).
Msg("[Explorer] Populating explorer data from state synced blocks")
go func() {
for blockHeight := int64(blocks[0].NumberU64()) - 1; blockHeight >= 0; blockHeight-- {
explorer.GetStorageInstance(node.SelfPeer.IP, node.SelfPeer.Port, true).Dump(
node.Blockchain().GetBlockByNumber(uint64(blockHeight)), uint64(blockHeight))
}
}()
})
} else {
utils.Logger().Error().Err(err).Msg("[Explorer] Error when adding new block for explorer node")
}
} }
// Clean up the blocks to avoid OOM.
node.Consensus.PbftLog.DeleteBlockByNumber(block.NumberU64())
// Do dump all blocks from state syncing for explorer one time
// TODO: some blocks can be dumped before state syncing finished.
// And they would be dumped again here. Please fix it.
once.Do(func() {
utils.Logger().Info().Int64("starting height", int64(block.NumberU64())-1).
Msg("[Explorer] Populating explorer data from state synced blocks")
go func() {
for blockHeight := int64(block.NumberU64()) - 1; blockHeight >= 0; blockHeight-- {
explorer.GetStorageInstance(node.SelfPeer.IP, node.SelfPeer.Port, true).Dump(
node.Blockchain().GetBlockByNumber(uint64(blockHeight)), uint64(blockHeight))
}
}()
})
} else {
utils.Logger().Error().Err(err).Msg("[Explorer] Error when adding new block for explorer node")
} }
} }

@ -41,7 +41,7 @@ type genesisInitializer struct {
// InitChainDB sets up a new genesis block in the database for the given shard. // InitChainDB sets up a new genesis block in the database for the given shard.
func (gi *genesisInitializer) InitChainDB(db ethdb.Database, shardID uint32) error { func (gi *genesisInitializer) InitChainDB(db ethdb.Database, shardID uint32) error {
shardState := core.GetInitShardState() shardState := core.CalculateInitShardState()
if shardID != 0 { if shardID != 0 {
// store only the local shard for shard chains // store only the local shard for shard chains
c := shardState.FindCommitteeByID(shardID) c := shardState.FindCommitteeByID(shardID)
@ -64,7 +64,7 @@ func (node *Node) SetupGenesisBlock(db ethdb.Database, shardID uint32, myShardSt
// Initialize genesis block and blockchain // Initialize genesis block and blockchain
genesisAlloc := make(core.GenesisAlloc) genesisAlloc := make(core.GenesisAlloc)
chainConfig := params.ChainConfig{} chainConfig := *params.TestnetChainConfig
switch node.NodeConfig.GetNetworkType() { switch node.NodeConfig.GetNetworkType() {
case nodeconfig.Mainnet: case nodeconfig.Mainnet:
@ -75,8 +75,10 @@ func (node *Node) SetupGenesisBlock(db ethdb.Database, shardID uint32, myShardSt
genesisFunds = genesisFunds.Mul(genesisFunds, big.NewInt(denominations.One)) genesisFunds = genesisFunds.Mul(genesisFunds, big.NewInt(denominations.One))
genesisAlloc[foundationAddress] = core.GenesisAccount{Balance: genesisFunds} genesisAlloc[foundationAddress] = core.GenesisAccount{Balance: genesisFunds}
} }
case nodeconfig.Pangaea:
chainConfig = *params.PangaeaChainConfig
fallthrough // the rest is the same as testnet
default: // all other types share testnet config default: // all other types share testnet config
chainConfig = *params.TestnetChainConfig
// Tests account for txgen to use // Tests account for txgen to use
node.AddTestingAddresses(genesisAlloc, TestAccountNumber) node.AddTestingAddresses(genesisAlloc, TestAccountNumber)

@ -165,9 +165,7 @@ func (node *Node) messageHandler(content []byte, sender libp2p_peer.ID) {
Msg("block sync") Msg("block sync")
} else { } else {
// for non-beaconchain node, subscribe to beacon block broadcast // for non-beaconchain node, subscribe to beacon block broadcast
role := node.NodeConfig.Role() if node.Blockchain().ShardID() != 0 {
if role == nodeconfig.Validator {
for _, block := range blocks { for _, block := range blocks {
if block.ShardID() == 0 { if block.ShardID() == 0 {
utils.Logger().Info(). utils.Logger().Info().
@ -368,6 +366,7 @@ func (node *Node) PostConsensusProcessing(newBlock *types.Block, commitSigAndBit
// Update last consensus time for metrics // Update last consensus time for metrics
// TODO: randomly selected a few validators to broadcast messages instead of only leader broadcast // TODO: randomly selected a few validators to broadcast messages instead of only leader broadcast
// TODO: refactor the asynchronous calls to separate go routine.
node.lastConsensusTime = time.Now().Unix() node.lastConsensusTime = time.Now().Unix()
if node.Consensus.PubKey.IsEqual(node.Consensus.LeaderPubKey) { if node.Consensus.PubKey.IsEqual(node.Consensus.LeaderPubKey) {
if node.NodeConfig.ShardID == 0 { if node.NodeConfig.ShardID == 0 {
@ -392,6 +391,9 @@ func (node *Node) PostConsensusProcessing(newBlock *types.Block, commitSigAndBit
} }
} }
// Broadcast client requested missing cross shard receipts if there is any
node.BroadcastMissingCXReceipts()
// TODO chao: uncomment this after beacon syncing is stable // TODO chao: uncomment this after beacon syncing is stable
// node.Blockchain().UpdateCXReceiptsCheckpointsByBlock(newBlock) // node.Blockchain().UpdateCXReceiptsCheckpointsByBlock(newBlock)
@ -439,7 +441,7 @@ func (node *Node) PostConsensusProcessing(newBlock *types.Block, commitSigAndBit
// ctxerror.Log15(utils.Logger().Error, e) // ctxerror.Log15(utils.Logger().Error, e)
// } // }
// } // }
// shardState, err := newBlockHeader.GetShardState() // shardState, err := newBlockHeader.CalculateShardState()
// if err != nil { // if err != nil {
// e := ctxerror.New("cannot get shard state from header").WithCause(err) // e := ctxerror.New("cannot get shard state from header").WithCause(err)
// ctxerror.Log15(utils.Logger().Error, e) // ctxerror.Log15(utils.Logger().Error, e)
@ -482,7 +484,7 @@ var (
) )
func initGenesisCatalog() { func initGenesisCatalog() {
genesisShardState := core.GetInitShardState() genesisShardState := core.CalculateInitShardState()
for _, committee := range genesisShardState { for _, committee := range genesisShardState {
for i, nodeID := range committee.NodeList { for i, nodeID := range committee.NodeList {
genesisNode := &genesisNode{ genesisNode := &genesisNode{

@ -131,7 +131,7 @@ func (node *Node) proposeShardStateWithoutBeaconSync(block *types.Block) shard.S
} }
nextEpoch := new(big.Int).Add(block.Header().Epoch(), common.Big1) nextEpoch := new(big.Int).Add(block.Header().Epoch(), common.Big1)
return core.GetShardState(nextEpoch) return core.CalculateShardState(nextEpoch)
} }
func (node *Node) proposeShardState(block *types.Block) error { func (node *Node) proposeShardState(block *types.Block) error {
@ -193,6 +193,10 @@ func (node *Node) proposeLocalShardState(block *types.Block) {
} }
func (node *Node) proposeReceiptsProof() []*types.CXReceiptsProof { func (node *Node) proposeReceiptsProof() []*types.CXReceiptsProof {
if !node.Blockchain().Config().IsCrossTx(node.Worker.GetNewEpoch()) {
return []*types.CXReceiptsProof{}
}
numProposed := 0 numProposed := 0
validReceiptsList := []*types.CXReceiptsProof{} validReceiptsList := []*types.CXReceiptsProof{}
pendingReceiptsList := []*types.CXReceiptsProof{} pendingReceiptsList := []*types.CXReceiptsProof{}

@ -23,10 +23,11 @@ import (
// Constants related to doing syncing. // Constants related to doing syncing.
const ( const (
lastMileThreshold = 4 lastMileThreshold = 4
inSyncThreshold = 1 // unit in number of block inSyncThreshold = 1 // unit in number of block
SyncFrequency = 10 // unit in second SyncFrequency = 10 // unit in second
MinConnectedPeers = 10 // minimum number of peers connected to in node syncing BeaconSyncFrequency = 5 // unit in second
MinConnectedPeers = 10 // minimum number of peers connected to in node syncing
) )
// getNeighborPeers is a helper function to return list of peers // getNeighborPeers is a helper function to return list of peers
@ -160,84 +161,90 @@ func (p *LocalSyncingPeerProvider) SyncingPeers(shardID uint32) (peers []p2p.Pee
// DoBeaconSyncing update received beaconchain blocks and downloads missing beacon chain blocks // DoBeaconSyncing update received beaconchain blocks and downloads missing beacon chain blocks
func (node *Node) DoBeaconSyncing() { func (node *Node) DoBeaconSyncing() {
go func(node *Node) {
for {
select {
case beaconBlock := <-node.BeaconBlockChannel:
node.beaconSync.AddLastMileBlock(beaconBlock)
}
}
}(node)
for { for {
select { if node.beaconSync == nil {
case beaconBlock := <-node.BeaconBlockChannel: utils.Logger().Info().Msg("initializing beacon sync")
if node.beaconSync == nil { node.beaconSync = syncing.CreateStateSync(node.SelfPeer.IP, node.SelfPeer.Port, node.GetSyncID())
utils.Logger().Info().Msg("initializing beacon sync") }
node.beaconSync = syncing.CreateStateSync(node.SelfPeer.IP, node.SelfPeer.Port, node.GetSyncID()) if node.beaconSync.GetActivePeerNumber() == 0 {
utils.Logger().Info().Msg("no peers; bootstrapping beacon sync config")
// 0 means shardID=0 here
peers, err := node.SyncingPeerProvider.SyncingPeers(0)
if err != nil {
utils.Logger().Warn().
Err(err).
Msg("cannot retrieve beacon syncing peers")
continue
} }
if node.beaconSync.GetActivePeerNumber() == 0 { if err := node.beaconSync.CreateSyncConfig(peers, true); err != nil {
utils.Logger().Info().Msg("no peers; bootstrapping beacon sync config") utils.Logger().Warn().Err(err).Msg("cannot create beacon sync config")
peers, err := node.SyncingPeerProvider.SyncingPeers(0) continue
if err != nil {
utils.Logger().Warn().
Err(err).
Msg("cannot retrieve beacon syncing peers")
continue
}
if err := node.beaconSync.CreateSyncConfig(peers, true); err != nil {
utils.Logger().Warn().Err(err).Msg("cannot create beacon sync config")
continue
}
} }
node.beaconSync.AddLastMileBlock(beaconBlock)
node.beaconSync.SyncLoop(node.Beaconchain(), node.BeaconWorker, false, true)
} }
node.beaconSync.SyncLoop(node.Beaconchain(), node.BeaconWorker, true)
time.Sleep(BeaconSyncFrequency * time.Second)
} }
} }
// DoSyncing keep the node in sync with other peers, willJoinConsensus means the node will try to join consensus after catch up // DoSyncing keep the node in sync with other peers, willJoinConsensus means the node will try to join consensus after catch up
func (node *Node) DoSyncing(bc *core.BlockChain, worker *worker.Worker, willJoinConsensus bool) { func (node *Node) DoSyncing(bc *core.BlockChain, worker *worker.Worker, willJoinConsensus bool) {
ticker := time.NewTicker(SyncFrequency * time.Second)
SyncingLoop: SyncingLoop:
for { for {
select { if node.stateSync == nil {
case <-ticker.C: node.stateSync = syncing.CreateStateSync(node.SelfPeer.IP, node.SelfPeer.Port, node.GetSyncID())
if node.stateSync == nil { utils.Logger().Debug().Msg("[SYNC] initialized state sync")
node.stateSync = syncing.CreateStateSync(node.SelfPeer.IP, node.SelfPeer.Port, node.GetSyncID()) }
if node.stateSync.GetActivePeerNumber() < MinConnectedPeers {
utils.Logger().Debug().Msg("[SYNC] initialized state sync") shardID := bc.ShardID()
peers, err := node.SyncingPeerProvider.SyncingPeers(shardID)
if err != nil {
utils.Logger().Warn().
Err(err).
Uint32("shard_id", shardID).
Msg("cannot retrieve syncing peers")
continue SyncingLoop
} }
if node.stateSync.GetActivePeerNumber() < MinConnectedPeers { if err := node.stateSync.CreateSyncConfig(peers, false); err != nil {
shardID := bc.ShardID() utils.Logger().Warn().
peers, err := node.SyncingPeerProvider.SyncingPeers(shardID) Err(err).
if err != nil { Interface("peers", peers).
utils.Logger().Warn(). Msg("[SYNC] create peers error")
Err(err). continue SyncingLoop
Uint32("shard_id", shardID). }
Msg("cannot retrieve syncing peers") utils.Logger().Debug().Int("len", node.stateSync.GetActivePeerNumber()).Msg("[SYNC] Get Active Peers")
continue SyncingLoop }
} if node.stateSync.IsOutOfSync(bc) {
if err := node.stateSync.CreateSyncConfig(peers, false); err != nil { node.stateMutex.Lock()
utils.Logger().Warn(). node.State = NodeNotInSync
Err(err). node.stateMutex.Unlock()
Interface("peers", peers). if willJoinConsensus {
Msg("[SYNC] create peers error") node.Consensus.BlocksNotSynchronized()
continue SyncingLoop }
} node.stateSync.SyncLoop(bc, worker, false)
utils.Logger().Debug().Int("len", node.stateSync.GetActivePeerNumber()).Msg("[SYNC] Get Active Peers") if node.NodeConfig.Role() == nodeconfig.ExplorerNode {
node.Consensus.UpdateConsensusInformation()
} }
if node.stateSync.IsOutOfSync(bc) { if willJoinConsensus {
node.stateMutex.Lock() node.stateMutex.Lock()
node.State = NodeNotInSync node.State = NodeReadyForConsensus
node.stateMutex.Unlock() node.stateMutex.Unlock()
if willJoinConsensus { node.Consensus.BlocksSynchronized()
node.Consensus.BlocksNotSynchronized()
}
node.stateSync.SyncLoop(bc, worker, willJoinConsensus, false)
if willJoinConsensus {
node.stateMutex.Lock()
node.State = NodeReadyForConsensus
node.stateMutex.Unlock()
node.Consensus.BlocksSynchronized()
}
} }
node.stateMutex.Lock()
node.State = NodeReadyForConsensus
node.stateMutex.Unlock()
} }
node.stateMutex.Lock()
node.State = NodeReadyForConsensus
node.stateMutex.Unlock()
time.Sleep(SyncFrequency * time.Second)
} }
} }
@ -251,15 +258,20 @@ func (node *Node) SupportSyncing() {
node.InitSyncingServer() node.InitSyncingServer()
node.StartSyncingServer() node.StartSyncingServer()
joinConsensus := false
// Check if the current node is explorer node. // Check if the current node is explorer node.
isExplorerNode := node.NodeConfig.Role() == nodeconfig.ExplorerNode switch node.NodeConfig.Role() {
case nodeconfig.Validator:
joinConsensus = true
}
// Send new block to unsync node if the current node is not explorer node. // Send new block to unsync node if the current node is not explorer node.
if !isExplorerNode { // TODO: leo this pushing logic has to be removed
if joinConsensus {
go node.SendNewBlockToUnsync() go node.SendNewBlockToUnsync()
} }
go node.DoSyncing(node.Blockchain(), node.Worker, !isExplorerNode) go node.DoSyncing(node.Blockchain(), node.Worker, joinConsensus)
} }
// InitSyncingServer starts downloader server. // InitSyncingServer starts downloader server.
@ -417,7 +429,7 @@ func (node *Node) CalculateResponse(request *downloader_pb.DownloaderRequest, in
return response, nil return response, nil
} else if len(node.peerRegistrationRecord) >= maxBroadcastNodes { } else if len(node.peerRegistrationRecord) >= maxBroadcastNodes {
response.Type = downloader_pb.DownloaderResponse_FAIL response.Type = downloader_pb.DownloaderResponse_FAIL
utils.GetLogInstance().Warn("[SYNC] maximum registration limit exceeds", "ip", ip, "port", port) utils.GetLogInstance().Debug("[SYNC] maximum registration limit exceeds", "ip", ip, "port", port)
return response, nil return response, nil
} else { } else {
response.Type = downloader_pb.DownloaderResponse_FAIL response.Type = downloader_pb.DownloaderResponse_FAIL

@ -10,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/harmony-one/harmony/hmy" "github.com/harmony-one/harmony/hmy"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
"github.com/harmony-one/harmony/internal/hmyapi" "github.com/harmony-one/harmony/internal/hmyapi"
"github.com/harmony-one/harmony/internal/hmyapi/filters" "github.com/harmony-one/harmony/internal/hmyapi/filters"
"github.com/harmony-one/harmony/internal/utils" "github.com/harmony-one/harmony/internal/utils"
@ -32,7 +33,7 @@ var (
httpEndpoint = "" httpEndpoint = ""
wsEndpoint = "" wsEndpoint = ""
httpModules = []string{"hmy", "net"} httpModules = []string{"hmy", "net", "explorer"}
httpVirtualHosts = []string{"*"} httpVirtualHosts = []string{"*"}
httpTimeouts = rpc.DefaultHTTPTimeouts httpTimeouts = rpc.DefaultHTTPTimeouts
httpOrigins = []string{"*"} httpOrigins = []string{"*"}
@ -46,7 +47,7 @@ var (
// StartRPC start RPC service // StartRPC start RPC service
func (node *Node) StartRPC(nodePort string) error { func (node *Node) StartRPC(nodePort string) error {
// Gather all the possible APIs to surface // Gather all the possible APIs to surface
harmony, _ = hmy.New(node, node.TxPool, new(event.TypeMux), node.Consensus.ShardID) harmony, _ = hmy.New(node, node.TxPool, node.CxPool, new(event.TypeMux), node.Consensus.ShardID)
apis := node.APIs() apis := node.APIs()
@ -56,12 +57,17 @@ func (node *Node) StartRPC(nodePort string) error {
port, _ := strconv.Atoi(nodePort) port, _ := strconv.Atoi(nodePort)
httpEndpoint = fmt.Sprintf(":%v", port+rpcHTTPPortOffset) ip := ""
if !nodeconfig.GetPublicRPC() {
ip = "127.0.0.1"
}
httpEndpoint = fmt.Sprintf("%v:%v", ip, port+rpcHTTPPortOffset)
if err := node.startHTTP(httpEndpoint, apis, httpModules, httpOrigins, httpVirtualHosts, httpTimeouts); err != nil { if err := node.startHTTP(httpEndpoint, apis, httpModules, httpOrigins, httpVirtualHosts, httpTimeouts); err != nil {
return err return err
} }
wsEndpoint = fmt.Sprintf(":%v", port+rpcWSPortOffset) wsEndpoint = fmt.Sprintf("%v:%v", ip, port+rpcWSPortOffset)
if err := node.startWS(wsEndpoint, apis, wsModules, wsOrigins, true); err != nil { if err := node.startWS(wsEndpoint, apis, wsModules, wsOrigins, true); err != nil {
node.stopHTTP() node.stopHTTP()
return err return err

@ -50,8 +50,6 @@ type Worker struct {
gasFloor uint64 gasFloor uint64
gasCeil uint64 gasCeil uint64
shardID uint32
} }
// Returns a tuple where the first value is the txs sender account address, // Returns a tuple where the first value is the txs sender account address,
@ -114,7 +112,7 @@ func (w *Worker) SelectTransactionsForNewBlock(newBlockNum uint64, txs types.Tra
unselected := types.Transactions{} unselected := types.Transactions{}
invalid := types.Transactions{} invalid := types.Transactions{}
for _, tx := range txs { for _, tx := range txs {
if tx.ShardID() != w.shardID { if tx.ShardID() != w.chain.ShardID() {
invalid = append(invalid, tx) invalid = append(invalid, tx)
continue continue
} }
@ -234,14 +232,8 @@ func (w *Worker) UpdateCurrent(coinbase common.Address) error {
parent := w.chain.CurrentBlock() parent := w.chain.CurrentBlock()
num := parent.Number() num := parent.Number()
timestamp := time.Now().Unix() timestamp := time.Now().Unix()
// New block's epoch is the same as parent's...
epoch := new(big.Int).Set(parent.Header().Epoch())
// TODO: Don't depend on sharding state for epoch change. epoch := w.GetNewEpoch()
if len(parent.Header().ShardState()) > 0 && parent.NumberU64() != 0 {
// ... except if parent has a resharding assignment it increases by 1.
epoch = epoch.Add(epoch, common.Big1)
}
header := w.factory.NewHeader(epoch).With(). header := w.factory.NewHeader(epoch).With().
ParentHash(parent.Hash()). ParentHash(parent.Hash()).
Number(num.Add(num, common.Big1)). Number(num.Add(num, common.Big1)).
@ -273,6 +265,19 @@ func (w *Worker) GetCurrentState() *state.DB {
return w.current.state return w.current.state
} }
// GetNewEpoch gets the current epoch.
func (w *Worker) GetNewEpoch() *big.Int {
parent := w.chain.CurrentBlock()
epoch := new(big.Int).Set(parent.Header().Epoch())
// TODO: Don't depend on sharding state for epoch change.
if len(parent.Header().ShardState()) > 0 && parent.NumberU64() != 0 {
// ... except if parent has a resharding assignment it increases by 1.
epoch = epoch.Add(epoch, common.Big1)
}
return epoch
}
// GetCurrentReceipts get the receipts generated starting from the last state. // GetCurrentReceipts get the receipts generated starting from the last state.
func (w *Worker) GetCurrentReceipts() []*types.Receipt { func (w *Worker) GetCurrentReceipts() []*types.Receipt {
return w.current.receipts return w.current.receipts
@ -294,7 +299,7 @@ func (w *Worker) ProposeShardStateWithoutBeaconSync() shard.State {
return nil return nil
} }
nextEpoch := new(big.Int).Add(w.current.header.Epoch(), common.Big1) nextEpoch := new(big.Int).Add(w.current.header.Epoch(), common.Big1)
return core.GetShardState(nextEpoch) return core.CalculateShardState(nextEpoch)
} }
// FinalizeNewBlock generate a new block for the next consensus round. // FinalizeNewBlock generate a new block for the next consensus round.
@ -346,7 +351,7 @@ func (w *Worker) FinalizeNewBlock(sig []byte, signers []byte, viewID uint64, coi
} }
// New create a new worker object. // New create a new worker object.
func New(config *params.ChainConfig, chain *core.BlockChain, engine consensus_engine.Engine, shardID uint32) *Worker { func New(config *params.ChainConfig, chain *core.BlockChain, engine consensus_engine.Engine) *Worker {
worker := &Worker{ worker := &Worker{
config: config, config: config,
factory: blockfactory.NewFactory(config), factory: blockfactory.NewFactory(config),
@ -355,19 +360,12 @@ func New(config *params.ChainConfig, chain *core.BlockChain, engine consensus_en
} }
worker.gasFloor = 500000000000000000 worker.gasFloor = 500000000000000000
worker.gasCeil = 1000000000000000000 worker.gasCeil = 1000000000000000000
worker.shardID = shardID
parent := worker.chain.CurrentBlock() parent := worker.chain.CurrentBlock()
num := parent.Number() num := parent.Number()
timestamp := time.Now().Unix() timestamp := time.Now().Unix()
// New block's epoch is the same as parent's...
epoch := parent.Header().Epoch()
// TODO: Don't depend on sharding state for epoch change. epoch := worker.GetNewEpoch()
if len(parent.Header().ShardState()) > 0 && parent.NumberU64() != 0 {
// ... except if parent has a resharding assignment it increases by 1.
epoch = epoch.Add(epoch, common.Big1)
}
header := worker.factory.NewHeader(epoch).With(). header := worker.factory.NewHeader(epoch).With().
ParentHash(parent.Hash()). ParentHash(parent.Hash()).
Number(num.Add(num, common.Big1)). Number(num.Add(num, common.Big1)).

@ -44,7 +44,7 @@ func TestNewWorker(t *testing.T) {
chain, _ := core.NewBlockChain(database, nil, gspec.Config, chain2.Engine, vm.Config{}, nil) chain, _ := core.NewBlockChain(database, nil, gspec.Config, chain2.Engine, vm.Config{}, nil)
// Create a new worker // Create a new worker
worker := New(params.TestChainConfig, chain, chain2.Engine, 0) worker := New(params.TestChainConfig, chain, chain2.Engine)
if worker.GetCurrentState().GetBalance(crypto.PubkeyToAddress(testBankKey.PublicKey)).Cmp(testBankFunds) != 0 { if worker.GetCurrentState().GetBalance(crypto.PubkeyToAddress(testBankKey.PublicKey)).Cmp(testBankFunds) != 0 {
t.Error("Worker state is not setup correctly") t.Error("Worker state is not setup correctly")
@ -67,7 +67,7 @@ func TestCommitTransactions(t *testing.T) {
chain, _ := core.NewBlockChain(database, nil, gspec.Config, chain2.Engine, vm.Config{}, nil) chain, _ := core.NewBlockChain(database, nil, gspec.Config, chain2.Engine, vm.Config{}, nil)
// Create a new worker // Create a new worker
worker := New(params.TestChainConfig, chain, chain2.Engine, 0) worker := New(params.TestChainConfig, chain, chain2.Engine)
// Generate a test tx // Generate a test tx
baseNonce := worker.GetCurrentState().GetNonce(crypto.PubkeyToAddress(testBankKey.PublicKey)) baseNonce := worker.GetCurrentState().GetNonce(crypto.PubkeyToAddress(testBankKey.PublicKey))

@ -31,6 +31,6 @@ if [ -z "$NODE_ACCOUNT_ID" ]; then
exit 2 exit 2
fi fi
harmony -log_folder log -bootnodes $BN_MA -ip $PUB_IP -port $NODE_PORT -is_genesis -account_index $NODE_ACCOUNT_ID harmony -log_folder log -bootnodes $BN_MA -ip $PUB_IP -port $NODE_PORT -account_index $NODE_ACCOUNT_ID
# vim: ai ts=2 sw=2 et sts=2 ft=sh # vim: ai ts=2 sw=2 et sts=2 ft=sh

@ -12,13 +12,14 @@ SRC[wallet_stress_test]="cmd/client/wallet_stress_test/main.go cmd/client/wallet
BINDIR=bin BINDIR=bin
BUCKET=unique-bucket-bin BUCKET=unique-bucket-bin
PUBBUCKET=pub.harmony.one PUBBUCKET=pub.harmony.one
REL=s3 REL=
GOOS=linux GOOS=linux
GOARCH=amd64 GOARCH=amd64
FOLDER=/${WHOAMI:-$USER} FOLDER=/${WHOAMI:-$USER}
RACE= RACE=
VERBOSE= VERBOSE=
DEBUG=false DEBUG=false
NETWORK=main
unset -v progdir unset -v progdir
case "${0}" in case "${0}" in
@ -35,6 +36,9 @@ if [ "$(uname -s)" == "Darwin" ]; then
GOOS=darwin GOOS=darwin
LIB[libbls384_256.dylib]=${BLS_DIR}/lib/libbls384_256.dylib LIB[libbls384_256.dylib]=${BLS_DIR}/lib/libbls384_256.dylib
LIB[libmcl.dylib]=${MCL_DIR}/lib/libmcl.dylib LIB[libmcl.dylib]=${MCL_DIR}/lib/libmcl.dylib
LIB[libgmp.10.dylib]=/usr/local/opt/gmp/lib/libgmp.10.dylib
LIB[libgmpxx.4.dylib]=/usr/local/opt/gmp/lib/libgmpxx.4.dylib
LIB[libcrypto.1.0.0.dylib]=/usr/local/opt/openssl/lib/libcrypto.1.0.0.dylib
else else
MD5=md5sum MD5=md5sum
LIB[libbls384_256.so]=${BLS_DIR}/lib/libbls384_256.so LIB[libbls384_256.so]=${BLS_DIR}/lib/libbls384_256.so
@ -62,6 +66,7 @@ ACTION:
build build binaries only (default action) build build binaries only (default action)
upload upload binaries to s3 upload upload binaries to s3
pubwallet upload wallet to public bucket (bucket: $PUBBUCKET) pubwallet upload wallet to public bucket (bucket: $PUBBUCKET)
release upload binaries to release bucket
harmony|txgen|bootnode|wallet harmony|txgen|bootnode|wallet
only build the specified binary only build the specified binary
@ -89,6 +94,8 @@ function build_only
BUILTBY=${USER}@ BUILTBY=${USER}@
local build=$1 local build=$1
set -e
for bin in "${!SRC[@]}"; do for bin in "${!SRC[@]}"; do
if [[ -z "$build" || "$bin" == "$build" ]]; then if [[ -z "$build" || "$bin" == "$build" ]]; then
rm -f $BINDIR/$bin rm -f $BINDIR/$bin
@ -133,6 +140,45 @@ function upload
[ -e $BINDIR/md5sum.txt ] && $AWSCLI s3 cp $BINDIR/md5sum.txt s3://${BUCKET}$FOLDER/md5sum.txt --acl public-read [ -e $BINDIR/md5sum.txt ] && $AWSCLI s3 cp $BINDIR/md5sum.txt s3://${BUCKET}$FOLDER/md5sum.txt --acl public-read
} }
function release
{
AWSCLI=aws
if [ -n "$PROFILE" ]; then
AWSCLI+=" --profile $PROFILE"
fi
OS=$(uname -s)
case "$OS" in
"Linux")
FOLDER=release/linux-x86_64/$REL ;;
"Darwin")
FOLDER=release/darwin-x86_64/$REL ;;
*)
echo "Unsupported OS: $OS"
return ;;
esac
for bin in "${!SRC[@]}"; do
if [ -e $BINDIR/$bin ]; then
$AWSCLI s3 cp $BINDIR/$bin s3://${PUBBUCKET}/$FOLDER/$bin --acl public-read
else
echo "!! MISSGING $bin !!"
fi
done
for lib in "${!LIB[@]}"; do
if [ -e ${LIB[$lib]} ]; then
$AWSCLI s3 cp ${LIB[$lib]} s3://${PUBBUCKET}/$FOLDER/$lib --acl public-read
else
echo "!! MISSING ${LIB[$lib]} !!"
fi
done
[ -e $BINDIR/md5sum.txt ] && $AWSCLI s3 cp $BINDIR/md5sum.txt s3://${PUBBUCKET}/$FOLDER/md5sum.txt --acl public-read
}
function upload_wallet function upload_wallet
{ {
AWSCLI=aws AWSCLI=aws
@ -168,7 +214,7 @@ function upload_wallet
} }
################################ MAIN FUNCTION ############################## ################################ MAIN FUNCTION ##############################
while getopts "hp:a:o:b:f:rv" option; do while getopts "hp:a:o:b:f:rvN:" option; do
case $option in case $option in
h) usage ;; h) usage ;;
p) PROFILE=$OPTARG ;; p) PROFILE=$OPTARG ;;
@ -179,6 +225,7 @@ while getopts "hp:a:o:b:f:rv" option; do
r) RACE=-race ;; r) RACE=-race ;;
v) VERBOSE='-v -x' ;; v) VERBOSE='-v -x' ;;
d) DEBUG=true ;; d) DEBUG=true ;;
N) NETWORK=$OPTARG ;;
esac esac
done done
@ -188,9 +235,26 @@ shift $(($OPTIND-1))
ACTION=${1:-build} ACTION=${1:-build}
case "${NETWORK}" in
main)
REL=mainnet
;;
beta)
REL=testnet
;;
pangaea)
REL=pangaea
;;
*)
echo "${NETWORK}: invalid network"
exit
;;
esac
case "$ACTION" in case "$ACTION" in
"build") build_only ;; "build") build_only ;;
"upload") upload ;; "upload") upload ;;
"release") release ;;
"pubwallet") upload_wallet ;; "pubwallet") upload_wallet ;;
"harmony"|"wallet"|"txgen"|"bootnode") build_only $ACTION ;; "harmony"|"wallet"|"txgen"|"bootnode") build_only $ACTION ;;
*) usage ;; *) usage ;;

@ -1,4 +1,4 @@
#!/bin/bash #!/usr/bin/env bash
unset -v progname unset -v progname
progname="${0##*/}" progname="${0##*/}"
@ -101,10 +101,15 @@ usage: ${progname} [-1ch] [-k KEYFILE]
-s run setup env only (must run as root) -s run setup env only (must run as root)
-S run the ${progname} as non-root user (default: run as root) -S run the ${progname} as non-root user (default: run as root)
-p passfile use the given BLS passphrase file -p passfile use the given BLS passphrase file
-d just download the Harmony binaries (default: off)
-D do not download Harmony binaries (default: download when start) -D do not download Harmony binaries (default: download when start)
-m collect and upload node metrics to harmony prometheus + grafana -m collect and upload node metrics to harmony prometheus + grafana
-N network join the given network (main, beta, pangaea; default: main) -N network join the given network (main, beta, pangaea; default: main)
-t equivalent to -N pangaea (deprecated) -t equivalent to -N pangaea (deprecated)
-T nodetype specify the node type (validator, explorer; default: validator)
-i shardid specify the shard id (valid only with explorer node; default: 1)
-b download harmony_db files from shard specified by -i <shardid> (default: off)
-a dbfile specify the db file to download (default:off)
example: example:
@ -119,22 +124,31 @@ usage() {
exit 64 # EX_USAGE exit 64 # EX_USAGE
} }
unset start_clean loop run_as_root blspass do_not_download metrics network # =======
BUCKET=pub.harmony.one
OS=$(uname -s)
unset start_clean loop run_as_root blspass do_not_download download_only metrics network node_type shard_id download_harmony_db db_file_to_dl
start_clean=false start_clean=false
loop=true loop=true
run_as_root=true run_as_root=true
do_not_download=false do_not_download=false
download_only=false
metrics=false metrics=false
network=main network=main
node_type=validator
shard_id=1
download_harmony_db=false
${BLSKEYFILE=} ${BLSKEYFILE=}
unset OPTIND OPTARG opt unset OPTIND OPTARG opt
OPTIND=1 OPTIND=1
while getopts :1chk:sSp:DmN:t opt while getopts :1chk:sSp:dDmN:tT:i:ba: opt
do do
case "${opt}" in case "${opt}" in
'?') usage "unrecognized option -${OPTARG}";; '?') usage "unrecognized option -${OPTARG}";;
':') usage "missing argument for -${OPTARG}";; ':') usage "missing argument for -${OPTARG}";;
b) download_harmony_db=true;;
c) start_clean=true;; c) start_clean=true;;
1) loop=false;; 1) loop=false;;
h) print_usage; exit 0;; h) print_usage; exit 0;;
@ -142,10 +156,14 @@ do
s) setup_env; exit 0;; s) setup_env; exit 0;;
S) run_as_root=false ;; S) run_as_root=false ;;
p) blspass="${OPTARG}";; p) blspass="${OPTARG}";;
d) download_only=true;;
D) do_not_download=true;; D) do_not_download=true;;
m) metrics=true;; m) metrics=true;;
N) network="${OPTARG}";; N) network="${OPTARG}";;
t) network=pangaea;; t) network=pangaea;;
T) node_type="${OPTARG}";;
i) shard_id="${OPTARG}";;
a) db_file_to_dl="${OPTARG}";;
*) err 70 "unhandled option -${OPTARG}";; # EX_SOFTWARE *) err 70 "unhandled option -${OPTARG}";; # EX_SOFTWARE
esac esac
done done
@ -153,6 +171,12 @@ shift $((${OPTIND} - 1))
unset -v bootnodes REL network_type dns_zone unset -v bootnodes REL network_type dns_zone
case "${node_type}" in
validator|explorer) ;;
*)
usage ;;
esac
case "${network}" in case "${network}" in
main) main)
bootnodes=( bootnodes=(
@ -177,12 +201,12 @@ beta)
;; ;;
pangaea) pangaea)
bootnodes=( bootnodes=(
/ip4/54.86.126.90/tcp/9867/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv /ip4/54.86.126.90/tcp/9889/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv
/ip4/52.40.84.2/tcp/9867/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9 /ip4/52.40.84.2/tcp/9889/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9
) )
REL=master REL=pangaea
network_type=pangaea network_type=pangaea
dns_zone=n.hmny.io dns_zone=p.hmny.io
;; ;;
*) *)
err 64 "${network}: invalid network" err 64 "${network}: invalid network"
@ -195,43 +219,6 @@ case $# in
;; ;;
esac esac
if ${run_as_root}; then
check_root
fi
case "${BLSKEYFILE}" in
"")
unset -v f
for f in \
~/*--????-??-??T??-??-??.*Z--bls_???????????????????????????????????????????????????????????????????????????????????????????????? \
~/????????????????????????????????????????????????????????????????????????????????????????????????.key \
*--????-??-??T??-??-??.*Z--bls_???????????????????????????????????????????????????????????????????????????????????????????????? \
????????????????????????????????????????????????????????????????????????????????????????????????.key
do
[ -f "${f}" ] || continue
case "${BLSKEYFILE}" in
"")
BLSKEYFILE="${f}"
;;
*)
[ "${f}" -ef "${BLSKEYFILE}" ] || \
err 69 "multiple key files found (${f}, ${BLSKEYFILE}); please use -k to specify"
;;
esac
done
case "${BLSKEYFILE}" in
"") err 69 "could not autodetect BLS key file; please use -k to specify";;
esac
msg "autodetected BLS key file: ${BLSKEYFILE}"
;;
*)
msg "using manually specified BLS key file: ${BLSKEYFILE}"
;;
esac
BUCKET=pub.harmony.one
OS=$(uname -s)
if [ "$OS" == "Darwin" ]; then if [ "$OS" == "Darwin" ]; then
FOLDER=release/darwin-x86_64/$REL/ FOLDER=release/darwin-x86_64/$REL/
BIN=( harmony libbls384_256.dylib libcrypto.1.0.0.dylib libgmp.10.dylib libgmpxx.4.dylib libmcl.dylib md5sum.txt ) BIN=( harmony libbls384_256.dylib libcrypto.1.0.0.dylib libgmp.10.dylib libgmpxx.4.dylib libmcl.dylib md5sum.txt )
@ -241,21 +228,6 @@ if [ "$OS" == "Linux" ]; then
BIN=( harmony libbls384_256.so libcrypto.so.10 libgmp.so.10 libgmpxx.so.4 libmcl.so md5sum.txt ) BIN=( harmony libbls384_256.so libcrypto.so.10 libgmp.so.10 libgmpxx.so.4 libmcl.so md5sum.txt )
fi fi
any_new_binaries() {
local outdir
${do_not_download} && return 0
outdir="${1:-.}"
mkdir -p "${outdir}"
curl -sSf http://${BUCKET}.s3.amazonaws.com/${FOLDER}md5sum.txt -o "${outdir}/md5sum.txt.new" || return $?
if diff $outdir/md5sum.txt.new md5sum.txt
then
rm "${outdir}/md5sum.txt.new"
else
mv "${outdir}/md5sum.txt.new" "${outdir}/md5sum.txt"
return 1
fi
}
extract_checksum() { extract_checksum() {
awk -v basename="${1}" ' awk -v basename="${1}" '
{ {
@ -281,7 +253,7 @@ verify_checksum() {
checksum_file="${3}" checksum_file="${3}"
[ -f "${dir}/${checksum_file}" ] || return 0 [ -f "${dir}/${checksum_file}" ] || return 0
checksum_for_file="${dir}/${checksum_file}::${file}" checksum_for_file="${dir}/${checksum_file}::${file}"
extract_checksum "${file}" < "${dir}/${checksum_file}" > "${dir}/${checksum_for_file}" extract_checksum "${file}" < "${dir}/${checksum_file}" > "${checksum_for_file}"
[ -s "${dir}/${checksum_for_file}" ] || return 0 [ -s "${dir}/${checksum_for_file}" ] || return 0
if ! (cd "${dir}" && exec md5sum -c --status "${checksum_for_file}") if ! (cd "${dir}" && exec md5sum -c --status "${checksum_for_file}")
then then
@ -291,7 +263,6 @@ verify_checksum() {
return 0 return 0
} }
download_binaries() { download_binaries() {
local outdir local outdir
${do_not_download} && return 0 ${do_not_download} && return 0
@ -306,6 +277,156 @@ download_binaries() {
(cd "${outdir}" && exec openssl sha256 "${BIN[@]}") > "${outdir}/harmony-checksums.txt" (cd "${outdir}" && exec openssl sha256 "${BIN[@]}") > "${outdir}/harmony-checksums.txt"
} }
check_free_disk() {
local dir
dir="${1:-.}"
local free_disk=$(df -BG $dir | tail -n 1 | awk ' { print $4 } ' | tr -d G)
# need at least 50G free disk space
local need_disk=50
if [ $free_disk -gt $need_disk ]; then
return 0
else
return 1
fi
}
_curl_check_exist() {
local url=$1
local statuscode=$(curl -I --silent --output /dev/null --write-out "%{http_code}" $url)
if [ $statuscode -ne 200 ]; then
return 1
else
return 0
fi
}
_curl_download() {
local url=$1
local outdir=$2
local filename=$3
mkdir -p "${outdir}"
if _curl_check_exist $url; then
curl --progress-bar -Sf $url -o "${outdir}/$filename" || return $?
return 0
else
msg "failed to find/download $url"
return 1
fi
}
download_harmony_db_file() {
local shard_id
shard_id="${1}"
local file_to_dl="${2}"
local outdir=db
if ! check_free_disk; then
err 70 "do not have enough free disk space to download db tarball"
fi
url="http://${BUCKET}.s3.amazonaws.com/${FOLDER}db/md5sum.txt"
rm -f "${outdir}/md5sum.txt"
if ! _curl_download $url "${outdir}" md5sum.txt; then
err 70 "cannot download md5sum.txt"
fi
if [ -n "${file_to_dl}" ]; then
if grep -q "${file_to_dl}" "${outdir}/md5sum.txt"; then
url="http://${BUCKET}.s3.amazonaws.com/${FOLDER}db/${file_to_dl}"
if _curl_download $url "${outdir}" ${file_to_dl}; then
verify_checksum "${outdir}" "${file_to_dl}" md5sum.txt || return $?
msg "downlaoded ${file_to_dl}, extracting ..."
tar -C "${outdir}" -xvf "${outdir}/${file_to_dl}"
else
msg "can't download ${file_to_dl}"
fi
fi
return
fi
files=$(awk '{ print $2 }' ${outdir}/md5sum.txt)
echo "[available harmony db files for shard ${shard_id}]"
grep -oE harmony_db_${shard_id}-.*.tar "${outdir}/md5sum.txt"
echo
for file in $files; do
if [[ $file =~ "harmony_db_${shard_id}" ]]; then
echo -n "Do you want to download ${file} (choose one only) [y/n]?"
read yesno
if [[ "$yesno" = "y" || "$yesno" = "Y" ]]; then
url="http://${BUCKET}.s3.amazonaws.com/${FOLDER}db/$file"
if _curl_download $url "${outdir}" $file; then
verify_checksum "${outdir}" "${file}" md5sum.txt || return $?
msg "downlaoded $file, extracting ..."
tar -C "${outdir}" -xvf "${outdir}/${file}"
else
msg "can't download $file"
fi
break
fi
fi
done
}
if ${download_only}; then
download_binaries || err 69 "download node software failed"
exit 0
fi
if ${download_harmony_db}; then
download_harmony_db_file "${shard_id}" "${db_file_to_dl}" || err 70 "download harmony_db file failed"
exit 0
fi
if ${run_as_root}; then
check_root
fi
case "${BLSKEYFILE}" in
"")
unset -v f
for f in \
~/*--????-??-??T??-??-??.*Z--bls_???????????????????????????????????????????????????????????????????????????????????????????????? \
~/????????????????????????????????????????????????????????????????????????????????????????????????.key \
*--????-??-??T??-??-??.*Z--bls_???????????????????????????????????????????????????????????????????????????????????????????????? \
????????????????????????????????????????????????????????????????????????????????????????????????.key
do
[ -f "${f}" ] || continue
case "${BLSKEYFILE}" in
"")
BLSKEYFILE="${f}"
;;
*)
[ "${f}" -ef "${BLSKEYFILE}" ] || \
err 69 "multiple key files found (${f}, ${BLSKEYFILE}); please use -k to specify"
;;
esac
done
case "${BLSKEYFILE}" in
"") err 69 "could not autodetect BLS key file; please use -k to specify";;
esac
msg "autodetected BLS key file: ${BLSKEYFILE}"
;;
*)
msg "using manually specified BLS key file: ${BLSKEYFILE}"
;;
esac
any_new_binaries() {
local outdir
${do_not_download} && return 0
outdir="${1:-.}"
mkdir -p "${outdir}"
curl -sSf http://${BUCKET}.s3.amazonaws.com/${FOLDER}md5sum.txt -o "${outdir}/md5sum.txt.new" || return $?
if diff $outdir/md5sum.txt.new md5sum.txt
then
rm "${outdir}/md5sum.txt.new"
else
mv "${outdir}/md5sum.txt.new" "${outdir}/md5sum.txt"
return 1
fi
}
if any_new_binaries if any_new_binaries
then then
msg "binaries did not change" msg "binaries did not change"
@ -471,11 +592,19 @@ do
-bootnodes "${BN_MA}" -bootnodes "${BN_MA}"
-ip "${PUB_IP}" -ip "${PUB_IP}"
-port "${NODE_PORT}" -port "${NODE_PORT}"
-is_genesis
-blskey_file "${BLSKEYFILE}" -blskey_file "${BLSKEYFILE}"
-network_type="${network_type}" -network_type="${network_type}"
-dns_zone="${dns_zone}" -dns_zone="${dns_zone}"
) )
# backward compatible with older harmony node software
case "${node_type}" in
explorer)
args+=(
-node_type="${node_type}"
-shard_id="${shard_id}"
)
;;
esac
case "${metrics}" in case "${metrics}" in
true) true)
args+=( args+=(

@ -61,7 +61,7 @@ set_download () {
REL=testnet REL=testnet
;; ;;
pangaea) pangaea)
REL=master REL=pangaea
;; ;;
*) *)
err 64 "${network}: invalid network" err 64 "${network}: invalid network"

@ -107,7 +107,7 @@ func fundFaucetContract(chain *core.BlockChain) {
fmt.Println("--------- Funding addresses for Faucet Contract Call ---------") fmt.Println("--------- Funding addresses for Faucet Contract Call ---------")
fmt.Println() fmt.Println()
contractworker = pkgworker.New(params.TestChainConfig, chain, chain.Engine(), 0) contractworker = pkgworker.New(params.TestChainConfig, chain, chain.Engine())
nonce = contractworker.GetCurrentState().GetNonce(crypto.PubkeyToAddress(FaucetPriKey.PublicKey)) nonce = contractworker.GetCurrentState().GetNonce(crypto.PubkeyToAddress(FaucetPriKey.PublicKey))
dataEnc = common.FromHex(FaucetContractBinary) dataEnc = common.FromHex(FaucetContractBinary)
ftx, _ := types.SignTx(types.NewContractCreation(nonce, 0, big.NewInt(7000000000000000000), params.TxGasContractCreation*10, nil, dataEnc), types.HomesteadSigner{}, FaucetPriKey) ftx, _ := types.SignTx(types.NewContractCreation(nonce, 0, big.NewInt(7000000000000000000), params.TxGasContractCreation*10, nil, dataEnc), types.HomesteadSigner{}, FaucetPriKey)

@ -20,7 +20,7 @@
127.0.0.1 9103 validator one1p7ht2d4kl8ve7a8jxw746yfnx4wnfxtp8jqxwe ca86e551ee42adaaa6477322d7db869d3e203c00d7b86c82ebee629ad79cb6d57b8f3db28336778ec2180e56a8e07296 127.0.0.1 9103 validator one1p7ht2d4kl8ve7a8jxw746yfnx4wnfxtp8jqxwe ca86e551ee42adaaa6477322d7db869d3e203c00d7b86c82ebee629ad79cb6d57b8f3db28336778ec2180e56a8e07296
127.0.0.1 9104 validator one1z05g55zamqzfw9qs432n33gycdmyvs38xjemyl 95117937cd8c09acd2dfae847d74041a67834ea88662a7cbed1e170350bc329e53db151e5a0ef3e712e35287ae954818 127.0.0.1 9104 validator one1z05g55zamqzfw9qs432n33gycdmyvs38xjemyl 95117937cd8c09acd2dfae847d74041a67834ea88662a7cbed1e170350bc329e53db151e5a0ef3e712e35287ae954818
127.0.0.1 9105 validator one1ljznytjyn269azvszjlcqvpcj6hjm822yrcp2e 68ae289d73332872ec8d04ac256ca0f5453c88ad392730c5741b6055bc3ec3d086ab03637713a29f459177aaa8340615 127.0.0.1 9105 validator one1ljznytjyn269azvszjlcqvpcj6hjm822yrcp2e 68ae289d73332872ec8d04ac256ca0f5453c88ad392730c5741b6055bc3ec3d086ab03637713a29f459177aaa8340615
127.0.0.1 9107 validator one1uyshu2jgv8w465yc8kkny36thlt2wvel89tcmg 1c1fb28d2de96e82c3d9b4917eb54412517e2763112a3164862a6ed627ac62e87ce274bb4ea36e6a61fb66a15c263a06 127.0.0.1 9107 validator one1uyshu2jgv8w465yc8kkny36thlt2wvel89tcmg a547a9bf6fdde4f4934cde21473748861a3cc0fe8bbb5e57225a29f483b05b72531f002f8187675743d819c955a86100
127.0.0.1 9108 validator one103q7qe5t2505lypvltkqtddaef5tzfxwsse4z7 b179c4fdc0bee7bd0b6698b792837dd13404d3f985b59d4a9b1cd0641a76651e271518b61abbb6fbebd4acf963358604 127.0.0.1 9108 validator one103q7qe5t2505lypvltkqtddaef5tzfxwsse4z7 678ec9670899bf6af85b877058bea4fc1301a5a3a376987e826e3ca150b80e3eaadffedad0fedfa111576fa76ded980c
127.0.0.1 9109 validator one1658znfwf40epvy7e46cqrmzyy54h4n0qa73nep 576d3c48294e00d6be4a22b07b66a870ddee03052fe48a5abbd180222e5d5a1f8946a78d55b025de21635fd743bbad90 127.0.0.1 9109 validator one1658znfwf40epvy7e46cqrmzyy54h4n0qa73nep 576d3c48294e00d6be4a22b07b66a870ddee03052fe48a5abbd180222e5d5a1f8946a78d55b025de21635fd743bbad90
127.0.0.1 9110 validator one1d2rngmem4x2c6zxsjjz29dlah0jzkr0k2n88wc 16513c487a6bb76f37219f3c2927a4f281f9dd3fd6ed2e3a64e500de6545cf391dd973cc228d24f9bd01efe94912e714 127.0.0.1 9110 validator one1d2rngmem4x2c6zxsjjz29dlah0jzkr0k2n88wc 16513c487a6bb76f37219f3c2927a4f281f9dd3fd6ed2e3a64e500de6545cf391dd973cc228d24f9bd01efe94912e714

@ -149,25 +149,21 @@ sleep 2
i=0 i=0
while IFS='' read -r line || [[ -n "$line" ]]; do while IFS='' read -r line || [[ -n "$line" ]]; do
IFS=' ' read ip port mode account blspub <<< $line IFS=' ' read ip port mode account blspub <<< $line
if [ "${mode}" == "explorer" ] args=("${base_args[@]}" -ip "${ip}" -port "${port}" -key "/tmp/${ip}-${port}.key" -db_dir "db-${ip}-${port}")
then if [[ -z "$ip" || -z "$port" ]]; then
args=("${base_args[@]}" -ip "${ip}" -port "${port}" -key "/tmp/${ip}-${port}.key" -db_dir "db-${ip}-${port}") echo "skip empty node"
else continue
if [ ! -e .hmy/${blspub}.key ]; then fi
echo "missing blskey .hmy/${blspub}.key"
echo "skipping this node" if [ ! -e .hmy/${blspub}.key ]; then
continue args=("${args[@]}" -blskey_file "BLSKEY")
fi else
args=("${args[@]}" -blskey_file ".hmy/${blspub}.key")
args=("${base_args[@]}" -ip "${ip}" -port "${port}" -key "/tmp/${ip}-${port}.key" -db_dir "db-${ip}-${port}" -blskey_file ".hmy/${blspub}.key")
fi fi
case "${mode}" in
leader*|validator*) args=("${args[@]}" -is_genesis);;
esac
case "${mode}" in leader*) args=("${args[@]}" -is_leader);; esac case "${mode}" in leader*) args=("${args[@]}" -is_leader);; esac
case "${mode}" in *archival|archival) args=("${args[@]}" -is_archival);; esac case "${mode}" in *archival|archival) args=("${args[@]}" -is_archival);; esac
case "${mode}" in explorer*) args=("${args[@]}" -is_genesis=false -is_explorer=true -shard_id=0);; esac case "${mode}" in explorer*) args=("${args[@]}" -node_type=explorer -shard_id=0);; esac
case "${mode}" in case "${mode}" in
client) ;; client) ;;
*) $DRYRUN "${ROOT}/bin/harmony" "${args[@]}" "${extra_args[@]}" 2>&1 | tee -a "${LOG_FILE}" &;; *) $DRYRUN "${ROOT}/bin/harmony" "${args[@]}" "${extra_args[@]}" 2>&1 | tee -a "${LOG_FILE}" &;;

Loading…
Cancel
Save