# This is a combination of 16 commits.

# This is the 1st commit message:

Fix travis

# This is the commit message #2:

[cleanup] remove is_genesis flag

Signed-off-by: Leo Chen <leo@harmony.one>

# This is the commit message #3:

[nodetype] add nodetype to replace is_explorer

Signed-off-by: Leo Chen <leo@harmony.one>

# This is the commit message #4:

fix beacon sync channel blocking issue

# This is the commit message #5:

use lastMileMux to protect lastMileBlock queue to avoid potential blocking; use sleep instead of ticker

# This is the commit message #6:

Fix the beacon committee check

# This is the commit message #7:

[release] add release action to go_executable_build.sh

Signed-off-by: Leo Chen <leo@harmony.one>

# This is the commit message #8:

[node.sh] add -d option

-d              download only

Signed-off-by: Leo Chen <leo@harmony.one>

# This is the commit message #9:

[node.sh] add -T node_type option

-T node_type       support node type (validator/explorer)

Signed-off-by: Leo Chen <leo@harmony.one>

# This is the commit message #10:

[node.sh] backward compatible with older harmony binary

Signed-off-by: Leo Chen <leo@harmony.one>

# This is the commit message #11:

[node.sh] support -i shard_id option

-i shard_id             specify shard_id, this is applicable only to
explorer node

Signed-off-by: Leo Chen <leo@harmony.one>

# This is the commit message #12:

Revisited api

# This is the commit message #13:

Fix rpc integration

# This is the commit message #14:

address some minor issues in comments and code

# This is the commit message #15:

addressed comments on others' buckets

# This is the commit message #16:

Add Global Access to OS Temp Directory Variable and Move DHT Files Into Temp Directory
pull/1587/head
flicker-harmony 5 years ago
parent 8982926477
commit 49b94e7bbb
  1. 240
      api/service/explorer/service.go
  2. 3
      api/service/networkinfo/service.go
  3. 10
      api/service/syncing/syncing.go
  4. 35
      cmd/harmony/main.go
  5. 1
      hmy/api_backend.go
  6. 6
      internal/configs/node/config.go
  7. 5
      node/node.go
  8. 2
      node/node_cross_shard.go
  9. 5
      node/node_handler.go
  10. 145
      node/node_syncing.go
  11. 9
      node/rpc.go
  12. 2
      scripts/docker/run
  13. 66
      scripts/go_executable_build.sh
  14. 138
      scripts/node.sh
  15. 4
      test/configs/local-resharding.txt
  16. 26
      test/deploy.sh

@ -50,7 +50,7 @@ type Service struct {
Port string
GetNodeIDs func() []libp2p_peer.ID
ShardID uint32
storage *Storage
Storage *Storage
server *http.Server
messageChan chan *msg_pb.Message
GetAccountBalance func(common.Address) (*big.Int, error)
@ -67,6 +67,16 @@ func New(selfPeer *p2p.Peer, shardID uint32, GetNodeIDs func() []libp2p_peer.ID,
}
}
// ServiceAPI is rpc api.
type ServiceAPI struct {
Service *Service
}
// NewServiceAPI returns explorer service api.
func NewServiceAPI(explorerService *Service) *ServiceAPI {
return &ServiceAPI{Service: explorerService}
}
// StartService starts explorer service.
func (s *Service) StartService() {
utils.Logger().Info().Msg("Starting explorer service.")
@ -95,7 +105,7 @@ func GetExplorerPort(nodePort string) string {
// Init is to initialize for ExplorerService.
func (s *Service) Init(remove bool) {
s.storage = GetStorageInstance(s.IP, s.Port, remove)
s.Storage = GetStorageInstance(s.IP, s.Port, remove)
}
// Run is to run serving explorer.
@ -150,7 +160,7 @@ func (s *Service) ReadBlocksFromDB(from, to int) []*types.Block {
continue
}
key := GetBlockKey(i)
data, err := storage.db.Get([]byte(key))
data, err := s.Storage.db.Get([]byte(key))
if err != nil {
blocks = append(blocks, nil)
continue
@ -172,6 +182,11 @@ func (s *Service) GetExplorerBlocks(w http.ResponseWriter, r *http.Request) {
to := r.FormValue("to")
pageParam := r.FormValue("page")
offsetParam := r.FormValue("offset")
withSignersParam := r.FormValue("with_signers")
withSigners := false
if withSignersParam == "true" {
withSigners = true
}
data := &Data{
Blocks: []*Block{},
}
@ -186,7 +201,7 @@ func (s *Service) GetExplorerBlocks(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusBadRequest)
return
}
db := s.storage.GetDB()
db := s.Storage.GetDB()
fromInt, err := strconv.Atoi(from)
if err != nil {
utils.Logger().Warn().Err(err).Msg("invalid from parameter")
@ -236,19 +251,21 @@ func (s *Service) GetExplorerBlocks(w http.ResponseWriter, r *http.Request) {
accountBlocks := s.ReadBlocksFromDB(fromInt, toInt)
curEpoch := int64(-1)
committee := &shard.Committee{}
if withSigners {
if bytes, err := db.Get([]byte(GetCommitteeKey(uint32(s.ShardID), 0))); err == nil {
if err = rlp.DecodeBytes(bytes, committee); err != nil {
utils.Logger().Warn().Err(err).Msg("cannot read committee for new epoch")
}
}
}
for id, accountBlock := range accountBlocks {
if id == 0 || id == len(accountBlocks)-1 || accountBlock == nil {
continue
}
block := NewBlock(accountBlock, id+fromInt-1)
if int64(block.Epoch) > curEpoch {
if bytes, err := db.Get([]byte(GetCommitteeKey(uint32(s.ShardID), block.Epoch))); err == nil {
committee = &shard.Committee{}
if err = rlp.DecodeBytes(bytes, committee); err != nil {
utils.Logger().Warn().Err(err).Msg("cannot read committee for new epoch")
}
} else {
state, err := accountBlock.Header().GetShardState()
if withSigners && int64(block.Epoch) > curEpoch {
if accountBlocks[id-1] != nil {
state, err := accountBlocks[id-1].Header().GetShardState()
if err == nil {
for _, shardCommittee := range state {
if shardCommittee.ShardID == accountBlock.ShardID() {
@ -256,28 +273,32 @@ func (s *Service) GetExplorerBlocks(w http.ResponseWriter, r *http.Request) {
break
}
}
} else {
utils.Logger().Warn().Err(err).Msg("error parsing shard state")
}
}
curEpoch = int64(block.Epoch)
}
pubkeys := make([]*bls.PublicKey, len(committee.NodeList))
for i, validator := range committee.NodeList {
pubkeys[i] = new(bls.PublicKey)
validator.BlsPublicKey.ToLibBLSPublicKey(pubkeys[i])
}
mask, err := bls2.NewMask(pubkeys, nil)
if err == nil && accountBlocks[id+1] != nil {
err = mask.SetMask(accountBlocks[id+1].Header().LastCommitBitmap())
if err == nil {
for _, validator := range committee.NodeList {
oneAddress, err := common2.AddressToBech32(validator.EcdsaAddress)
if err != nil {
continue
}
blsPublicKey := new(bls.PublicKey)
validator.BlsPublicKey.ToLibBLSPublicKey(blsPublicKey)
if ok, err := mask.KeyEnabled(blsPublicKey); err == nil && ok {
block.Signers = append(block.Signers, oneAddress)
if withSigners {
pubkeys := make([]*bls.PublicKey, len(committee.NodeList))
for i, validator := range committee.NodeList {
pubkeys[i] = new(bls.PublicKey)
validator.BlsPublicKey.ToLibBLSPublicKey(pubkeys[i])
}
mask, err := bls2.NewMask(pubkeys, nil)
if err == nil && accountBlocks[id+1] != nil {
err = mask.SetMask(accountBlocks[id+1].Header().LastCommitBitmap())
if err == nil {
for _, validator := range committee.NodeList {
oneAddress, err := common2.AddressToBech32(validator.EcdsaAddress)
if err != nil {
continue
}
blsPublicKey := new(bls.PublicKey)
validator.BlsPublicKey.ToLibBLSPublicKey(blsPublicKey)
if ok, err := mask.KeyEnabled(blsPublicKey); err == nil && ok {
block.Signers = append(block.Signers, oneAddress)
}
}
}
}
@ -315,46 +336,48 @@ func (s *Service) GetExplorerBlocks(w http.ResponseWriter, r *http.Request) {
}
data.Blocks = append(data.Blocks, block)
}
paginatedBlocks := make([]*Block, 0)
for i := 0; i < offset && i+offset*page < len(data.Blocks); i++ {
paginatedBlocks = append(paginatedBlocks, data.Blocks[i+offset*page])
if offset*page+offset > len(data.Blocks) {
data.Blocks = data.Blocks[offset*page:]
} else {
data.Blocks = data.Blocks[offset*page : offset*page+offset]
}
data.Blocks = paginatedBlocks
}
// GetExplorerBlocksRPC serves end-point /blocks
func (s *Service) GetExplorerBlocksRPC(from, to, page, offset int) []*Block {
// GetExplorerBlocks rpc end-point.
func (s *ServiceAPI) GetExplorerBlocks(ctx context.Context, from, to, page, offset int, withSigners bool) ([]*Block, error) {
if offset == 0 {
offset = paginationOffset
}
db := s.storage.GetDB()
db := s.Service.Storage.GetDB()
if to == 0 {
bytes, err := db.Get([]byte(BlockHeightKey))
if err == nil {
to, err = strconv.Atoi(string(bytes))
if err != nil {
utils.Logger().Info().Msg("failed to fetch block height")
return nil, err
}
}
}
blocks := make([]*Block, 0)
accountBlocks := s.ReadBlocksFromDB(from, to)
accountBlocks := s.Service.ReadBlocksFromDB(from, to)
curEpoch := int64(-1)
committee := &shard.Committee{}
if withSigners {
if bytes, err := db.Get([]byte(GetCommitteeKey(uint32(s.Service.ShardID), 0))); err == nil {
if err = rlp.DecodeBytes(bytes, committee); err != nil {
utils.Logger().Warn().Err(err).Msg("cannot read committee for new epoch")
}
}
}
for id, accountBlock := range accountBlocks {
if id == 0 || id == len(accountBlocks)-1 || accountBlock == nil {
continue
}
block := NewBlock(accountBlock, id+from-1)
if int64(block.Epoch) > curEpoch {
if bytes, err := db.Get([]byte(GetCommitteeKey(uint32(s.ShardID), block.Epoch))); err == nil {
committee = &shard.Committee{}
if err = rlp.DecodeBytes(bytes, committee); err != nil {
utils.Logger().Warn().Err(err).Msg("cannot read committee for new epoch")
}
} else {
state, err := accountBlock.Header().GetShardState()
if withSigners && int64(block.Epoch) > curEpoch {
if accountBlocks[id-1] != nil {
state, err := accountBlocks[id-1].Header().GetShardState()
if err == nil {
for _, shardCommittee := range state {
if shardCommittee.ShardID == accountBlock.ShardID() {
@ -366,24 +389,26 @@ func (s *Service) GetExplorerBlocksRPC(from, to, page, offset int) []*Block {
}
curEpoch = int64(block.Epoch)
}
pubkeys := make([]*bls.PublicKey, len(committee.NodeList))
for i, validator := range committee.NodeList {
pubkeys[i] = new(bls.PublicKey)
validator.BlsPublicKey.ToLibBLSPublicKey(pubkeys[i])
}
mask, err := bls2.NewMask(pubkeys, nil)
if err == nil && accountBlocks[id+1] != nil {
err = mask.SetMask(accountBlocks[id+1].Header().LastCommitBitmap())
if err == nil {
for _, validator := range committee.NodeList {
oneAddress, err := common2.AddressToBech32(validator.EcdsaAddress)
if err != nil {
continue
}
blsPublicKey := new(bls.PublicKey)
validator.BlsPublicKey.ToLibBLSPublicKey(blsPublicKey)
if ok, err := mask.KeyEnabled(blsPublicKey); err == nil && ok {
block.Signers = append(block.Signers, oneAddress)
if withSigners {
pubkeys := make([]*bls.PublicKey, len(committee.NodeList))
for i, validator := range committee.NodeList {
pubkeys[i] = new(bls.PublicKey)
validator.BlsPublicKey.ToLibBLSPublicKey(pubkeys[i])
}
mask, err := bls2.NewMask(pubkeys, nil)
if err == nil && accountBlocks[id+1] != nil {
err = mask.SetMask(accountBlocks[id+1].Header().LastCommitBitmap())
if err == nil {
for _, validator := range committee.NodeList {
oneAddress, err := common2.AddressToBech32(validator.EcdsaAddress)
if err != nil {
continue
}
blsPublicKey := new(bls.PublicKey)
validator.BlsPublicKey.ToLibBLSPublicKey(blsPublicKey)
if ok, err := mask.KeyEnabled(blsPublicKey); err == nil && ok {
block.Signers = append(block.Signers, oneAddress)
}
}
}
}
@ -421,12 +446,12 @@ func (s *Service) GetExplorerBlocksRPC(from, to, page, offset int) []*Block {
}
blocks = append(blocks, block)
}
paginatedBlocks := make([]*Block, 0)
for i := 0; i < offset && i+offset*page < len(blocks); i++ {
paginatedBlocks = append(paginatedBlocks, blocks[i+offset*page])
if offset*page+offset > len(blocks) {
blocks = blocks[offset*page:]
} else {
blocks = blocks[offset*page : offset*page+offset]
}
return paginatedBlocks
return blocks, nil
}
// GetExplorerTransaction serves /tx end-point.
@ -445,7 +470,7 @@ func (s *Service) GetExplorerTransaction(w http.ResponseWriter, r *http.Request)
w.WriteHeader(http.StatusBadRequest)
return
}
db := s.storage.GetDB()
db := s.Storage.GetDB()
bytes, err := db.Get([]byte(GetTXKey(id)))
if err != nil {
utils.Logger().Warn().Err(err).Str("id", id).Msg("cannot read TX")
@ -461,13 +486,13 @@ func (s *Service) GetExplorerTransaction(w http.ResponseWriter, r *http.Request)
data.TX = *tx
}
// GetExplorerTransactionRPC serves /tx end-point.
func (s *Service) GetExplorerTransactionRPC(id string) (*Transaction, error) {
// GetExplorerTransaction rpc end-point.
func (s *ServiceAPI) GetExplorerTransaction(ctx context.Context, id string) (*Transaction, error) {
if id == "" {
utils.Logger().Warn().Msg("invalid id parameter")
return nil, nil
}
db := s.storage.GetDB()
db := s.Service.Storage.GetDB()
bytes, err := db.Get([]byte(GetTXKey(id)))
if err != nil {
utils.Logger().Warn().Err(err).Str("id", id).Msg("cannot read TX")
@ -511,7 +536,7 @@ func (s *Service) GetExplorerCommittee(w http.ResponseWriter, r *http.Request) {
return
}
// fetch current epoch if epoch is 0
db := s.storage.GetDB()
db := s.Storage.GetDB()
if epoch == 0 {
bytes, err := db.Get([]byte(BlockHeightKey))
blockHeight, err := strconv.Atoi(string(bytes))
@ -561,14 +586,14 @@ func (s *Service) GetExplorerCommittee(w http.ResponseWriter, r *http.Request) {
}
}
// GetExplorerCommitteeRPC serves /committee end-point.
func (s *Service) GetExplorerCommitteeRPC(shardID uint32, epoch uint64) (*Committee, error) {
if s.ShardID != uint32(shardID) {
// GetExplorerCommittee rpc end-point.
func (s *ServiceAPI) GetExplorerCommittee(ctx context.Context, shardID uint32, epoch uint64) (*Committee, error) {
if s.Service.ShardID != uint32(shardID) {
utils.Logger().Warn().Msg("incorrect shard id")
return nil, nil
}
// fetch current epoch if epoch is 0
db := s.storage.GetDB()
db := s.Service.Storage.GetDB()
if epoch == 0 {
bytes, err := db.Get([]byte(BlockHeightKey))
blockHeight, err := strconv.Atoi(string(bytes))
@ -598,7 +623,7 @@ func (s *Service) GetExplorerCommitteeRPC(shardID uint32, epoch uint64) (*Commit
validators := &Committee{}
for _, validator := range committee.NodeList {
validatorBalance := big.NewInt(0)
validatorBalance, err := s.GetAccountBalance(validator.EcdsaAddress)
validatorBalance, err := s.Service.GetAccountBalance(validator.EcdsaAddress)
if err != nil {
continue
}
@ -673,7 +698,7 @@ func (s *Service) GetExplorerAddress(w http.ResponseWriter, r *http.Request) {
}
}
db := s.storage.GetDB()
db := s.Storage.GetDB()
bytes, err := db.Get([]byte(key))
if err != nil {
utils.Logger().Warn().Err(err).Str("id", id).Msg("cannot read address from db")
@ -707,15 +732,15 @@ func (s *Service) GetExplorerAddress(w http.ResponseWriter, r *http.Request) {
}
data.Address.TXs = sentTXs
}
paginatedTXs := make([]*Transaction, 0)
for i := 0; i < offset && i+offset*page < len(data.Address.TXs); i++ {
paginatedTXs = append(paginatedTXs, data.Address.TXs[i+offset*page])
if offset*page+offset > len(data.Address.TXs) {
data.Address.TXs = data.Address.TXs[offset*page:]
} else {
data.Address.TXs = data.Address.TXs[offset*page : offset*page+offset]
}
data.Address.TXs = paginatedTXs
}
// GetExplorerAddressRPC serves /address end-point.
func (s *Service) GetExplorerAddressRPC(id, txView string, page, offset int) (*Address, error) {
// GetExplorerAddress rpc end-point.
func (s *ServiceAPI) GetExplorerAddress(ctx context.Context, id, txView string, page, offset int) (*Address, error) {
if offset == 0 {
offset = paginationOffset
}
@ -732,9 +757,9 @@ func (s *Service) GetExplorerAddressRPC(id, txView string, page, offset int) (*A
// Try to populate the balance by directly calling get balance.
// Check the balance from blockchain rather than local DB dump
balanceAddr := big.NewInt(0)
if s.GetAccountBalance != nil {
if s.Service.GetAccountBalance != nil {
addr := common2.ParseAddr(id)
balance, err := s.GetAccountBalance(addr)
balance, err := s.Service.GetAccountBalance(addr)
if err == nil {
balanceAddr = balance
address.Balance = balance
@ -742,11 +767,11 @@ func (s *Service) GetExplorerAddressRPC(id, txView string, page, offset int) (*A
}
key := GetAddressKey(id)
db := s.storage.GetDB()
db := s.Service.Storage.GetDB()
bytes, err := db.Get([]byte(key))
if err != nil {
utils.Logger().Warn().Err(err).Str("id", id).Msg("cannot read address from db")
return nil, err
return address, nil
}
if err = rlp.DecodeBytes(bytes, &address); err != nil {
utils.Logger().Warn().Str("id", id).Msg("cannot convert data from DB")
@ -775,11 +800,11 @@ func (s *Service) GetExplorerAddressRPC(id, txView string, page, offset int) (*A
}
address.TXs = sentTXs
}
paginatedTXs := make([]*Transaction, 0)
for i := 0; i < offset && i+offset*page < len(address.TXs); i++ {
paginatedTXs = append(paginatedTXs, address.TXs[i+offset*page])
if offset*page+offset > len(address.TXs) {
address.TXs = address.TXs[offset*page:]
} else {
address.TXs = address.TXs[offset*page : offset*page+offset]
}
address.TXs = paginatedTXs
return address, nil
}
@ -792,12 +817,12 @@ func (s *Service) GetExplorerNodeCount(w http.ResponseWriter, r *http.Request) {
}
}
// GetExplorerNodeCountRPC serves /nodes end-point.
func (s *Service) GetExplorerNodeCountRPC() int {
return len(s.GetNodeIDs())
// GetExplorerNodeCount rpc end-point.
func (s *ServiceAPI) GetExplorerNodeCount(ctx context.Context) int {
return len(s.Service.GetNodeIDs())
}
// GetExplorerShard serves /shard end-point
// GetExplorerShard serves /shard end-point.
func (s *Service) GetExplorerShard(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
var nodes []Node
@ -812,10 +837,10 @@ func (s *Service) GetExplorerShard(w http.ResponseWriter, r *http.Request) {
}
}
// GetExplorerShardRPC serves /shard end-point
func (s *Service) GetExplorerShardRPC() *Shard {
// GetExplorerShard rpc end-point.
func (s *ServiceAPI) GetExplorerShard(ctx context.Context) *Shard {
var nodes []Node
for _, nodeID := range s.GetNodeIDs() {
for _, nodeID := range s.Service.GetNodeIDs() {
nodes = append(nodes, Node{
ID: libp2p_peer.IDB58Encode(nodeID),
})
@ -823,7 +848,7 @@ func (s *Service) GetExplorerShardRPC() *Shard {
return &Shard{Nodes: nodes}
}
// NotifyService notify service
// NotifyService notify service.
func (s *Service) NotifyService(params map[string]interface{}) {
return
}
@ -835,5 +860,12 @@ func (s *Service) SetMessageChan(messageChan chan *msg_pb.Message) {
// APIs for the services.
func (s *Service) APIs() []rpc.API {
return nil
return []rpc.API{
{
Namespace: "explorer",
Version: "1.0",
Service: NewServiceAPI(s),
Public: true,
},
}
}

@ -11,6 +11,7 @@ import (
"github.com/ethereum/go-ethereum/rpc"
msg_pb "github.com/harmony-one/harmony/api/proto/message"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/p2p"
badger "github.com/ipfs/go-ds-badger"
@ -60,7 +61,7 @@ const (
func New(h p2p.Host, rendezvous p2p.GroupID, peerChan chan p2p.Peer, bootnodes utils.AddrList) *Service {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(context.Background(), connectionTimeout)
dataStore, err := badger.NewDatastore(fmt.Sprintf(".dht-%s-%s", h.GetSelfPeer().IP, h.GetSelfPeer().Port), nil)
dataStore, err := badger.NewDatastore(fmt.Sprintf("%s/.dht-%s-%s", nodeconfig.GetTempDir(), h.GetSelfPeer().IP, h.GetSelfPeer().Port), nil)
if err != nil {
panic(err)
}

@ -32,6 +32,7 @@ const (
inSyncThreshold = 0 // when peerBlockHeight - myBlockHeight <= inSyncThreshold, it's ready to join consensus
BatchSize uint32 = 1000 //maximum size for one query of block hashes
SyncLoopFrequency = 1 // unit in second
LastMileBlocksSize = 10
)
// SyncPeerConfig is peer config to sync.
@ -105,6 +106,7 @@ type StateSync struct {
syncConfig *SyncConfig
stateSyncTaskQueue *queue.Queue
syncMux sync.Mutex
lastMileMux sync.Mutex
}
func (ss *StateSync) purgeAllBlocksFromCache() {
@ -130,9 +132,13 @@ func (ss *StateSync) purgeOldBlocksFromCache() {
}
// AddLastMileBlock adds the latest few blocks into the queue for syncing;
// only keep the latest blocks with size capped by LastMileBlocksSize
func (ss *StateSync) AddLastMileBlock(block *types.Block) {
ss.syncMux.Lock()
defer ss.syncMux.Unlock()
ss.lastMileMux.Lock()
defer ss.lastMileMux.Unlock()
if len(ss.lastMileBlocks) >= LastMileBlocksSize {
ss.lastMileBlocks = ss.lastMileBlocks[1:]
}
ss.lastMileBlocks = append(ss.lastMileBlocks, block)
}

@ -42,7 +42,7 @@ var (
commit string
)
// InitLDBDatabase initializes a LDBDatabase. isGenesis=true will return the beacon chain database for normal shard nodes
// InitLDBDatabase initializes an LDBDatabase. It will return the beacon chain database for normal shard nodes
func InitLDBDatabase(ip string, port string, freshDB bool, isBeacon bool) (*ethdb.LDBDatabase, error) {
var dbFileName string
if isBeacon {
@ -80,14 +80,12 @@ var (
minPeers = flag.Int("min_peers", 32, "Minimal number of Peers in shard")
// Key file to store the private key
keyFile = flag.String("key", "./.hmykey", "the p2p key file of the harmony node")
// isGenesis indicates this node is a genesis node
isGenesis = flag.Bool("is_genesis", true, "true means this node is a genesis node")
// isArchival indicates this node is an archival node that will save and archive current blockchain
isArchival = flag.Bool("is_archival", true, "false makes node faster by turning caching off")
// delayCommit is the commit-delay timer, used by Harmony nodes
delayCommit = flag.String("delay_commit", "0ms", "how long to delay sending commit messages in consensus, ex: 500ms, 1s")
// isExplorer indicates this node is a node to serve explorer
isExplorer = flag.Bool("is_explorer", false, "true means this node is a node to serve explorer")
// nodeType indicates the type of the node: validator, explorer
nodeType = flag.String("node_type", "validator", "node type: validator, explorer")
// networkType indicates the type of the network
networkType = flag.String("network_type", "mainnet", "type of the network: mainnet, testnet, devnet, localnet")
// blockPeriod indicates the how long the leader waits to propose a new block.
@ -170,9 +168,11 @@ func initSetup() {
func passphraseForBls() {
// If an FN node is running, it should either specify blsPrivateKey or the file with the passphrase
if *isExplorer {
// However, explorer or non-validator nodes need no blskey
if *nodeType != "validator" {
return
}
if *blsKeyFile == "" || *blsPass == "" {
fmt.Println("Internal nodes need to have pass to decrypt blskey")
os.Exit(101)
@ -233,7 +233,7 @@ func createGlobalConfig() *nodeconfig.ConfigType {
var err error
nodeConfig := nodeconfig.GetShardConfig(initialAccount.ShardID)
if !*isExplorer {
if *nodeType == "validator" {
// Set up consensus keys.
setupConsensusKey(nodeConfig)
} else {
@ -336,17 +336,17 @@ func setupConsensusAndNode(nodeConfig *nodeconfig.ConfigType) *node.Node {
currentNode.NodeConfig.SetBeaconGroupID(p2p.NewGroupIDByShardID(0))
if *isExplorer {
switch *nodeType {
case "explorer":
currentNode.NodeConfig.SetRole(nodeconfig.ExplorerNode)
currentNode.NodeConfig.SetShardGroupID(p2p.NewGroupIDByShardID(p2p.ShardID(*shardID)))
currentNode.NodeConfig.SetClientGroupID(p2p.NewClientGroupIDByShardID(p2p.ShardID(*shardID)))
} else {
case "validator":
currentNode.NodeConfig.SetRole(nodeconfig.Validator)
if nodeConfig.ShardID == 0 {
currentNode.NodeConfig.SetRole(nodeconfig.Validator)
currentNode.NodeConfig.SetShardGroupID(p2p.GroupIDBeacon)
currentNode.NodeConfig.SetClientGroupID(p2p.GroupIDBeaconClient)
} else {
currentNode.NodeConfig.SetRole(nodeconfig.Validator)
currentNode.NodeConfig.SetShardGroupID(p2p.NewGroupIDByShardID(p2p.ShardID(nodeConfig.ShardID)))
currentNode.NodeConfig.SetClientGroupID(p2p.NewClientGroupIDByShardID(p2p.ShardID(nodeConfig.ShardID)))
}
@ -397,6 +397,15 @@ func main() {
flag.Var(&utils.BootNodes, "bootnodes", "a list of bootnode multiaddress (delimited by ,)")
flag.Parse()
switch *nodeType {
case "validator":
case "explorer":
break
default:
fmt.Fprintf(os.Stderr, "Unknown node type: %s\n", *nodeType)
os.Exit(1)
}
nodeconfig.SetVersion(fmt.Sprintf("Harmony (C) 2019. %v, version %v-%v (%v %v)", path.Base(os.Args[0]), version, commit, builtBy, builtAt))
if *versionFlag {
printVersion()
@ -433,7 +442,7 @@ func main() {
memprofiling.MaybeCallGCPeriodically()
}
if !*isExplorer {
if *nodeType == "validator" {
setupInitialAccount()
}
@ -454,7 +463,7 @@ func main() {
}
startMsg := "==== New Harmony Node ===="
if *isExplorer {
if *nodeType == "explorer" {
startMsg = "==== New Explorer Node ===="
}

@ -246,6 +246,7 @@ func (b *APIBackend) ResendCx(ctx context.Context, txID common.Hash) (uint64, bo
return 0, false
}
tx := txs[int(index)]
// check whether it is a valid cross shard tx
if tx.ShardID() == tx.ToShardID() || blk.Header().ShardID() != tx.ShardID() {
return 0, false
}

@ -7,6 +7,7 @@ import (
"crypto/ecdsa"
"errors"
"fmt"
"os"
"sync"
"github.com/harmony-one/bls/ffi/go/bls"
@ -246,3 +247,8 @@ func SetVersion(ver string) {
func GetVersion() string {
return version
}
// GetTempDir returns the temporary directory
func GetTempDir() string {
return os.TempDir()
}

@ -569,3 +569,8 @@ func (node *Node) initNodeConfiguration() (service.NodeConfig, chan p2p.Peer) {
func (node *Node) AccountManager() *accounts.Manager {
return node.accountManager
}
// ServiceManager ...
func (node *Node) ServiceManager() *service.Manager {
return node.serviceManager
}

@ -91,7 +91,7 @@ func (node *Node) BroadcastMissingCXReceipts() {
}
sig := nextHeader.LastCommitSignature()
bitmap := nextHeader.LastCommitBitmap()
go node.BroadcastCXReceiptsWithShardID(blk, sig[:], bitmap, toShardID)
node.BroadcastCXReceiptsWithShardID(blk, sig[:], bitmap, toShardID)
}
node.CxPool.Clear()
// this should not happen or maybe happen for impatient user

@ -165,9 +165,7 @@ func (node *Node) messageHandler(content []byte, sender libp2p_peer.ID) {
Msg("block sync")
} else {
// for non-beaconchain node, subscribe to beacon block broadcast
role := node.NodeConfig.Role()
if role == nodeconfig.Validator {
if node.Blockchain().ShardID() != 0 {
for _, block := range blocks {
if block.ShardID() == 0 {
utils.Logger().Info().
@ -392,6 +390,7 @@ func (node *Node) PostConsensusProcessing(newBlock *types.Block, commitSigAndBit
}
}
// Broadcast client requested missing cross shard receipts if there is any
node.BroadcastMissingCXReceipts()
// TODO chao: uncomment this after beacon syncing is stable

@ -23,10 +23,11 @@ import (
// Constants related to doing syncing.
const (
lastMileThreshold = 4
inSyncThreshold = 1 // unit in number of block
SyncFrequency = 10 // unit in second
MinConnectedPeers = 10 // minimum number of peers connected to in node syncing
lastMileThreshold = 4
inSyncThreshold = 1 // unit in number of block
SyncFrequency = 10 // unit in second
BeaconSyncFrequency = 5 // unit in second
MinConnectedPeers = 10 // minimum number of peers connected to in node syncing
)
// getNeighborPeers is a helper function to return list of peers
@ -160,84 +161,87 @@ func (p *LocalSyncingPeerProvider) SyncingPeers(shardID uint32) (peers []p2p.Pee
// DoBeaconSyncing update received beaconchain blocks and downloads missing beacon chain blocks
func (node *Node) DoBeaconSyncing() {
go func(node *Node) {
for {
select {
case beaconBlock := <-node.BeaconBlockChannel:
node.beaconSync.AddLastMileBlock(beaconBlock)
}
}
}(node)
for {
select {
case beaconBlock := <-node.BeaconBlockChannel:
if node.beaconSync == nil {
utils.Logger().Info().Msg("initializing beacon sync")
node.beaconSync = syncing.CreateStateSync(node.SelfPeer.IP, node.SelfPeer.Port, node.GetSyncID())
if node.beaconSync == nil {
utils.Logger().Info().Msg("initializing beacon sync")
node.beaconSync = syncing.CreateStateSync(node.SelfPeer.IP, node.SelfPeer.Port, node.GetSyncID())
}
if node.beaconSync.GetActivePeerNumber() == 0 {
utils.Logger().Info().Msg("no peers; bootstrapping beacon sync config")
// 0 means shardID=0 here
peers, err := node.SyncingPeerProvider.SyncingPeers(0)
if err != nil {
utils.Logger().Warn().
Err(err).
Msg("cannot retrieve beacon syncing peers")
continue
}
if node.beaconSync.GetActivePeerNumber() == 0 {
utils.Logger().Info().Msg("no peers; bootstrapping beacon sync config")
peers, err := node.SyncingPeerProvider.SyncingPeers(0)
if err != nil {
utils.Logger().Warn().
Err(err).
Msg("cannot retrieve beacon syncing peers")
continue
}
if err := node.beaconSync.CreateSyncConfig(peers, true); err != nil {
utils.Logger().Warn().Err(err).Msg("cannot create beacon sync config")
continue
}
if err := node.beaconSync.CreateSyncConfig(peers, true); err != nil {
utils.Logger().Warn().Err(err).Msg("cannot create beacon sync config")
continue
}
node.beaconSync.AddLastMileBlock(beaconBlock)
node.beaconSync.SyncLoop(node.Beaconchain(), node.BeaconWorker, false, true)
}
node.beaconSync.SyncLoop(node.Beaconchain(), node.BeaconWorker, false, true)
time.Sleep(BeaconSyncFrequency * time.Second)
}
}
// DoSyncing keep the node in sync with other peers, willJoinConsensus means the node will try to join consensus after catch up
func (node *Node) DoSyncing(bc *core.BlockChain, worker *worker.Worker, willJoinConsensus bool) {
ticker := time.NewTicker(SyncFrequency * time.Second)
SyncingLoop:
for {
select {
case <-ticker.C:
if node.stateSync == nil {
node.stateSync = syncing.CreateStateSync(node.SelfPeer.IP, node.SelfPeer.Port, node.GetSyncID())
utils.Logger().Debug().Msg("[SYNC] initialized state sync")
if node.stateSync == nil {
node.stateSync = syncing.CreateStateSync(node.SelfPeer.IP, node.SelfPeer.Port, node.GetSyncID())
utils.Logger().Debug().Msg("[SYNC] initialized state sync")
}
if node.stateSync.GetActivePeerNumber() < MinConnectedPeers {
shardID := bc.ShardID()
peers, err := node.SyncingPeerProvider.SyncingPeers(shardID)
if err != nil {
utils.Logger().Warn().
Err(err).
Uint32("shard_id", shardID).
Msg("cannot retrieve syncing peers")
continue SyncingLoop
}
if err := node.stateSync.CreateSyncConfig(peers, false); err != nil {
utils.Logger().Warn().
Err(err).
Interface("peers", peers).
Msg("[SYNC] create peers error")
continue SyncingLoop
}
if node.stateSync.GetActivePeerNumber() < MinConnectedPeers {
shardID := bc.ShardID()
peers, err := node.SyncingPeerProvider.SyncingPeers(shardID)
if err != nil {
utils.Logger().Warn().
Err(err).
Uint32("shard_id", shardID).
Msg("cannot retrieve syncing peers")
continue SyncingLoop
}
if err := node.stateSync.CreateSyncConfig(peers, false); err != nil {
utils.Logger().Warn().
Err(err).
Interface("peers", peers).
Msg("[SYNC] create peers error")
continue SyncingLoop
}
utils.Logger().Debug().Int("len", node.stateSync.GetActivePeerNumber()).Msg("[SYNC] Get Active Peers")
utils.Logger().Debug().Int("len", node.stateSync.GetActivePeerNumber()).Msg("[SYNC] Get Active Peers")
}
if node.stateSync.IsOutOfSync(bc) {
node.stateMutex.Lock()
node.State = NodeNotInSync
node.stateMutex.Unlock()
if willJoinConsensus {
node.Consensus.BlocksNotSynchronized()
}
if node.stateSync.IsOutOfSync(bc) {
node.stateSync.SyncLoop(bc, worker, willJoinConsensus, false)
if willJoinConsensus {
node.stateMutex.Lock()
node.State = NodeNotInSync
node.State = NodeReadyForConsensus
node.stateMutex.Unlock()
if willJoinConsensus {
node.Consensus.BlocksNotSynchronized()
}
node.stateSync.SyncLoop(bc, worker, willJoinConsensus, false)
if willJoinConsensus {
node.stateMutex.Lock()
node.State = NodeReadyForConsensus
node.stateMutex.Unlock()
node.Consensus.BlocksSynchronized()
}
node.Consensus.BlocksSynchronized()
}
node.stateMutex.Lock()
node.State = NodeReadyForConsensus
node.stateMutex.Unlock()
}
node.stateMutex.Lock()
node.State = NodeReadyForConsensus
node.stateMutex.Unlock()
time.Sleep(SyncFrequency * time.Second)
}
}
@ -251,15 +255,20 @@ func (node *Node) SupportSyncing() {
node.InitSyncingServer()
node.StartSyncingServer()
joinConsensus := false
// Check if the current node is explorer node.
isExplorerNode := node.NodeConfig.Role() == nodeconfig.ExplorerNode
switch node.NodeConfig.Role() {
case nodeconfig.Validator:
joinConsensus = true
}
// Send new block to unsync node if the current node is not explorer node.
if !isExplorerNode {
// TODO: leo this pushing logic has to be removed
if joinConsensus {
go node.SendNewBlockToUnsync()
}
go node.DoSyncing(node.Blockchain(), node.Worker, !isExplorerNode)
go node.DoSyncing(node.Blockchain(), node.Worker, joinConsensus)
}
// InitSyncingServer starts downloader server.
@ -402,7 +411,7 @@ func (node *Node) CalculateResponse(request *downloader_pb.DownloaderRequest, in
return response, nil
} else if len(node.peerRegistrationRecord) >= maxBroadcastNodes {
response.Type = downloader_pb.DownloaderResponse_FAIL
utils.GetLogInstance().Warn("[SYNC] maximum registration limit exceeds", "ip", ip, "port", port)
utils.GetLogInstance().Debug("[SYNC] maximum registration limit exceeds", "ip", ip, "port", port)
return response, nil
} else {
response.Type = downloader_pb.DownloaderResponse_FAIL

@ -9,7 +9,6 @@ import (
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/rpc"
"github.com/harmony-one/harmony/api/service"
"github.com/harmony-one/harmony/hmy"
"github.com/harmony-one/harmony/internal/hmyapi"
"github.com/harmony-one/harmony/internal/hmyapi/filters"
@ -33,7 +32,7 @@ var (
httpEndpoint = ""
wsEndpoint = ""
httpModules = []string{"hmy", "net"}
httpModules = []string{"hmy", "net", "explorer"}
httpVirtualHosts = []string{"*"}
httpTimeouts = rpc.DefaultHTTPTimeouts
httpOrigins = []string{"*"}
@ -162,11 +161,5 @@ func (node *Node) APIs() []rpc.API {
Service: hmyapi.NewPublicNetAPI(node.host, harmony.APIBackend.NetVersion()),
Public: true,
},
{
Namespace: "explorer",
Version: "1.0",
Service: node.ServiceManager().GetServices()[service.SupportExplorer],
Public: true,
},
}...)
}

@ -31,6 +31,6 @@ if [ -z "$NODE_ACCOUNT_ID" ]; then
exit 2
fi
harmony -log_folder log -bootnodes $BN_MA -ip $PUB_IP -port $NODE_PORT -is_genesis -account_index $NODE_ACCOUNT_ID
harmony -log_folder log -bootnodes $BN_MA -ip $PUB_IP -port $NODE_PORT -account_index $NODE_ACCOUNT_ID
# vim: ai ts=2 sw=2 et sts=2 ft=sh

@ -12,13 +12,14 @@ SRC[wallet_stress_test]="cmd/client/wallet_stress_test/main.go cmd/client/wallet
BINDIR=bin
BUCKET=unique-bucket-bin
PUBBUCKET=pub.harmony.one
REL=s3
REL=
GOOS=linux
GOARCH=amd64
FOLDER=/${WHOAMI:-$USER}
RACE=
VERBOSE=
DEBUG=false
NETWORK=main
unset -v progdir
case "${0}" in
@ -35,6 +36,9 @@ if [ "$(uname -s)" == "Darwin" ]; then
GOOS=darwin
LIB[libbls384_256.dylib]=${BLS_DIR}/lib/libbls384_256.dylib
LIB[libmcl.dylib]=${MCL_DIR}/lib/libmcl.dylib
LIB[libgmp.10.dylib]=/usr/local/opt/gmp/lib/libgmp.10.dylib
LIB[libgmpxx.4.dylib]=/usr/local/opt/gmp/lib/libgmpxx.4.dylib
LIB[libcrypto.1.0.0.dylib]=/usr/local/opt/openssl/lib/libcrypto.1.0.0.dylib
else
MD5=md5sum
LIB[libbls384_256.so]=${BLS_DIR}/lib/libbls384_256.so
@ -62,6 +66,7 @@ ACTION:
build build binaries only (default action)
upload upload binaries to s3
pubwallet upload wallet to public bucket (bucket: $PUBBUCKET)
release upload binaries to release bucket
harmony|txgen|bootnode|wallet
only build the specified binary
@ -133,6 +138,45 @@ function upload
[ -e $BINDIR/md5sum.txt ] && $AWSCLI s3 cp $BINDIR/md5sum.txt s3://${BUCKET}$FOLDER/md5sum.txt --acl public-read
}
# release uploads the built binaries and their shared libraries to the
# public release bucket ($PUBBUCKET) under release/<os-arch>/$REL/.
# Requires the aws CLI; honors $PROFILE as a named AWS profile.
function release
{
   AWSCLI=aws

   # $AWSCLI stays unquoted at call sites so "--profile X" word-splits
   # into separate arguments.
   if [ -n "$PROFILE" ]; then
      AWSCLI+=" --profile $PROFILE"
   fi

   OS=$(uname -s)
   case "$OS" in
      "Linux")
         FOLDER=release/linux-x86_64/$REL ;;
      "Darwin")
         FOLDER=release/darwin-x86_64/$REL ;;
      *)
         echo "Unsupported OS: $OS"
         return ;;
   esac

   # Upload every built binary; warn (but keep going) on missing ones.
   for bin in "${!SRC[@]}"; do
      if [ -e "$BINDIR/$bin" ]; then
         $AWSCLI s3 cp "$BINDIR/$bin" "s3://${PUBBUCKET}/$FOLDER/$bin" --acl public-read
      else
         # was "MISSGING" (typo); now consistent with the LIB loop below
         echo "!! MISSING $bin !!"
      fi
   done

   # Upload the shared libraries the binaries depend on.
   for lib in "${!LIB[@]}"; do
      if [ -e "${LIB[$lib]}" ]; then
         $AWSCLI s3 cp "${LIB[$lib]}" "s3://${PUBBUCKET}/$FOLDER/$lib" --acl public-read
      else
         echo "!! MISSING ${LIB[$lib]} !!"
      fi
   done

   # Checksums last, so their presence implies the uploads above finished.
   [ -e "$BINDIR/md5sum.txt" ] && $AWSCLI s3 cp "$BINDIR/md5sum.txt" "s3://${PUBBUCKET}/$FOLDER/md5sum.txt" --acl public-read
}
function upload_wallet
{
AWSCLI=aws
@ -168,7 +212,7 @@ function upload_wallet
}
################################ MAIN FUNCTION ##############################
while getopts "hp:a:o:b:f:rv" option; do
while getopts "hp:a:o:b:f:rvN:" option; do
case $option in
h) usage ;;
p) PROFILE=$OPTARG ;;
@ -179,6 +223,7 @@ while getopts "hp:a:o:b:f:rv" option; do
r) RACE=-race ;;
v) VERBOSE='-v -x' ;;
d) DEBUG=true ;;
N) NETWORK=$OPTARG ;;
esac
done
@ -188,9 +233,26 @@ shift $(($OPTIND-1))
ACTION=${1:-build}
# Map the requested network (-N) onto the release channel directory ($REL)
# used by the upload/release actions.
case "${NETWORK}" in
main)
   REL=mainnet
   ;;
beta)
   REL=testnet
   ;;
pangaea)
   REL=pangaea
   ;;
*)
   echo "${NETWORK}: invalid network" >&2
   # was a bare "exit", which exited with echo's status (0) and
   # silently masked the configuration error
   exit 1
   ;;
esac
case "$ACTION" in
"build") build_only ;;
"upload") upload ;;
"release") release ;;
"pubwallet") upload_wallet ;;
"harmony"|"wallet"|"txgen"|"bootnode") build_only $ACTION ;;
*) usage ;;

@ -101,10 +101,13 @@ usage: ${progname} [-1ch] [-k KEYFILE]
-s run setup env only (must run as root)
-S run the ${progname} as non-root user (default: run as root)
-p passfile use the given BLS passphrase file
-d just download the Harmony binaries (default: off)
-D do not download Harmony binaries (default: download when start)
-m collect and upload node metrics to harmony prometheus + grafana
-N network join the given network (main, beta, pangaea; default: main)
-t equivalent to -N pangaea (deprecated)
-T nodetype specify the node type (validator, explorer; default: validator)
-i shardid specify the shard id (valid only with explorer node; default: 1)
example:
@ -119,18 +122,25 @@ usage() {
exit 64 # EX_USAGE
}
# =======
BUCKET=pub.harmony.one
OS=$(uname -s)
unset start_clean loop run_as_root blspass do_not_download metrics network
start_clean=false
loop=true
run_as_root=true
do_not_download=false
download_only=false
metrics=false
network=main
node_type=validator
shard_id=1
${BLSKEYFILE=}
unset OPTIND OPTARG opt
OPTIND=1
while getopts :1chk:sSp:DmN:t opt
while getopts :1chk:sSp:dDmN:tT:i: opt
do
case "${opt}" in
'?') usage "unrecognized option -${OPTARG}";;
@ -142,10 +152,13 @@ do
s) setup_env; exit 0;;
S) run_as_root=false ;;
p) blspass="${OPTARG}";;
d) download_only=true;;
D) do_not_download=true;;
m) metrics=true;;
N) network="${OPTARG}";;
t) network=pangaea;;
T) node_type="${OPTARG}";;
i) shard_id="${OPTARG}";;
*) err 70 "unhandled option -${OPTARG}";; # EX_SOFTWARE
esac
done
@ -153,6 +166,12 @@ shift $((${OPTIND} - 1))
unset -v bootnodes REL network_type dns_zone
# Validate the -T node_type option; only the two supported roles are
# accepted, anything else prints usage (which exits with EX_USAGE).
case "${node_type}" in
validator|explorer) ;;
*)
usage ;;
esac
case "${network}" in
main)
bootnodes=(
@ -195,43 +214,6 @@ case $# in
;;
esac
if ${run_as_root}; then
check_root
fi
case "${BLSKEYFILE}" in
"")
unset -v f
for f in \
~/*--????-??-??T??-??-??.*Z--bls_???????????????????????????????????????????????????????????????????????????????????????????????? \
~/????????????????????????????????????????????????????????????????????????????????????????????????.key \
*--????-??-??T??-??-??.*Z--bls_???????????????????????????????????????????????????????????????????????????????????????????????? \
????????????????????????????????????????????????????????????????????????????????????????????????.key
do
[ -f "${f}" ] || continue
case "${BLSKEYFILE}" in
"")
BLSKEYFILE="${f}"
;;
*)
[ "${f}" -ef "${BLSKEYFILE}" ] || \
err 69 "multiple key files found (${f}, ${BLSKEYFILE}); please use -k to specify"
;;
esac
done
case "${BLSKEYFILE}" in
"") err 69 "could not autodetect BLS key file; please use -k to specify";;
esac
msg "autodetected BLS key file: ${BLSKEYFILE}"
;;
*)
msg "using manually specified BLS key file: ${BLSKEYFILE}"
;;
esac
BUCKET=pub.harmony.one
OS=$(uname -s)
if [ "$OS" == "Darwin" ]; then
FOLDER=release/darwin-x86_64/$REL/
BIN=( harmony libbls384_256.dylib libcrypto.1.0.0.dylib libgmp.10.dylib libgmpxx.4.dylib libmcl.dylib md5sum.txt )
@ -241,21 +223,6 @@ if [ "$OS" == "Linux" ]; then
BIN=( harmony libbls384_256.so libcrypto.so.10 libgmp.so.10 libgmpxx.so.4 libmcl.so md5sum.txt )
fi
any_new_binaries() {
local outdir
${do_not_download} && return 0
outdir="${1:-.}"
mkdir -p "${outdir}"
curl -sSf http://${BUCKET}.s3.amazonaws.com/${FOLDER}md5sum.txt -o "${outdir}/md5sum.txt.new" || return $?
if diff $outdir/md5sum.txt.new md5sum.txt
then
rm "${outdir}/md5sum.txt.new"
else
mv "${outdir}/md5sum.txt.new" "${outdir}/md5sum.txt"
return 1
fi
}
extract_checksum() {
awk -v basename="${1}" '
{
@ -291,7 +258,6 @@ verify_checksum() {
return 0
}
download_binaries() {
local outdir
${do_not_download} && return 0
@ -306,6 +272,60 @@ download_binaries() {
(cd "${outdir}" && exec openssl sha256 "${BIN[@]}") > "${outdir}/harmony-checksums.txt"
}
# -d: fetch the node software and exit immediately without running a node.
if ${download_only}; then
download_binaries || err 69 "download node software failed"
exit 0
fi
if ${run_as_root}; then
check_root
fi
# Resolve the BLS key file: if -k was not given, autodetect exactly one
# key file (in $HOME or the current directory); more than one match, or
# none, is a fatal error.
case "${BLSKEYFILE}" in
"")
unset -v f
for f in \
~/*--????-??-??T??-??-??.*Z--bls_???????????????????????????????????????????????????????????????????????????????????????????????? \
~/????????????????????????????????????????????????????????????????????????????????????????????????.key \
*--????-??-??T??-??-??.*Z--bls_???????????????????????????????????????????????????????????????????????????????????????????????? \
????????????????????????????????????????????????????????????????????????????????????????????????.key
do
[ -f "${f}" ] || continue
case "${BLSKEYFILE}" in
"")
BLSKEYFILE="${f}"
;;
*)
# -ef: same file via different paths is tolerated; two distinct
# key files is ambiguous and fatal.
[ "${f}" -ef "${BLSKEYFILE}" ] || \
err 69 "multiple key files found (${f}, ${BLSKEYFILE}); please use -k to specify"
;;
esac
done
case "${BLSKEYFILE}" in
"") err 69 "could not autodetect BLS key file; please use -k to specify";;
esac
msg "autodetected BLS key file: ${BLSKEYFILE}"
;;
*)
msg "using manually specified BLS key file: ${BLSKEYFILE}"
;;
esac
# any_new_binaries [outdir]
# Fetches the remote md5sum.txt manifest and compares it to the local copy.
# Returns 0 when nothing changed (or downloads are disabled via -D),
# 1 when a new manifest was saved (caller should re-download binaries),
# or curl's nonzero status if the manifest could not be fetched.
any_new_binaries() {
local outdir
${do_not_download} && return 0
outdir="${1:-.}"
mkdir -p "${outdir}"
curl -sSf http://${BUCKET}.s3.amazonaws.com/${FOLDER}md5sum.txt -o "${outdir}/md5sum.txt.new" || return $?
# NOTE(review): compares against ./md5sum.txt in the current directory,
# not ${outdir}/md5sum.txt — confirm this asymmetry is intentional.
if diff $outdir/md5sum.txt.new md5sum.txt
then
rm "${outdir}/md5sum.txt.new"
else
mv "${outdir}/md5sum.txt.new" "${outdir}/md5sum.txt"
return 1
fi
}
if any_new_binaries
then
msg "binaries did not change"
@ -471,11 +491,19 @@ do
-bootnodes "${BN_MA}"
-ip "${PUB_IP}"
-port "${NODE_PORT}"
-is_genesis
-blskey_file "${BLSKEYFILE}"
-network_type="${network_type}"
-dns_zone="${dns_zone}"
)
# backward compatible with older harmony node software
case "${node_type}" in
explorer)
args+=(
-node_type="${node_type}"
-shard_id="${shard_id}"
)
;;
esac
case "${metrics}" in
true)
args+=(

@ -20,7 +20,7 @@
127.0.0.1 9103 validator one1p7ht2d4kl8ve7a8jxw746yfnx4wnfxtp8jqxwe ca86e551ee42adaaa6477322d7db869d3e203c00d7b86c82ebee629ad79cb6d57b8f3db28336778ec2180e56a8e07296
127.0.0.1 9104 validator one1z05g55zamqzfw9qs432n33gycdmyvs38xjemyl 95117937cd8c09acd2dfae847d74041a67834ea88662a7cbed1e170350bc329e53db151e5a0ef3e712e35287ae954818
127.0.0.1 9105 validator one1ljznytjyn269azvszjlcqvpcj6hjm822yrcp2e 68ae289d73332872ec8d04ac256ca0f5453c88ad392730c5741b6055bc3ec3d086ab03637713a29f459177aaa8340615
127.0.0.1 9107 validator one1uyshu2jgv8w465yc8kkny36thlt2wvel89tcmg 1c1fb28d2de96e82c3d9b4917eb54412517e2763112a3164862a6ed627ac62e87ce274bb4ea36e6a61fb66a15c263a06
127.0.0.1 9108 validator one103q7qe5t2505lypvltkqtddaef5tzfxwsse4z7 b179c4fdc0bee7bd0b6698b792837dd13404d3f985b59d4a9b1cd0641a76651e271518b61abbb6fbebd4acf963358604
127.0.0.1 9107 validator one1uyshu2jgv8w465yc8kkny36thlt2wvel89tcmg a547a9bf6fdde4f4934cde21473748861a3cc0fe8bbb5e57225a29f483b05b72531f002f8187675743d819c955a86100
127.0.0.1 9108 validator one103q7qe5t2505lypvltkqtddaef5tzfxwsse4z7 678ec9670899bf6af85b877058bea4fc1301a5a3a376987e826e3ca150b80e3eaadffedad0fedfa111576fa76ded980c
127.0.0.1 9109 validator one1658znfwf40epvy7e46cqrmzyy54h4n0qa73nep 576d3c48294e00d6be4a22b07b66a870ddee03052fe48a5abbd180222e5d5a1f8946a78d55b025de21635fd743bbad90
127.0.0.1 9110 validator one1d2rngmem4x2c6zxsjjz29dlah0jzkr0k2n88wc 16513c487a6bb76f37219f3c2927a4f281f9dd3fd6ed2e3a64e500de6545cf391dd973cc228d24f9bd01efe94912e714

@ -149,25 +149,21 @@ sleep 2
i=0
while IFS='' read -r line || [[ -n "$line" ]]; do
IFS=' ' read ip port mode account blspub <<< $line
if [ "${mode}" == "explorer" ]
then
args=("${base_args[@]}" -ip "${ip}" -port "${port}" -key "/tmp/${ip}-${port}.key" -db_dir "db-${ip}-${port}")
else
if [ ! -e .hmy/${blspub}.key ]; then
echo "missing blskey .hmy/${blspub}.key"
echo "skipping this node"
continue
fi
args=("${base_args[@]}" -ip "${ip}" -port "${port}" -key "/tmp/${ip}-${port}.key" -db_dir "db-${ip}-${port}" -blskey_file ".hmy/${blspub}.key")
args=("${base_args[@]}" -ip "${ip}" -port "${port}" -key "/tmp/${ip}-${port}.key" -db_dir "db-${ip}-${port}")
if [[ -z "$ip" || -z "$port" ]]; then
echo "skip empty node"
continue
fi
if [ ! -e .hmy/${blspub}.key ]; then
args=("${args[@]}" -blskey_file "BLSKEY")
else
args=("${args[@]}" -blskey_file ".hmy/${blspub}.key")
fi
case "${mode}" in
leader*|validator*) args=("${args[@]}" -is_genesis);;
esac
case "${mode}" in leader*) args=("${args[@]}" -is_leader);; esac
case "${mode}" in *archival|archival) args=("${args[@]}" -is_archival);; esac
case "${mode}" in explorer*) args=("${args[@]}" -is_genesis=false -is_explorer=true -shard_id=0);; esac
case "${mode}" in explorer*) args=("${args[@]}" -node_type=explorer -shard_id=0);; esac
case "${mode}" in
client) ;;
*) $DRYRUN "${ROOT}/bin/harmony" "${args[@]}" "${extra_args[@]}" 2>&1 | tee -a "${LOG_FILE}" &;;

Loading…
Cancel
Save