Merge branch 'master' of github.com:harmony-one/harmony into rj_branch

pull/693/head
Rongjian Lan 6 years ago
commit 75d0463426
15 changed files (lines changed in parentheses):
  1. .bnkey (3)
  2. .gitignore (3)
  3. .hmy/wallet.ini (39)
  4. api/service/syncing/errors.go (1)
  5. api/service/syncing/syncing.go (258)
  6. cmd/client/wallet/main.go (83)
  7. go.mod (1)
  8. internal/ctxerror/ctxerror.go (124)
  9. internal/ctxerror/ctxerror_test.go (363)
  10. internal/ctxerror/mock/ctxerror.go (125)
  11. internal/utils/configfile.go (66)
  12. internal/utils/configfile_test.go (112)
  13. internal/utils/test.ini (37)
  14. node/node_syncing.go (8)
  15. scripts/list_harmony_go_files.sh (2)

@@ -0,0 +1,3 @@
{
"key": "CAASpgkwggSiAgEAAoIBAQDhLRM7P8O8U9x+iXoxU+XOv5ihLVe+83he8jVoXp64QHZJQViYj//Wer2qDPeW7hHSC9FU7jsTOBcx3s46Z/4fS+rcXUtGQptscJywjh8wkadSoM8WwnLYIcRIpzI0nbt7wFRUOtwsLhR6/wb2l0qXiygmQn0l+byRQs7Jl1ZwleVetDik/QFGGz8NHFCl2PWNSkkJBOsy2zDGhiInK83qVADlyCP7roLgUe55W625OJ34f9iD7H+/A8UXi7n7BpTmLLDLVS1yeap0Jrftl7fA6s3hDVcNrBBDxsZuntEuKe+5fq6jb4Lj/iHzDoW/mkVWsURR2dIghBCe79cTUjhBAgMBAAECggEAOmOX0wtL1C/iwyUe/F+G/b+M8m5xjGBR3DzuQGwYuCpvAbviH11nt9QIDmpiYU7GbDMISv+jbe5jhVkDHP4OVUs4UYAXntZyuwHQf6+6wrtiKfjs+jfd09GdtkE0sZQdI4/Rzi2dHg+s29/5jEPa4cUB2jnvfcNudDRgrsGBdpvLZCbDm99PAn0z4gV8TZ+JxgMYG3obHjxWbHoyVHXkisM+WDsXaqOsE4xx40P3CevKsbrYYC7la5N/gt4RhkkMF8QGFpiMYHsO8PYxTYpNdZNUgfxLMDqvlUzBhzYE90y+a4bL2rflA+3JI9bfHKFqW0T14QXr8P/s9SqYPd1rgQKBgQDwj+ry71L++Koppct0qnu/IW+sZpw9Oydcaw128fWnEuX/VJR1DR/pBMisCHJwGGBqGQqZntw4Ajg7ZoYx7nlpUv9m5l8GTwS2IrmDkY1gri4qRw2ipzDCah+6mNQeys4B7LbQqbSATdeOQDbbbjJHTreAjts2V+bMx/Tep0G76QKBgQDvoGLmZ39dO5KM7yTG2GM3mmOadOHBk3GY/gjCm43PVtC4RePCIKyRX4UzYZuvmvT++Y5XSH91pVdoNqWY2vOhfBjNufV5tGHkUdTPy5yB6t2B/RvbYOEzZdWU+sQYl+/0EAI8IjoI8j2jnKr4giVVbEhxRU1o2lyj9nDmwpFamQKBgEC+GveC3Y1tky5eRqGBeIh6EToO66G3F+LRlPAcMobimS/crY/LFnl6Jh/WriXpCZnEX4v0q0QPpN6nuxoZGmf0RHSHL6/c+GGketUkCS6p8hbCxLKv2HmaYiuwEfavkj4GXTVPVxro2Eiak6j+wV1bnBtnVywLADzA2/BIh3JxAoGAH95oyNPC8JdXqj6z2W4149M4o/YfgCsn1H0UlS0y8vxMzfUdkVffG4ZkpKy6k+Q76R0vfRQ7P145/bYm1+lmtXdXpSSyLPl8e20WrHwb2Htv8jXDWq3LxZYpjaK8KbkrRH5MjcrPhRkScYwIgPxuEqpQCCB7ZxKDd5ry8P23byECgYBTXNVyeGqn5gBR1zdHzyg8qrUr39oerWqc+ejeDJgXp4HSYmXz7g1KuuE2OgVcOo/gpL13Acjc7neHGHnI20Aa3v5vh3gPohY5lgGy3AIoe0doedhkIUtqWjYkq+bmrrYUf2NG6iNOmjbqjUNBuxuIxlUomvgH+5PqBwGh0yKRMQ=="
}

3
.gitignore vendored

@@ -47,9 +47,6 @@ bc_config.json
# leveldb local storage
db/
# bootnode keystore
.bnkey
# harmony node keystore
.hmykey

@@ -0,0 +1,39 @@
[default]
bootnode = /ip4/100.26.90.187/tcp/9876/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9
bootnode = /ip4/54.213.43.194/tcp/9876/p2p/QmQayinFSgMMw5cSpDUiD9pQ2WeP6WNmGxpZ6ou3mdVFJX
shards = 1
[default.shard0.rpc]
rpc = 34.217.179.222:14555
rpc = 18.209.247.105:14555
rpc = 100.25.248.42:14555
rpc = 3.80.164.193:14555
rpc = 54.87.237.93:14555
[local]
bootnode = /ip4/127.0.0.1/tcp/19876/p2p/Qmc1V6W7BwX8Ugb42Ti8RnXF1rY5PF7nnZ6bKBryCgi6cv
shards = 1
[local.shard0.rpc]
rpc = 127.0.0.1:14555
rpc = 127.0.0.1:14556
[devnet]
bootnode = /ip4/100.26.90.187/tcp/9871/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv
bootnode = /ip4/54.213.43.194/tcp/9871/p2p/QmRVbTpEYup8dSaURZfF6ByrMTSKa4UyUzJhSjahFzRqNj
shards = 3
[devnet.shard0.rpc]
rpc = 13.57.196.136:14555
rpc = 35.175.103.144:14555
rpc = 54.245.176.36:14555
[devnet.shard1.rpc]
rpc = 35.163.188.234:14555
rpc = 54.215.251.123:14555
rpc = 54.153.11.146:14555
[devnet.shard2.rpc]
rpc = 52.201.246.212:14555
rpc = 3.81.26.139:14555
rpc = 18.237.42.209:14555
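
The profile layout above follows the convention that the wallet's config reader (internal/utils/configfile.go, added later in this diff) expects: each profile is a top-level section with repeated bootnode keys and a shards count, plus one <profile>.shardN.rpc section per shard. A hypothetical extra profile, with placeholder addresses, would look like this:

[staging]
bootnode = /ip4/10.0.0.1/tcp/9876/p2p/QmExamplePeerID
shards = 2
[staging.shard0.rpc]
rpc = 10.0.0.2:14555
[staging.shard1.rpc]
rpc = 10.0.0.3:14555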

@@ -4,7 +4,6 @@ import "errors"
// Errors ...
var (
ErrSyncPeerConfigClientNotReady = errors.New("[SYNC]: client is not ready")
ErrRegistrationFail = errors.New("[SYNC]: registration failed")
ErrGetBlock = errors.New("[SYNC]: get block failed")
ErrGetBlockHash = errors.New("[SYNC]: get blockhash failed")

@@ -16,6 +16,7 @@ import (
pb "github.com/harmony-one/harmony/api/service/syncing/downloader/proto"
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/ctxerror"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/node/worker"
"github.com/harmony-one/harmony/p2p"
@@ -55,9 +56,32 @@ type SyncBlockTask struct {
// SyncConfig contains an array of SyncPeerConfig.
type SyncConfig struct {
// mtx locks peers, and *SyncPeerConfig pointers in peers.
// SyncPeerConfig itself is guarded by its own mutex.
mtx sync.RWMutex
peers []*SyncPeerConfig
}
// AddPeer adds the given sync peer.
func (sc *SyncConfig) AddPeer(peer *SyncPeerConfig) {
sc.mtx.Lock()
defer sc.mtx.Unlock()
sc.peers = append(sc.peers, peer)
}
// ForEachPeer calls the given function with each peer.
// It breaks the iteration iff the function returns true.
func (sc *SyncConfig) ForEachPeer(f func(peer *SyncPeerConfig) (brk bool)) {
sc.mtx.RLock()
defer sc.mtx.RUnlock()
for _, peer := range sc.peers {
if f(peer) {
break
}
}
}
// CreateStateSync returns the implementation of StateSyncInterface interface.
func CreateStateSync(ip string, port string, peerHash [20]byte) *StateSync {
stateSync := &StateSync{}
@@ -74,8 +98,6 @@ type StateSync struct {
selfip string
selfport string
selfPeerHash [20]byte // hash of ip and address combination
peerNumber int
activePeerNumber int
commonBlocks map[int]*types.Block
lastMileBlocks []*types.Block // last mile blocks to catch up with the consensus
syncConfig *SyncConfig
@@ -91,25 +113,39 @@ func (ss *StateSync) AddLastMileBlock(block *types.Block) {
}
// CloseConnections closes gRPC connections for state sync clients
func (ss *StateSync) CloseConnections() {
for _, pc := range ss.syncConfig.peers {
if pc.client != nil {
func (sc *SyncConfig) CloseConnections() {
sc.mtx.RLock()
defer sc.mtx.RUnlock()
for _, pc := range sc.peers {
pc.client.Close()
}
}
// FindPeerByHash returns the peer with the given hash, or nil if not found.
func (sc *SyncConfig) FindPeerByHash(peerHash []byte) *SyncPeerConfig {
sc.mtx.RLock()
defer sc.mtx.RUnlock()
for _, pc := range sc.peers {
if bytes.Compare(pc.peerHash, peerHash) == 0 {
return pc
}
}
return nil
}
// AddNewBlock adds a newly received block into the state syncing queue
func (ss *StateSync) AddNewBlock(peerHash []byte, block *types.Block) {
for i, pc := range ss.syncConfig.peers {
if bytes.Compare(pc.peerHash, peerHash) != 0 {
continue
pc := ss.syncConfig.FindPeerByHash(peerHash)
if pc == nil {
// Received a block with no active peer; just ignore.
return
}
// TODO ek – we shouldn't mess with SyncPeerConfig's mutex.
// Factor this into a method, like pc.AddNewBlock(block)
pc.mux.Lock()
defer pc.mux.Unlock()
pc.newBlocks = append(pc.newBlocks, block)
pc.mux.Unlock()
utils.GetLogInstance().Debug("[SYNC] new block received", "total", len(ss.syncConfig.peers[i].newBlocks), "blockHeight", block.NumberU64())
}
utils.GetLogInstance().Debug("[SYNC] new block received", "total", len(pc.newBlocks), "blockHeight", block.NumberU64())
}
// CreateTestSyncPeerConfig used for testing.
@@ -138,9 +174,6 @@ func CompareSyncPeerConfigByblockHashes(a *SyncPeerConfig, b *SyncPeerConfig) in
// GetBlocks gets blocks via a gRPC request to the corresponding peer.
func (peerConfig *SyncPeerConfig) GetBlocks(hashes [][]byte) ([][]byte, error) {
if peerConfig.client == nil {
return nil, ErrSyncPeerConfigClientNotReady
}
response := peerConfig.client.GetBlocks(hashes)
if response == nil {
return nil, ErrGetBlock
@@ -149,71 +182,55 @@ func (peerConfig *SyncPeerConfig) GetBlocks(hashes [][]byte) ([][]byte, error) {
}
// CreateSyncConfig creates SyncConfig for StateSync object.
func (ss *StateSync) CreateSyncConfig(peers []p2p.Peer) bool {
func (ss *StateSync) CreateSyncConfig(peers []p2p.Peer) error {
utils.GetLogInstance().Debug("CreateSyncConfig: len of peers", "len", len(peers))
if len(peers) == 0 {
utils.GetLogInstance().Warn("[SYNC] Unable to get neighbor peers")
return false
}
ss.peerNumber = len(peers)
ss.syncConfig = &SyncConfig{
peers: make([]*SyncPeerConfig, ss.peerNumber),
return ctxerror.New("[SYNC] no peers to connect to")
}
for id := range ss.syncConfig.peers {
ss.syncConfig.peers[id] = &SyncPeerConfig{
ip: peers[id].IP,
port: peers[id].Port,
}
}
utils.GetLogInstance().Info("[SYNC] Finished creating SyncConfig")
return true
}
// MakeConnectionToPeers makes grpc connection to all peers.
func (ss *StateSync) MakeConnectionToPeers() {
ss.syncConfig = &SyncConfig{}
var wg sync.WaitGroup
wg.Add(ss.peerNumber)
for id := range ss.syncConfig.peers {
go func(peerConfig *SyncPeerConfig) {
for _, peer := range peers {
wg.Add(1)
go func(peer p2p.Peer) {
defer wg.Done()
peerConfig.client = downloader.ClientSetup(peerConfig.ip, peerConfig.port)
}(ss.syncConfig.peers[id])
client := downloader.ClientSetup(peer.IP, peer.Port)
if client == nil {
return
}
peerConfig := &SyncPeerConfig{
ip: peer.IP,
port: peer.Port,
client: client,
}
ss.syncConfig.AddPeer(peerConfig)
}(peer)
}
wg.Wait()
ss.CleanUpNilPeers()
utils.GetLogInstance().Info("[SYNC] Finished making connection to peers.")
return nil
}
// GetActivePeerNumber returns the number of active peers
func (ss *StateSync) GetActivePeerNumber() int {
if ss.syncConfig == nil || len(ss.syncConfig.peers) == 0 {
if ss.syncConfig == nil {
return 0
}
ss.CleanUpNilPeers()
return ss.activePeerNumber
}
// CleanUpNilPeers cleans up peer with nil client and recalculate activePeerNumber.
func (ss *StateSync) CleanUpNilPeers() {
ss.activePeerNumber = 0
for _, configPeer := range ss.syncConfig.peers {
if configPeer.client != nil {
ss.activePeerNumber++
}
}
utils.GetLogInstance().Info("[SYNC] clean up inactive peers", "activeNumber", ss.activePeerNumber)
// len() is atomic; no need to hold mutex.
return len(ss.syncConfig.peers)
}
// GetHowManyMaxConsensus returns max number of consensus nodes and the first ID of consensus group.
// getHowManyMaxConsensus returns max number of consensus nodes and the first ID of consensus group.
// Assumption: all peers are sorted by CompareSyncPeerConfigByBlockHashes first.
func (syncConfig *SyncConfig) GetHowManyMaxConsensus() (int, int) {
// Caller shall ensure mtx is locked for reading.
func (sc *SyncConfig) getHowManyMaxConsensus() (int, int) {
// As all peers are sorted by their blockHashes, all equal blockHashes should come together and consecutively.
curCount := 0
curFirstID := -1
maxCount := 0
maxFirstID := -1
for i := range syncConfig.peers {
if curFirstID == -1 || CompareSyncPeerConfigByblockHashes(syncConfig.peers[curFirstID], syncConfig.peers[i]) != 0 {
for i := range sc.peers {
if curFirstID == -1 || CompareSyncPeerConfigByblockHashes(sc.peers[curFirstID], sc.peers[i]) != 0 {
curCount = 1
curFirstID = i
} else {
@@ -228,40 +245,44 @@ func (syncConfig *SyncConfig) GetHowManyMaxConsensus() (int, int) {
}
// InitForTesting used for testing.
func (syncConfig *SyncConfig) InitForTesting(client *downloader.Client, blockHashes [][]byte) {
for i := range syncConfig.peers {
syncConfig.peers[i].blockHashes = blockHashes
syncConfig.peers[i].client = client
func (sc *SyncConfig) InitForTesting(client *downloader.Client, blockHashes [][]byte) {
sc.mtx.RLock()
defer sc.mtx.RUnlock()
for i := range sc.peers {
sc.peers[i].blockHashes = blockHashes
sc.peers[i].client = client
}
}
// CleanUpPeers cleans up all peers whose blockHashes are not equal to consensus block hashes.
func (syncConfig *SyncConfig) CleanUpPeers(maxFirstID int) {
fixedPeer := syncConfig.peers[maxFirstID]
for i := 0; i < len(syncConfig.peers); i++ {
if CompareSyncPeerConfigByblockHashes(fixedPeer, syncConfig.peers[i]) != 0 {
// cleanUpPeers cleans up all peers whose blockHashes are not equal to
// consensus block hashes. Caller shall ensure mtx is locked for RW.
func (sc *SyncConfig) cleanUpPeers(maxFirstID int) {
fixedPeer := sc.peers[maxFirstID]
for i := 0; i < len(sc.peers); i++ {
if CompareSyncPeerConfigByblockHashes(fixedPeer, sc.peers[i]) != 0 {
// TODO: move it into a util delete func.
// See tip https://github.com/golang/go/wiki/SliceTricks
// Close the client and remove the peer from the peer list.
syncConfig.peers[i].client.Close()
copy(syncConfig.peers[i:], syncConfig.peers[i+1:])
syncConfig.peers[len(syncConfig.peers)-1] = nil
syncConfig.peers = syncConfig.peers[:len(syncConfig.peers)-1]
sc.peers[i].client.Close()
copy(sc.peers[i:], sc.peers[i+1:])
sc.peers[len(sc.peers)-1] = nil
sc.peers = sc.peers[:len(sc.peers)-1]
}
}
}
// GetBlockHashesConsensusAndCleanUp checks if all consensus hashes are equal.
func (ss *StateSync) GetBlockHashesConsensusAndCleanUp() bool {
func (sc *SyncConfig) GetBlockHashesConsensusAndCleanUp() bool {
sc.mtx.Lock()
defer sc.mtx.Unlock()
// Sort all peers by the blockHashes.
sort.Slice(ss.syncConfig.peers, func(i, j int) bool {
return CompareSyncPeerConfigByblockHashes(ss.syncConfig.peers[i], ss.syncConfig.peers[j]) == -1
sort.Slice(sc.peers, func(i, j int) bool {
return CompareSyncPeerConfigByblockHashes(sc.peers[i], sc.peers[j]) == -1
})
maxFirstID, maxCount := ss.syncConfig.GetHowManyMaxConsensus()
maxFirstID, maxCount := sc.getHowManyMaxConsensus()
utils.GetLogInstance().Info("[SYNC] block consensus hashes", "maxFirstID", maxFirstID, "maxCount", maxCount)
if float64(maxCount) >= ConsensusRatio*float64(ss.activePeerNumber) {
ss.syncConfig.CleanUpPeers(maxFirstID)
ss.CleanUpNilPeers()
if float64(maxCount) >= ConsensusRatio*float64(len(sc.peers)) {
sc.cleanUpPeers(maxFirstID)
return true
}
return false
@@ -272,22 +293,20 @@ func (ss *StateSync) GetConsensusHashes(startHash []byte) bool {
count := 0
for {
var wg sync.WaitGroup
for id := range ss.syncConfig.peers {
if ss.syncConfig.peers[id].client == nil {
continue
}
ss.syncConfig.ForEachPeer(func(peerConfig *SyncPeerConfig) (brk bool) {
wg.Add(1)
go func(peerConfig *SyncPeerConfig) {
go func() {
defer wg.Done()
response := peerConfig.client.GetBlockHashes(startHash)
if response == nil {
return
}
peerConfig.blockHashes = response.Payload
}(ss.syncConfig.peers[id])
}
}()
return
})
wg.Wait()
if ss.GetBlockHashesConsensusAndCleanUp() {
if ss.syncConfig.GetBlockHashesConsensusAndCleanUp() {
break
}
if count > TimesToFail {
@@ -303,14 +322,13 @@ func (ss *StateSync) GetConsensusHashes(startHash []byte) bool {
func (ss *StateSync) generateStateSyncTaskQueue(bc *core.BlockChain) {
ss.stateSyncTaskQueue = queue.New(0)
for _, configPeer := range ss.syncConfig.peers {
if configPeer.client != nil {
ss.syncConfig.ForEachPeer(func(configPeer *SyncPeerConfig) (brk bool) {
for id, blockHash := range configPeer.blockHashes {
ss.stateSyncTaskQueue.Put(SyncBlockTask{index: id, blockHash: blockHash})
}
break
}
}
brk = true
return
})
utils.GetLogInstance().Info("syncing: Finished generateStateSyncTaskQueue", "length", ss.stateSyncTaskQueue.Len())
}
@@ -318,13 +336,10 @@ func (ss *StateSync) generateStateSyncTaskQueue(bc *core.BlockChain) {
func (ss *StateSync) downloadBlocks(bc *core.BlockChain) {
// Initialize blockchain
var wg sync.WaitGroup
wg.Add(ss.activePeerNumber)
count := 0
for i := range ss.syncConfig.peers {
if ss.syncConfig.peers[i].client == nil {
continue
}
go func(peerConfig *SyncPeerConfig, stateSyncTaskQueue *queue.Queue, bc *core.BlockChain) {
ss.syncConfig.ForEachPeer(func(peerConfig *SyncPeerConfig) (brk bool) {
wg.Add(1)
go func(stateSyncTaskQueue *queue.Queue, bc *core.BlockChain) {
defer wg.Done()
for !stateSyncTaskQueue.Empty() {
task, err := ss.stateSyncTaskQueue.Poll(1, time.Millisecond)
@@ -361,8 +376,9 @@ func (ss *StateSync) downloadBlocks(bc *core.BlockChain) {
ss.commonBlocks[syncTask.index] = &blockObj
ss.syncMux.Unlock()
}
}(ss.syncConfig.peers[i], ss.stateSyncTaskQueue, bc)
}
}(ss.stateSyncTaskQueue, bc)
return
})
wg.Wait()
utils.GetLogInstance().Info("[SYNC] Finished downloadBlocks.")
}
@@ -399,8 +415,7 @@ func GetHowManyMaxConsensus(blocks []*types.Block) (int, int) {
func (ss *StateSync) getMaxConsensusBlockFromParentHash(parentHash common.Hash) *types.Block {
candidateBlocks := []*types.Block{}
ss.syncMux.Lock()
for id := range ss.syncConfig.peers {
peerConfig := ss.syncConfig.peers[id]
ss.syncConfig.ForEachPeer(func(peerConfig *SyncPeerConfig) (brk bool) {
for _, block := range peerConfig.newBlocks {
ph := block.ParentHash()
if bytes.Compare(ph[:], parentHash[:]) == 0 {
@@ -408,7 +423,8 @@ func (ss *StateSync) getMaxConsensusBlockFromParentHash(parentHash common.Hash)
break
}
}
}
return
})
ss.syncMux.Unlock()
if len(candidateBlocks) == 0 {
return nil
@@ -488,10 +504,13 @@ func (ss *StateSync) generateNewState(bc *core.BlockChain, worker *worker.Worker
}
parentHash = block.Hash()
}
// TODO ek – Do we need to hold syncMux now that syncConfig has its own
// mutex?
ss.syncMux.Lock()
for id := range ss.syncConfig.peers {
ss.syncConfig.peers[id].newBlocks = []*types.Block{}
}
ss.syncConfig.ForEachPeer(func(peer *SyncPeerConfig) (brk bool) {
peer.newBlocks = []*types.Block{}
return
})
ss.syncMux.Unlock()
// update last mile blocks if any
@@ -538,42 +557,40 @@ func (peerConfig *SyncPeerConfig) registerToBroadcast(peerHash []byte, ip, port
// RegisterNodeInfo registers the node with peers so that they accept future new-block broadcasts.
// It returns the number of successful registrations.
func (ss *StateSync) RegisterNodeInfo() int {
ss.CleanUpNilPeers()
registrationNumber := RegistrationNumber
utils.GetLogInstance().Debug("[SYNC] node registration to peers", "registrationNumber", registrationNumber, "activePeerNumber", ss.activePeerNumber)
utils.GetLogInstance().Debug("[SYNC] node registration to peers",
"registrationNumber", registrationNumber,
"activePeerNumber", len(ss.syncConfig.peers))
count := 0
for id := range ss.syncConfig.peers {
peerConfig := ss.syncConfig.peers[id]
ss.syncConfig.ForEachPeer(func(peerConfig *SyncPeerConfig) (brk bool) {
if count >= registrationNumber {
break
brk = true
return
}
if peerConfig.ip == ss.selfip && peerConfig.port == GetSyncingPort(ss.selfport) {
utils.GetLogInstance().Debug("[SYNC] skip self", "peerport", peerConfig.port, "selfport", ss.selfport, "selfsyncport", GetSyncingPort(ss.selfport))
continue
}
if peerConfig.client == nil {
continue
return
}
err := peerConfig.registerToBroadcast(ss.selfPeerHash[:], ss.selfip, ss.selfport)
if err != nil {
utils.GetLogInstance().Debug("[SYNC] register failed to peer", "ip", peerConfig.ip, "port", peerConfig.port, "selfPeerHash", ss.selfPeerHash)
continue
return
}
utils.GetLogInstance().Debug("[SYNC] register success", "ip", peerConfig.ip, "port", peerConfig.port)
count++
}
return
})
return count
}
// getMaxPeerHeight gets the maximum blockchain height from peers
func (ss *StateSync) getMaxPeerHeight() uint64 {
ss.CleanUpNilPeers()
maxHeight := uint64(0)
var wg sync.WaitGroup
for id := range ss.syncConfig.peers {
ss.syncConfig.ForEachPeer(func(peerConfig *SyncPeerConfig) (brk bool) {
wg.Add(1)
go func(peerConfig *SyncPeerConfig) {
go func() {
defer wg.Done()
response := peerConfig.client.GetBlockChainHeight()
ss.syncMux.Lock()
@@ -581,8 +598,9 @@ func (ss *StateSync) getMaxPeerHeight() uint64 {
maxHeight = response.BlockHeight
}
ss.syncMux.Unlock()
}(ss.syncConfig.peers[id])
}
}()
return
})
wg.Wait()
return maxHeight
}
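
As context for the syncing.go changes above: SyncConfig now owns the lock on its peer list, and callers are expected to go through AddPeer, ForEachPeer, and FindPeerByHash instead of indexing ss.syncConfig.peers directly. A minimal sketch of the ForEachPeer pattern, assuming it lives in the same syncing package; countPeersWithHashes is a hypothetical helper, not part of this commit:

// countPeersWithHashes illustrates the ForEachPeer pattern: iterate over the
// peer list under SyncConfig's read lock without touching sc.mtx directly.
func countPeersWithHashes(sc *SyncConfig) int {
	n := 0
	sc.ForEachPeer(func(peer *SyncPeerConfig) (brk bool) {
		if len(peer.blockHashes) > 0 {
			n++
		}
		return // brk stays false, so iteration continues over all peers
	})
	return n
}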

@@ -54,6 +54,8 @@ type AccountState struct {
const (
rpcRetry = 3
defaultConfigFile = ".hmy/wallet.ini"
defaultProfile = "default"
)
var (
@@ -77,32 +79,7 @@ var (
)
var (
// list of bootnodes
addrStrings = []string{"/ip4/100.26.90.187/tcp/9876/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9", "/ip4/54.213.43.194/tcp/9876/p2p/QmQayinFSgMMw5cSpDUiD9pQ2WeP6WNmGxpZ6ou3mdVFJX"}
// list of rpc servers
rpcServers = []p2p.Peer{
p2p.Peer{
IP: "18.236.187.250",
Port: "14555",
},
p2p.Peer{
IP: "54.186.236.223",
Port: "14555",
},
p2p.Peer{
IP: "18.213.246.142",
Port: "14555",
},
p2p.Peer{
IP: "75.101.226.226",
Port: "14555",
},
p2p.Peer{
IP: "34.221.85.140",
Port: "14555",
},
}
walletProfile *utils.WalletProfile
)
// setupLog sets up logging for verbose output
@@ -122,7 +99,10 @@ func main() {
// os.Arg[1] will be the subcommand
if len(os.Args) < 2 {
fmt.Println("Usage:")
fmt.Println(" wallet <action> <params>")
fmt.Println(" wallet -p profile <action> <params>")
fmt.Println(" -p profile - Specify the profile of the wallet, either testnet/devnet or others configured. Default is: testnet")
fmt.Println(" The profile is in file:", defaultConfigFile)
fmt.Println()
fmt.Println("Actions:")
fmt.Println(" 1. new - Generates a new account and store the private key locally")
fmt.Println(" 2. list - Lists all accounts in local keystore")
@@ -133,7 +113,7 @@ func main() {
fmt.Println(" --address - The address to check balance for")
fmt.Println(" 6. getFreeToken - Gets free token on each shard")
fmt.Println(" --address - The free token receiver account's address")
fmt.Println(" 7. transfer")
fmt.Println(" 7. transfer - Transfer token from one account to another")
fmt.Println(" --from - The sender account's address or index in the local keystore")
fmt.Println(" --to - The receiver account's address")
fmt.Println(" --amount - The amount of token to transfer")
@@ -149,22 +129,22 @@ ARG:
case "--verbose":
setupLog()
os.Args = os.Args[:len(os.Args)-1]
case "--devnet":
// the multiaddress of bootnodes for devnet
addrStrings = []string{"/ip4/100.26.90.187/tcp/9871/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv", "/ip4/54.213.43.194/tcp/9871/p2p/QmRVbTpEYup8dSaURZfF6ByrMTSKa4UyUzJhSjahFzRqNj"}
os.Args = os.Args[:len(os.Args)-1]
default:
break ARG
}
}
if len(os.Getenv("RpcNodes")) > 0 {
rpcServers = utils.StringsToPeers(os.Getenv("RpcNodes"))
var profile string
if os.Args[1] == "-p" {
profile = os.Args[2]
os.Args = os.Args[2:]
} else {
profile = defaultProfile
}
if len(rpcServers) == 0 {
fmt.Println("Error: please set environment variable RpcNodes")
fmt.Println("Example: export RpcNodes=127.0.0.1:8000,192.168.0.1:9999")
os.Exit(0)
if len(os.Args) == 1 {
fmt.Println("Missing action")
flag.PrintDefaults()
os.Exit(1)
}
// Switch on the subcommand
@@ -180,10 +160,13 @@ ARG:
case "import":
processImportCommnad()
case "balances":
readProfile(profile)
processBalancesCommand()
case "getFreeToken":
readProfile(profile)
processGetFreeToken()
case "transfer":
readProfile(profile)
processTransferCommand()
default:
fmt.Printf("Unknown action: %s\n", os.Args[1])
@@ -192,9 +175,19 @@ ARG:
}
}
func readProfile(profile string) {
fmt.Printf("Using %s profile for wallet\n", profile)
var err error
walletProfile, err = utils.ReadWalletProfile(defaultConfigFile, profile)
if err != nil {
fmt.Printf("Read wallet profile error: %v\nExiting ...\n", err)
os.Exit(2)
}
}
// createWalletNode creates wallet server node.
func createWalletNode() *node.Node {
bootNodeAddrs, err := utils.StringsToAddrs(addrStrings)
bootNodeAddrs, err := utils.StringsToAddrs(walletProfile.Bootnodes)
if err != nil {
panic(err)
}
@@ -427,14 +420,14 @@ func convertBalanceIntoReadableFormat(balance *big.Int) string {
}
// FetchBalance fetches account balance of specified address from the Harmony network
// TODO: (chao) add support for non beacon chain shards
func FetchBalance(address common.Address) map[uint32]AccountState {
result := make(map[uint32]AccountState)
balance := big.NewInt(0)
result[0] = AccountState{balance, 0}
for i := 0; i < walletProfile.Shards; i++ {
result[uint32(i)] = AccountState{balance, 0}
for retry := 0; retry < rpcRetry; retry++ {
server := rpcServers[rand.Intn(len(rpcServers))]
server := walletProfile.RPCServer[i][rand.Intn(len(walletProfile.RPCServer[i]))]
client, err := clientService.NewClient(server.IP, server.Port)
if err != nil {
continue
@@ -448,16 +441,18 @@ func FetchBalance(address common.Address) map[uint32]AccountState {
}
log.Debug("FetchBalance", "response", response)
balance.SetBytes(response.Balance)
result[0] = AccountState{balance, response.Nonce}
result[uint32(i)] = AccountState{balance, response.Nonce}
break
}
}
return result
}
// GetFreeToken requests free test tokens on each shard
func GetFreeToken(address common.Address) {
for retry := 0; retry < rpcRetry; retry++ {
server := rpcServers[0]
// use the 1st server from shard 0 (beacon chain) to make the getFreeToken call
server := walletProfile.RPCServer[0][0]
client, err := clientService.NewClient(server.IP, server.Port)
if err != nil {
continue

@@ -49,6 +49,7 @@ require (
golang.org/x/tools v0.0.0-20190320160634-b6b7807791df
google.golang.org/grpc v1.19.0
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
gopkg.in/ini.v1 v1.42.0
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
)

@@ -0,0 +1,124 @@
// Package ctxerror provides a context-aware error facility.
//
// Inspired by log15-style (semi-)structured logging,
// it also provides a log15 bridge.
package ctxerror
//go:generate mockgen -source ctxerror.go -destination mock/ctxerror.go
import (
"fmt"
)
// CtxError is a context-aware error container.
type CtxError interface {
// Error returns a fully formatted message, with context info.
Error() string
// Message returns the bare error message, without context info.
Message() string
// Contexts returns message contexts.
// Caller shall not modify the returned map.
Contexts() map[string]interface{}
// WithCause chains an error after the receiver.
// It returns the merged/chained instance,
// where the message is "<receiver.Message>: <c.Message>",
// and with contexts merged (ones in c take precedence).
WithCause(c error) CtxError
}
type ctxError struct {
msg string
ctx map[string]interface{}
}
// New creates and returns a new context-aware error.
func New(msg string, ctx ...interface{}) CtxError {
e := &ctxError{msg: msg, ctx: make(map[string]interface{})}
e.updateCtx(ctx...)
return e
}
func (e *ctxError) updateCtx(ctx ...interface{}) {
var name string
if len(ctx)%2 == 1 {
ctx = append(ctx, nil)
}
for idx, value := range ctx {
if idx%2 == 0 {
name = value.(string)
} else {
e.ctx[name] = value
}
}
}
// Error returns a fully formatted message, with context info.
func (e *ctxError) Error() string {
s := e.msg
for k, v := range e.ctx {
s += fmt.Sprintf(", %s=%#v", k, v)
}
return s
}
// Message returns the bare error message, without context info.
func (e *ctxError) Message() string {
return e.msg
}
// Contexts returns message contexts.
// Caller shall not modify the returned map.
func (e *ctxError) Contexts() map[string]interface{} {
return e.ctx
}
// WithCause chains an error after the receiver.
// It returns the merged/chained instance,
// where the message is “<receiver.Message>: <c.Message>”,
// and with contexts merged (ones in c take precedence).
func (e *ctxError) WithCause(c error) CtxError {
r := &ctxError{msg: e.msg + ": ", ctx: make(map[string]interface{})}
for k, v := range e.ctx {
r.ctx[k] = v
}
switch c := c.(type) {
case *ctxError:
r.msg += c.msg
for k, v := range c.ctx {
r.ctx[k] = v
}
default:
r.msg += c.Error()
}
return r
}
// Log15Func is a log15-compatible logging function.
type Log15Func func(msg string, ctx ...interface{})
// Log15Logger logs something with a log15-style logging function.
type Log15Logger interface {
Log15(f Log15Func)
}
// Log15 logs the receiver with a log15-style logging function.
func (e *ctxError) Log15(f Log15Func) {
var ctx []interface{}
for k, v := range e.ctx {
ctx = append(ctx, k, v)
}
f(e.msg, ctx...)
}
// Log15 logs an error with a log15-style logging function.
// It handles both regular errors and Log15Logger-compliant errors.
func Log15(f Log15Func, e error) {
if e15, ok := e.(Log15Logger); ok {
e15.Log15(f)
} else {
f(e.Error())
}
}
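
A minimal usage sketch of the ctxerror API defined above; the program is illustrative only (the "shard"/"numPeers" context keys and the inline logger are invented for the example):

package main

import (
	"errors"
	"fmt"

	"github.com/harmony-one/harmony/internal/ctxerror"
)

func main() {
	// Build a context-aware error, then chain a plain error as its cause.
	err := ctxerror.New("cannot sync shard", "shard", 0, "numPeers", 5).
		WithCause(errors.New("connection refused"))

	// Error() appends the context pairs; their order is not guaranteed.
	fmt.Println(err.Error())   // e.g. cannot sync shard: connection refused, shard=0, numPeers=5
	fmt.Println(err.Message()) // cannot sync shard: connection refused

	// Bridge into any log15-style logging function (here a stand-in that prints).
	ctxerror.Log15(func(msg string, ctx ...interface{}) {
		fmt.Println("DEBUG:", msg, ctx)
	}, err)
}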

@@ -0,0 +1,363 @@
package ctxerror
import (
"errors"
"reflect"
"testing"
)
func TestNew(t *testing.T) {
type args struct {
msg string
ctx []interface{}
}
tests := []struct {
name string
args args
want CtxError
}{
{
name: "Empty",
args: args{msg: "", ctx: []interface{}{}},
want: &ctxError{msg: "", ctx: map[string]interface{}{}},
},
{
name: "Regular",
args: args{msg: "omg", ctx: []interface{}{"wtf", 1, "bbq", 2}},
want: &ctxError{msg: "omg", ctx: map[string]interface{}{"wtf": 1, "bbq": 2}},
},
{
name: "Truncated",
args: args{
msg: "omg",
ctx: []interface{}{"wtf", 1, "bbq" /* missing value... */},
},
want: &ctxError{
msg: "omg",
ctx: map[string]interface{}{"wtf": 1, "bbq": /* becomes */ nil},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := New(tt.args.msg, tt.args.ctx...); !reflect.DeepEqual(got, tt.want) {
t.Errorf("New() = %#v, want %#v", got, tt.want)
}
})
}
}
func Test_ctxError_updateCtx(t *testing.T) {
tests := []struct {
name string
before, after map[string]interface{}
delta []interface{}
}{
{
name: "Empty",
before: map[string]interface{}{"omg": 1, "wtf": 2, "bbq": 3},
delta: []interface{}{},
after: map[string]interface{}{"omg": 1, "wtf": 2, "bbq": 3},
},
{
name: "Regular",
before: map[string]interface{}{"omg": 1, "wtf": 2, "bbq": 3},
delta: []interface{}{"omg", 10, "wtf", 20},
after: map[string]interface{}{"omg": 10, "wtf": 20, "bbq": 3},
},
{
name: "Truncated",
before: map[string]interface{}{"omg": 1, "wtf": 2, "bbq": 3},
delta: []interface{}{"omg", 10, "wtf" /* missing value... */},
after: map[string]interface{}{"omg": 10, "wtf": /* becomes */ nil, "bbq": 3},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
e := &ctxError{msg: tt.name, ctx: tt.before}
e.updateCtx(tt.delta...)
if !reflect.DeepEqual(e.ctx, tt.after) {
t.Errorf("expected ctx %#v != %#v seen", tt.after, e.ctx)
}
})
}
}
func Test_ctxError_Error(t *testing.T) {
type fields struct {
msg string
ctx map[string]interface{}
}
tests := []struct {
name string
fields fields
want string
}{
{
name: "AllEmpty",
fields: fields{msg: "", ctx: map[string]interface{}{}},
want: "",
},
{
name: "CtxEmpty",
fields: fields{msg: "omg", ctx: map[string]interface{}{}},
want: "omg",
},
{
name: "MsgEmpty",
fields: fields{msg: "", ctx: map[string]interface{}{"wtf": "bbq"}},
want: ", wtf=\"bbq\"",
},
{
name: "Regular",
fields: fields{msg: "omg", ctx: map[string]interface{}{"wtf": "bbq"}},
want: "omg, wtf=\"bbq\"",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
e := &ctxError{
msg: tt.fields.msg,
ctx: tt.fields.ctx,
}
if got := e.Error(); got != tt.want {
t.Errorf("Error() = %#v, want %#v", got, tt.want)
}
})
}
}
func Test_ctxError_Message(t *testing.T) {
type fields struct {
msg string
ctx map[string]interface{}
}
tests := []struct {
name string
fields fields
want string
}{
{
name: "AllEmpty",
fields: fields{msg: "", ctx: map[string]interface{}{}},
want: "",
},
{
name: "CtxEmpty",
fields: fields{msg: "omg", ctx: map[string]interface{}{}},
want: "omg",
},
{
name: "MsgEmpty",
fields: fields{msg: "", ctx: map[string]interface{}{"wtf": "bbq"}},
want: "",
},
{
name: "Regular",
fields: fields{msg: "omg", ctx: map[string]interface{}{"wtf": "bbq"}},
want: "omg",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
e := &ctxError{
msg: tt.fields.msg,
ctx: tt.fields.ctx,
}
if got := e.Message(); got != tt.want {
t.Errorf("Message() = %#v, want %#v", got, tt.want)
}
})
}
}
func Test_ctxError_Contexts(t *testing.T) {
type fields struct {
msg string
ctx map[string]interface{}
}
tests := []struct {
name string
fields fields
want map[string]interface{}
}{
{
name: "Empty",
fields: fields{msg: "", ctx: map[string]interface{}{}},
want: map[string]interface{}{},
},
{
name: "Regular",
fields: fields{
msg: "",
ctx: map[string]interface{}{"omg": 1, "wtf": 2, "bbq": 3},
},
want: map[string]interface{}{"omg": 1, "wtf": 2, "bbq": 3},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
e := &ctxError{
msg: tt.fields.msg,
ctx: tt.fields.ctx,
}
if got := e.Contexts(); !reflect.DeepEqual(got, tt.want) {
t.Errorf("Contexts() = %#v, want %#v", got, tt.want)
}
})
}
}
func Test_ctxError_WithCause(t *testing.T) {
type fields struct {
msg string
ctx map[string]interface{}
}
type args struct {
c error
}
tests := []struct {
name string
fields fields
args args
want CtxError
}{
{
name: "CtxError",
fields: fields{
msg: "hello",
ctx: map[string]interface{}{"omg": 1, "wtf": 2},
},
args: args{c: &ctxError{
msg: "world",
ctx: map[string]interface{}{"wtf": 20, "bbq": 30},
}},
want: &ctxError{
msg: "hello: world",
ctx: map[string]interface{}{"omg": 1, "wtf": 20, "bbq": 30},
},
},
{
name: "RegularError",
fields: fields{
msg: "hello",
ctx: map[string]interface{}{"omg": 1, "wtf": 2},
},
args: args{c: errors.New("world")},
want: &ctxError{
msg: "hello: world",
ctx: map[string]interface{}{"omg": 1, "wtf": 2},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
e := &ctxError{
msg: tt.fields.msg,
ctx: tt.fields.ctx,
}
if got := e.WithCause(tt.args.c); !reflect.DeepEqual(got, tt.want) {
t.Errorf("WithCause() = %#v, want %#v", got, tt.want)
}
})
}
}
func Test_ctxError_Log15(t *testing.T) {
type fields struct {
msg string
ctx map[string]interface{}
}
type want struct {
msg string
ctx []interface{}
}
tests := []struct {
name string
fields fields
want want
}{
{
name: "Empty",
fields: fields{msg: "", ctx: map[string]interface{}{}},
want: want{msg: "", ctx: nil},
},
{
name: "Regular",
fields: fields{msg: "hello", ctx: map[string]interface{}{"omg": 1}},
want: want{msg: "hello", ctx: []interface{}{"omg", 1}},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
called := false
f := func(msg string, ctx ...interface{}) {
called = true
if msg != tt.want.msg {
t.Errorf("expected message %#v != %#v seen",
tt.want.msg, msg)
}
if !reflect.DeepEqual(ctx, tt.want.ctx) {
t.Errorf("expected ctx %#v != %#v seen", ctx, tt.want.ctx)
}
}
e := &ctxError{
msg: tt.fields.msg,
ctx: tt.fields.ctx,
}
e.Log15(f)
if !called {
t.Error("logging func not called")
}
})
}
}
func TestLog15(t *testing.T) {
type args struct {
e error
}
type want struct {
msg string
ctx []interface{}
}
tests := []struct {
name string
args args
want want
}{
{
name: "Regular",
args: args{e: errors.New("hello")},
want: want{msg: "hello", ctx: nil},
},
{
name: "CtxError",
args: args{e: &ctxError{
msg: "hello",
ctx: map[string]interface{}{"omg": 1},
}},
want: want{msg: "hello", ctx: []interface{}{"omg", 1}},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
called := false
f := func(msg string, ctx ...interface{}) {
called = true
if msg != tt.want.msg {
t.Errorf("expected message %#v != %#v seen",
tt.want.msg, msg)
}
if !reflect.DeepEqual(ctx, tt.want.ctx) {
t.Errorf("expected ctx %#v != %#v seen",
tt.want.ctx, ctx)
}
}
Log15(f, tt.args.e)
if !called {
t.Errorf("logging func not called")
}
})
}
}

@@ -0,0 +1,125 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: ctxerror.go
// Package mock_ctxerror is a generated GoMock package.
package mock_ctxerror
import (
gomock "github.com/golang/mock/gomock"
ctxerror "github.com/harmony-one/harmony/internal/ctxerror"
reflect "reflect"
)
// MockCtxError is a mock of CtxError interface
type MockCtxError struct {
ctrl *gomock.Controller
recorder *MockCtxErrorMockRecorder
}
// MockCtxErrorMockRecorder is the mock recorder for MockCtxError
type MockCtxErrorMockRecorder struct {
mock *MockCtxError
}
// NewMockCtxError creates a new mock instance
func NewMockCtxError(ctrl *gomock.Controller) *MockCtxError {
mock := &MockCtxError{ctrl: ctrl}
mock.recorder = &MockCtxErrorMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockCtxError) EXPECT() *MockCtxErrorMockRecorder {
return m.recorder
}
// Error mocks base method
func (m *MockCtxError) Error() string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Error")
ret0, _ := ret[0].(string)
return ret0
}
// Error indicates an expected call of Error
func (mr *MockCtxErrorMockRecorder) Error() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockCtxError)(nil).Error))
}
// Message mocks base method
func (m *MockCtxError) Message() string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Message")
ret0, _ := ret[0].(string)
return ret0
}
// Message indicates an expected call of Message
func (mr *MockCtxErrorMockRecorder) Message() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Message", reflect.TypeOf((*MockCtxError)(nil).Message))
}
// Contexts mocks base method
func (m *MockCtxError) Contexts() map[string]interface{} {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Contexts")
ret0, _ := ret[0].(map[string]interface{})
return ret0
}
// Contexts indicates an expected call of Contexts
func (mr *MockCtxErrorMockRecorder) Contexts() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Contexts", reflect.TypeOf((*MockCtxError)(nil).Contexts))
}
// WithCause mocks base method
func (m *MockCtxError) WithCause(c error) ctxerror.CtxError {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WithCause", c)
ret0, _ := ret[0].(ctxerror.CtxError)
return ret0
}
// WithCause indicates an expected call of WithCause
func (mr *MockCtxErrorMockRecorder) WithCause(c interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithCause", reflect.TypeOf((*MockCtxError)(nil).WithCause), c)
}
// MockLog15Logger is a mock of Log15Logger interface
type MockLog15Logger struct {
ctrl *gomock.Controller
recorder *MockLog15LoggerMockRecorder
}
// MockLog15LoggerMockRecorder is the mock recorder for MockLog15Logger
type MockLog15LoggerMockRecorder struct {
mock *MockLog15Logger
}
// NewMockLog15Logger creates a new mock instance
func NewMockLog15Logger(ctrl *gomock.Controller) *MockLog15Logger {
mock := &MockLog15Logger{ctrl: ctrl}
mock.recorder = &MockLog15LoggerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockLog15Logger) EXPECT() *MockLog15LoggerMockRecorder {
return m.recorder
}
// Log15 mocks base method
func (m *MockLog15Logger) Log15(f ctxerror.Log15Func) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "Log15", f)
}
// Log15 indicates an expected call of Log15
func (mr *MockLog15LoggerMockRecorder) Log15(f interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Log15", reflect.TypeOf((*MockLog15Logger)(nil).Log15), f)
}

@@ -0,0 +1,66 @@
package utils
// this module in utils handles the ini file read/write
import (
"fmt"
"strings"
"github.com/harmony-one/harmony/p2p"
ini "gopkg.in/ini.v1"
)
// WalletProfile holds a wallet configuration profile: its name, bootnodes, shard count, and per-shard RPC servers
type WalletProfile struct {
Profile string
Bootnodes []string
Shards int
RPCServer [][]p2p.Peer
}
// ReadWalletProfile reads an INI file and returns a WalletProfile
func ReadWalletProfile(fn string, profile string) (*WalletProfile, error) {
cfg, err := ini.ShadowLoad(fn)
if err != nil {
return nil, err
}
config := new(WalletProfile)
config.Profile = profile
// get the profile section
sec, err := cfg.GetSection(profile)
if err != nil {
return nil, err
}
if sec.HasKey("bootnode") {
config.Bootnodes = sec.Key("bootnode").ValueWithShadows()
} else {
return nil, fmt.Errorf("can't find bootnode key")
}
if sec.HasKey("shards") {
config.Shards = sec.Key("shards").MustInt()
config.RPCServer = make([][]p2p.Peer, config.Shards)
} else {
return nil, fmt.Errorf("can't find shards key")
}
for i := 0; i < config.Shards; i++ {
rpcSec, err := cfg.GetSection(fmt.Sprintf("%s.shard%v.rpc", profile, i))
if err != nil {
return nil, err
}
rpcKey := rpcSec.Key("rpc").ValueWithShadows()
for _, key := range rpcKey {
v := strings.Split(key, ":")
rpc := p2p.Peer{
IP: v[0],
Port: v[1],
}
config.RPCServer[i] = append(config.RPCServer[i], rpc)
}
}
return config, nil
}
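
A short usage sketch for ReadWalletProfile, mirroring how the wallet loads .hmy/wallet.ini in this diff; the chosen profile name and the printing are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/harmony-one/harmony/internal/utils"
)

func main() {
	// Load the "devnet" profile from the default wallet config file.
	profile, err := utils.ReadWalletProfile(".hmy/wallet.ini", "devnet")
	if err != nil {
		log.Fatalf("cannot read wallet profile: %v", err)
	}
	fmt.Println("bootnodes:", profile.Bootnodes)
	// RPCServer is indexed by shard, then by server within that shard.
	for shard := 0; shard < profile.Shards; shard++ {
		for _, rpc := range profile.RPCServer[shard] {
			fmt.Printf("shard %d rpc endpoint %s:%s\n", shard, rpc.IP, rpc.Port)
		}
	}
}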

@@ -0,0 +1,112 @@
package utils
import (
"reflect"
"testing"
"github.com/harmony-one/harmony/p2p"
)
func TestReadWalletProfile(t *testing.T) {
config := []*WalletProfile{
&WalletProfile{
Profile: "default",
Bootnodes: []string{"127.0.0.1:9000/abcd", "127.0.0.1:9999/daeg"},
Shards: 4,
RPCServer: [][]p2p.Peer{
[]p2p.Peer{
p2p.Peer{
IP: "127.0.0.4",
Port: "8888",
},
p2p.Peer{
IP: "192.168.0.4",
Port: "9876",
},
},
[]p2p.Peer{
p2p.Peer{
IP: "127.0.0.1",
Port: "8888",
},
p2p.Peer{
IP: "192.168.0.1",
Port: "9876",
},
},
[]p2p.Peer{
p2p.Peer{
IP: "127.0.0.2",
Port: "8888",
},
p2p.Peer{
IP: "192.168.0.2",
Port: "9876",
},
},
[]p2p.Peer{
p2p.Peer{
IP: "127.0.0.3",
Port: "8888",
},
p2p.Peer{
IP: "192.168.0.3",
Port: "9876",
},
},
},
},
&WalletProfile{
Profile: "testnet",
Bootnodes: []string{"192.168.0.1:9990/abcd", "127.0.0.1:8888/daeg"},
Shards: 3,
RPCServer: [][]p2p.Peer{
[]p2p.Peer{
p2p.Peer{
IP: "192.168.2.3",
Port: "8888",
},
p2p.Peer{
IP: "192.168.192.3",
Port: "9877",
},
},
[]p2p.Peer{
p2p.Peer{
IP: "192.168.2.1",
Port: "8888",
},
p2p.Peer{
IP: "192.168.192.1",
Port: "9877",
},
},
[]p2p.Peer{
p2p.Peer{
IP: "192.168.2.2",
Port: "8888",
},
p2p.Peer{
IP: "192.168.192.2",
Port: "9877",
},
},
},
},
}
config1, err := ReadWalletProfile("test.ini", "default")
if err != nil {
t.Fatalf("ReadWalletProfile Error: %v", err)
}
if !reflect.DeepEqual(config[0], config1) {
t.Errorf("Got: %v\nExpect: %v\n", config1, config[0])
}
config2, err := ReadWalletProfile("test.ini", "testnet")
if err != nil {
t.Fatalf("ReadWalletProfile Error: %v", err)
}
if !reflect.DeepEqual(config[1], config2) {
t.Errorf("Got: %v\nExpect: %v\n", config2, config[1])
}
}

@@ -0,0 +1,37 @@
[default]
bootnode = 127.0.0.1:9000/abcd
bootnode = 127.0.0.1:9999/daeg
shards = 4
[default.shard0.rpc]
rpc = 127.0.0.4:8888
rpc = 192.168.0.4:9876
[default.shard1.rpc]
rpc = 127.0.0.1:8888
rpc = 192.168.0.1:9876
[default.shard2.rpc]
rpc = 127.0.0.2:8888
rpc = 192.168.0.2:9876
[default.shard3.rpc]
rpc = 127.0.0.3:8888
rpc = 192.168.0.3:9876
[testnet]
bootnode = 192.168.0.1:9990/abcd
bootnode = 127.0.0.1:8888/daeg
shards = 3
[testnet.shard0.rpc]
rpc = 192.168.2.3:8888
rpc = 192.168.192.3:9877
[testnet.shard1.rpc]
rpc = 192.168.2.1:8888
rpc = 192.168.192.1:9877
[testnet.shard2.rpc]
rpc = 192.168.2.2:8888
rpc = 192.168.192.2:9877

@@ -13,6 +13,7 @@ import (
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
"github.com/harmony-one/harmony/internal/ctxerror"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/node/worker"
"github.com/harmony-one/harmony/p2p"
@@ -61,10 +62,9 @@ SyncingLoop:
node.stateSync = syncing.CreateStateSync(node.SelfPeer.IP, node.SelfPeer.Port, node.GetSyncID())
}
if node.stateSync.GetActivePeerNumber() == 0 {
if node.stateSync.CreateSyncConfig(getPeers()) {
node.stateSync.MakeConnectionToPeers()
} else {
utils.GetLogInstance().Debug("[SYNC] no active peers, continue SyncingLoop")
peers := getPeers()
if err := node.stateSync.CreateSyncConfig(peers); err != nil {
ctxerror.Log15(utils.GetLogInstance().Debug, err)
continue SyncingLoop
}
}

@@ -4,5 +4,5 @@ exec git ls-files '*.go' | grep -v \
-e '\.pb\.go$' \
-e '/mock_stream\.go' \
-e '/host_mock\.go' \
-e '^p2p/host/hostv2/mock/' \
-e '/mock/[^/]*\.go' \
-e '/gen_[^/]*\.go'
