From f0747bdd001dc99e1a3a05e0d0557baea5dfc9e0 Mon Sep 17 00:00:00 2001
From: Andy Wu
Date: Wed, 16 Oct 2019 21:01:45 -0700
Subject: [PATCH 01/11] fn key swaps for epoch 51

find a duplicated key which was introduced in epoch 46
re-targeting epoch 54; verified with Li
---
 internal/configs/sharding/mainnet.go             | 4 ++--
 internal/configs/sharding/shardingconfig_test.go | 2 +-
 internal/genesis/foundational.go                 | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/internal/configs/sharding/mainnet.go b/internal/configs/sharding/mainnet.go
index 6470c0ec8..9b032e5c4 100644
--- a/internal/configs/sharding/mainnet.go
+++ b/internal/configs/sharding/mainnet.go
@@ -27,7 +27,7 @@ const (
 	mainnetV1_2Epoch = 25
 	mainnetV1_3Epoch = 36
 	mainnetV1_4Epoch = 46
-	mainnetV1_5Epoch = 50
+	mainnetV1_5Epoch = 54
 
 	mainnetMaxTxAmountLimit               = 1e3 // unit is interface{} One
 	mainnetMaxNumRecentTxsPerAccountLimit = 1e2
@@ -50,7 +50,7 @@ type mainnetSchedule struct{}
 func (mainnetSchedule) InstanceForEpoch(epoch *big.Int) Instance {
 	switch {
 	case epoch.Cmp(big.NewInt(mainnetV1_5Epoch)) >= 0:
-		// forty-nine resharding epoch (for shard 0) around 17/10/2019 4:05:16 PDT
+		// 54 resharding epoch (for shard 0) around 23/10/2019 ~10:05 PDT
 		return mainnetV1_5
 	case epoch.Cmp(big.NewInt(mainnetV1_4Epoch)) >= 0:
 		// forty-sixth resharding epoch around 10/10/2019 8:06pm PDT
diff --git a/internal/configs/sharding/shardingconfig_test.go b/internal/configs/sharding/shardingconfig_test.go
index 210fe36dd..07534a890 100644
--- a/internal/configs/sharding/shardingconfig_test.go
+++ b/internal/configs/sharding/shardingconfig_test.go
@@ -36,7 +36,7 @@ func TestMainnetInstanceForEpoch(t *testing.T) {
 			mainnetV1_4,
 		},
 		{
-			big.NewInt(50),
+			big.NewInt(54),
 			mainnetV1_5,
 		},
 	}
diff --git a/internal/genesis/foundational.go b/internal/genesis/foundational.go
index 5cd326269..13f846713 100644
--- a/internal/genesis/foundational.go
+++ b/internal/genesis/foundational.go
@@ -2680,7 +2680,7 @@ var FoundationalNodeAccountsV1_4 = []DeployAccount{
 	{Index: "319", Address: "one19c4uqfzezuws7e4ka4kvc5r09suks2ghpyg6xw", BlsPublicKey: "51b2019b222df63fc99d202b03834dee09f1ef11e25a03592a96c1d01bca2bedfc25e0f26d88dcbb8a7176e30e1ec116"},
 }
 
-// FoundationalNodeAccountsV1_5 are the accounts for the foundational nodes from Epoch 50.
+// FoundationalNodeAccountsV1_5 are the accounts for the foundational nodes from Epoch 54.
 var FoundationalNodeAccountsV1_5 = []DeployAccount{
 	{Index: "0", Address: "one1y0xcf40fg65n2ehm8fx5vda4thrkymhpg45ecj", BlsPublicKey: "9e70e8d76851f6e8dc648255acdd57bb5c49cdae7571aed43f86e9f140a6343caed2ffa860919d03e0912411fee4850a"},
 	{Index: "1", Address: "one18lp2w7ghhuajdpzl8zqeddza97u92wtkfcwpjk", BlsPublicKey: "fce3097d9fc234d34d6eaef3eecd0365d435d1118f69f2da1ed2a69ba725270771572e40347c222aca784cb973307b11"},
@@ -2800,7 +2800,7 @@ var FoundationalNodeAccountsV1_5 = []DeployAccount{
 	{Index: "115", Address: "one14ajehwyxpzpzxhke77mhtt0z6k5z6cevgf6rfa", BlsPublicKey: "52ba9ca9d046ac237214e81438b054d42b17c16654b041562723d8e6e928f92a83e6373da28a821d285ebfe118e81884"},
 	{Index: "116", Address: "one1hxqhp9tls9r4v5hz208g93exhvz5ak258ut7d2", BlsPublicKey: "95bad32a857901a2eecf20aa516a6fc0c21d85015ba0dc70a966f0bd70b0f3bc0f5af356fac630ef53e5e1a329d7fe0a"},
 	{Index: "117", Address: "one1wt5darzj8wd385xl8stccj4sv6553hgckaypfr", BlsPublicKey: "9622f8a5590d6ef8ca94e6c866d663aa0398caf00a88b2dd059dc7a63daa8600828a85737eca4e595caa382b5d407205"},
-	{Index: "118", Address: "one19saqljg2w5n402p589y6xenjc6lan46a9l9tah", BlsPublicKey: "bcd24c722dc5dd3727bc3f027e3f681e4d1f5a552513d158645833eb8d8d39ec1076370b55e063aeed5a7825eb6aa20a"},
+	{Index: "118", Address: "one1k80wv3uvfw5r0qhzp9yxn94u4jxu8my2xwuk87", BlsPublicKey: "bcd24c722dc5dd3727bc3f027e3f681e4d1f5a552513d158645833eb8d8d39ec1076370b55e063aeed5a7825eb6aa20a"},
 	{Index: "119", Address: "one1kwqkyzq2pmhvufe9528g9nd966ur54v6auzruf", BlsPublicKey: "aaac4eb8260e6cee7f19fbcae721ce2d68f125461953a583adca44407194452e7ac41de0757e2921c8fed83469172f92"},
 	{Index: "120", Address: "one1gjas4xurmc0rguafq63ql65rwuxayukm74w2mn", BlsPublicKey: "d6c8cf5553fa77257d26ba6b201294a2a497d070d420ab76c044efc0f4325f40b5664e7a7f973940ef1ea57530215886"},
 	{Index: "121", Address: "one1pkw7wnplp077fn6phv2kfejw3u7wvx0m9vppzc", BlsPublicKey: "92d5e3fb5d3f1e64af4be7c0acbd457b68a2ec59cf34aaaa0bac04d0e0346b283a65e0227378a60e1fe7af2407d9c50a"},

From 3c394abac3affc7f5d6e436fd150085577468953 Mon Sep 17 00:00:00 2001
From: Eugene Kim
Date: Tue, 22 Oct 2019 19:51:17 +0000
Subject: [PATCH 02/11] Test newer FN tables

---
 internal/genesis/genesis_test.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/internal/genesis/genesis_test.go b/internal/genesis/genesis_test.go
index 8c5e7672a..620ef0734 100644
--- a/internal/genesis/genesis_test.go
+++ b/internal/genesis/genesis_test.go
@@ -57,6 +57,9 @@ func TestCommitteeAccounts(test *testing.T) {
 	testAccounts(test, FoundationalNodeAccountsV1)
 	testAccounts(test, FoundationalNodeAccountsV1_1)
 	testAccounts(test, FoundationalNodeAccountsV1_2)
+	testAccounts(test, FoundationalNodeAccountsV1_3)
+	testAccounts(test, FoundationalNodeAccountsV1_4)
+	testAccounts(test, FoundationalNodeAccountsV1_5)
 	testAccounts(test, HarmonyAccounts)
 	testAccounts(test, TNHarmonyAccounts)
 	testAccounts(test, TNFoundationalAccounts)

From 9c0a78244c001c35202aa122b641488e77e15de0 Mon Sep 17 00:00:00 2001
From: coolcottontail
Date: Fri, 25 Oct 2019 20:20:18 -0700
Subject: [PATCH 03/11] added support for building static binary on Linux platforms

---
 Makefile                       |  5 +++++
 scripts/go_executable_build.sh | 12 ++++++++++--
 2 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index e91973868..b6f1677d2 100644
--- a/Makefile
+++ b/Makefile
@@ -20,3 +20,8 @@ exe:
 
 test:
 	./test/debug.sh
+
+linux_static:
+	make -C $(TOP)/mcl -j4
+	make -C $(TOP)/bls minimised_static BLS_SWAP_G=1 -j4
+	./scripts/go_executable_build.sh -s
diff --git a/scripts/go_executable_build.sh b/scripts/go_executable_build.sh
index 5a1b68112..177aeceec 100755
--- a/scripts/go_executable_build.sh
+++ b/scripts/go_executable_build.sh
@@ -21,6 +21,7 @@ RACE=
 VERBOSE=
 DEBUG=false
 NETWORK=main
+STATIC=false
 
 unset -v progdir
 case "${0}" in
@@ -62,6 +63,8 @@ OPTIONS:
   -f folder      set the upload folder name in the bucket (default: $FOLDER)
   -r             enable -race build option (default: $RACE)
   -v             verbose build process (default: $VERBOSE)
+  -s             build static linux executable (default: $STATIC)
+
 
 ACTION:
   build       build binaries only (default action)
@@ -104,7 +107,11 @@ function build_only
       if [ "$DEBUG" == "true" ]; then
         env GOOS=$GOOS GOARCH=$GOARCH go build $VERBOSE -gcflags="all=-N -l -c 2" -ldflags="-X main.version=v${VERSION} -X main.commit=${COMMIT} -X main.builtAt=${BUILTAT} -X main.builtBy=${BUILTBY}" -o $BINDIR/$bin $RACE ${SRC[$bin]}
       else
-        env GOOS=$GOOS GOARCH=$GOARCH go build $VERBOSE -gcflags="all=-c 2" -ldflags="-X main.version=v${VERSION} -X main.commit=${COMMIT} -X main.builtAt=${BUILTAT} -X main.builtBy=${BUILTBY}" -o $BINDIR/$bin $RACE ${SRC[$bin]}
+        if [ "$STATIC" == "true" ]; then
+           env GOOS=$GOOS GOARCH=$GOARCH go build $VERBOSE -gcflags="all=-c 2" -ldflags='-X main.version=v${VERSION} -X main.commit=${COMMIT} -X main.builtAt=${BUILTAT} -X main.builtBy=${BUILTBY} -w -extldflags "-static"' -o $BINDIR/$bin $RACE ${SRC[$bin]}
+        else
+           env GOOS=$GOOS GOARCH=$GOARCH go build $VERBOSE -gcflags="all=-c 2" -ldflags="-X main.version=v${VERSION} -X main.commit=${COMMIT} -X main.builtAt=${BUILTAT} -X main.builtBy=${BUILTBY}" -o $BINDIR/$bin $RACE ${SRC[$bin]}
+        fi
       fi
       if [ "$(uname -s)" == "Linux" ]; then
         $BINDIR/$bin -version || $BINDIR/$bin version
@@ -216,7 +223,7 @@ function upload_wallet
 }
 
 ################################ MAIN FUNCTION ##############################
-while getopts "hp:a:o:b:f:rvN:" option; do
+while getopts "hp:a:o:b:f:rvsN:" option; do
    case $option in
       h) usage ;;
       p) PROFILE=$OPTARG ;;
@@ -227,6 +234,7 @@ while getopts "hp:a:o:b:f:rvN:" option; do
      r) RACE=-race ;;
      v) VERBOSE='-v -x' ;;
      d) DEBUG=true ;;
+     s) STATIC=true ;;
      N) NETWORK=$OPTARG ;;
    esac
 done

From d1fcabcb8495397fc5da412e881ea27776f80b45 Mon Sep 17 00:00:00 2001
From: Leo Chen
Date: Tue, 29 Oct 2019 07:36:27 +0000
Subject: [PATCH 04/11] [build] fix version in static build

Signed-off-by: Leo Chen
---
 scripts/go_executable_build.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/go_executable_build.sh b/scripts/go_executable_build.sh
index 177aeceec..9d4528417 100755
--- a/scripts/go_executable_build.sh
+++ b/scripts/go_executable_build.sh
@@ -108,7 +108,7 @@ function build_only
         env GOOS=$GOOS GOARCH=$GOARCH go build $VERBOSE -gcflags="all=-N -l -c 2" -ldflags="-X main.version=v${VERSION} -X main.commit=${COMMIT} -X main.builtAt=${BUILTAT} -X main.builtBy=${BUILTBY}" -o $BINDIR/$bin $RACE ${SRC[$bin]}
       else
         if [ "$STATIC" == "true" ]; then
-           env GOOS=$GOOS GOARCH=$GOARCH go build $VERBOSE -gcflags="all=-c 2" -ldflags='-X main.version=v${VERSION} -X main.commit=${COMMIT} -X main.builtAt=${BUILTAT} -X main.builtBy=${BUILTBY} -w -extldflags "-static"' -o $BINDIR/$bin $RACE ${SRC[$bin]}
+           env GOOS=$GOOS GOARCH=$GOARCH go build $VERBOSE -gcflags="all=-c 2" -ldflags="-X main.version=v${VERSION} -X main.commit=${COMMIT} -X main.builtAt=${BUILTAT} -X main.builtBy=${BUILTBY} -w -extldflags \"-static\"" -o $BINDIR/$bin $RACE ${SRC[$bin]}
         else
           env GOOS=$GOOS GOARCH=$GOARCH go build $VERBOSE -gcflags="all=-c 2" -ldflags="-X main.version=v${VERSION} -X main.commit=${COMMIT} -X main.builtAt=${BUILTAT} -X main.builtBy=${BUILTBY}" -o $BINDIR/$bin $RACE ${SRC[$bin]}
        fi

From e75b4a49e290ee851ee617b3d5e886f6ebee575c Mon Sep 17 00:00:00 2001
From: Dennis Won
Date: Wed, 30 Oct 2019 21:54:51 +0000
Subject: [PATCH 05/11] make process state sync func to return error

---
 api/service/syncing/errors.go  |  12 +++-
 api/service/syncing/syncing.go | 104 ++++++++++++++++++++-------------
 node/node_syncing.go           |   2 +-
 3 files changed, 72 insertions(+), 46 deletions(-)

diff --git a/api/service/syncing/errors.go b/api/service/syncing/errors.go
index a70193583..ae8c7dae9 100644
--- a/api/service/syncing/errors.go
+++ b/api/service/syncing/errors.go
@@ -4,7 +4,13 @@ import "errors"
 
 // Errors ...
 var (
-	ErrRegistrationFail = errors.New("[SYNC]: registration failed")
-	ErrGetBlock         = errors.New("[SYNC]: get block failed")
-	ErrGetBlockHash     = errors.New("[SYNC]: get blockhash failed")
+	ErrRegistrationFail      = errors.New("[SYNC]: registration failed")
+	ErrGetBlock              = errors.New("[SYNC]: get block failed")
+	ErrGetBlockHash          = errors.New("[SYNC]: get blockhash failed")
+	ErrProcessStateSync      = errors.New("[SYNC]: get blockhash failed")
+	ErrGetConsensusHashes    = errors.New("[SYNC]: get consensus hashes failed")
+	ErrGenStateSyncTaskQueue = errors.New("[SYNC]: generate state sync task queue failed")
+	ErrDownloadBlocks        = errors.New("[SYNC]: get download blocks failed")
+	ErrUpdateBlockAndStatus  = errors.New("[SYNC]: update block and status failed")
+	ErrGenerateNewState      = errors.New("[SYNC]: get generate new state failed")
 )
diff --git a/api/service/syncing/syncing.go b/api/service/syncing/syncing.go
index 45f73dca7..6c1a849a7 100644
--- a/api/service/syncing/syncing.go
+++ b/api/service/syncing/syncing.go
@@ -26,13 +26,15 @@ import (
 
 // Constants for syncing.
 const (
-	TimesToFail           = 5 // Downloadblocks service retry limit
-	RegistrationNumber    = 3
-	SyncingPortDifference = 3000
-	inSyncThreshold       = 0    // when peerBlockHeight - myBlockHeight <= inSyncThreshold, it's ready to join consensus
-	BatchSize      uint32 = 1000 //maximum size for one query of block hashes
-	SyncLoopFrequency     = 1    // unit in second
-	LastMileBlocksSize    = 10
+	downloadBlocksRetryLimit        = 5 // downloadBlocks service retry limit
+	TimesToFail                     = 5 // downloadBlocks service retry limit
+	RegistrationNumber              = 3
+	SyncingPortDifference           = 3000
+	inSyncThreshold                 = 0    // when peerBlockHeight - myBlockHeight <= inSyncThreshold, it's ready to join consensus
+	SyncLoopBatchSize        uint32 = 1000 // maximum size for one query of block hashes
+	verifyHeaderBatchSize    uint64 = 100  // block chain header verification batch size
+	SyncLoopFrequency               = 1    // unit in second
+	LastMileBlocksSize              = 10
 )
 
 // SyncPeerConfig is peer config to sync.
@@ -333,26 +335,27 @@ func (sc *SyncConfig) GetBlockHashesConsensusAndCleanUp() {
 	sc.cleanUpPeers(maxFirstID)
 }
 
-// GetConsensusHashes gets all hashes needed to download.
-func (ss *StateSync) GetConsensusHashes(startHash []byte, size uint32) {
+// getConsensusHashes gets all hashes needed to download.
+func (ss *StateSync) getConsensusHashes(startHash []byte, size uint32) {
 	var wg sync.WaitGroup
 	ss.syncConfig.ForEachPeer(func(peerConfig *SyncPeerConfig) (brk bool) {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
+
 			response := peerConfig.client.GetBlockHashes(startHash, size, ss.selfip, ss.selfport)
 			if response == nil {
 				utils.Logger().Warn().
 					Str("peerIP", peerConfig.ip).
 					Str("peerPort", peerConfig.port).
-					Msg("[SYNC] GetConsensusHashes Nil Response")
+					Msg("[SYNC] getConsensusHashes Nil Response")
 				return
 			}
 			if len(response.Payload) > int(size+1) {
 				utils.Logger().Warn().
 					Uint32("requestSize", size).
 					Int("respondSize", len(response.Payload)).
-					Msg("[SYNC] GetConsensusHashes: receive more blockHahses than request!")
+					Msg("[SYNC] getConsensusHashes: receive more blockHahses than request!")
 				peerConfig.blockHashes = response.Payload[:size+1]
 			} else {
 				peerConfig.blockHashes = response.Payload
@@ -404,7 +407,7 @@ func (ss *StateSync) downloadBlocks(bc *core.BlockChain) {
 			if err != nil || len(payload) == 0 {
 				count++
 				utils.Logger().Error().Err(err).Int("failNumber", count).Msg("[SYNC] downloadBlocks: GetBlocks failed")
-				if count > TimesToFail {
+				if count > downloadBlocksRetryLimit {
 					break
 				}
 				if err := ss.stateSyncTaskQueue.Put(syncTask); err != nil {
@@ -424,7 +427,7 @@ func (ss *StateSync) downloadBlocks(bc *core.BlockChain) {
 			if err != nil {
 				count++
 				utils.Logger().Error().Err(err).Msg("[SYNC] downloadBlocks: failed to DecodeBytes from received new block")
-				if count > TimesToFail {
+				if count > downloadBlocksRetryLimit {
 					break
 				}
 				if err := ss.stateSyncTaskQueue.Put(syncTask); err != nil {
@@ -527,50 +530,55 @@ func (ss *StateSync) getBlockFromLastMileBlocksByParentHash(parentHash common.Ha
 	return nil
 }
 
-func (ss *StateSync) updateBlockAndStatus(block *types.Block, bc *core.BlockChain, worker *worker.Worker) bool {
-	utils.Logger().Info().Str("blockHex", bc.CurrentBlock().Hash().Hex()).Msg("[SYNC] Current Block")
+func (ss *StateSync) updateBlockAndStatus(block *types.Block, bc *core.BlockChain, worker *worker.Worker) error {
+	utils.Logger().Info().Str("blockHex", bc.CurrentBlock().Hash().Hex()).Msg("[SYNC] updateBlockAndStatus: Current Block")
 	// Verify block signatures
 	if block.NumberU64() > 1 {
 		// Verify signature every 100 blocks
-		verifySig := block.NumberU64()%100 == 0
+		verifySig := block.NumberU64()%verifyHeaderBatchSize == 0
 		err := bc.Engine().VerifyHeader(bc, block.Header(), verifySig)
 		if err != nil {
-			utils.Logger().Error().Err(err).Msgf("[SYNC] failed verifying signatures for new block %d", block.NumberU64())
-			utils.Logger().Debug().Interface("block", bc.CurrentBlock()).Msg("[SYNC] Rolling back last 99 blocks!")
-			for i := 0; i < 99; i++ {
-				bc.Rollback([]common.Hash{bc.CurrentBlock().Hash()})
+			utils.Logger().Error().Err(err).Msgf("[SYNC] updateBlockAndStatus: failed verifying signatures for new block %d", block.NumberU64())
+
+			utils.Logger().Debug().Interface("block", bc.CurrentBlock()).Msg("[SYNC] updateBlockAndStatus: Rolling back last 99 blocks!")
+			var hashes []common.Hash
+			for i := uint64(0); i < verifyHeaderBatchSize-1; i++ {
+				hashes = append(hashes, bc.CurrentBlock().Hash())
 			}
-			return false
+			bc.Rollback(hashes)
+			return err
 		}
 	}
 
 	_, err := bc.InsertChain([]*types.Block{block}, false /* verifyHeaders */)
 	if err != nil {
-		utils.Logger().Error().Err(err).Msgf("[SYNC] Error adding new block to blockchain %d %d", block.NumberU64(), block.ShardID())
+		utils.Logger().Error().Err(err).Msgf("[SYNC] updateBlockAndStatus: Error adding new block to blockchain %d %d", block.NumberU64(), block.ShardID())
 
-		utils.Logger().Debug().Interface("block", bc.CurrentBlock()).Msg("[SYNC] Rolling back current block!")
+		utils.Logger().Debug().Interface("block", bc.CurrentBlock()).Msg("[SYNC] updateBlockAndStatus: Rolling back current block!")
 		bc.Rollback([]common.Hash{bc.CurrentBlock().Hash()})
-		return false
+		return err
 	}
 	utils.Logger().Info().
 		Uint64("blockHeight", bc.CurrentBlock().NumberU64()).
 		Str("blockHex", bc.CurrentBlock().Hash().Hex()).
-		Msg("[SYNC] new block added to blockchain")
-	return true
+		Msg("[SYNC] updateBlockAndStatus: new block added to blockchain")
+	return nil
 }
 
 // generateNewState will construct most recent state from downloaded blocks
-func (ss *StateSync) generateNewState(bc *core.BlockChain, worker *worker.Worker) {
+func (ss *StateSync) generateNewState(bc *core.BlockChain, worker *worker.Worker) error {
 	// update blocks created before node start sync
 	parentHash := bc.CurrentBlock().Hash()
+
+	var err error
 	for {
 		block := ss.getBlockFromOldBlocksByParentHash(parentHash)
 		if block == nil {
 			break
 		}
-		ok := ss.updateBlockAndStatus(block, bc, worker)
-		if !ok {
+		err = ss.updateBlockAndStatus(block, bc, worker)
+		if err != nil {
 			break
 		}
 		parentHash = block.Hash()
@@ -586,8 +594,8 @@ func (ss *StateSync) generateNewState(bc *core.BlockChain, worker *worker.Worker
 		if block == nil {
 			break
 		}
-		ok := ss.updateBlockAndStatus(block, bc, worker)
-		if !ok {
+		err = ss.updateBlockAndStatus(block, bc, worker)
+		if err != nil {
 			break
 		}
 		parentHash = block.Hash()
@@ -607,25 +615,26 @@ func (ss *StateSync) generateNewState(bc *core.BlockChain, worker *worker.Worker
 		if block == nil {
 			break
 		}
-		ok := ss.updateBlockAndStatus(block, bc, worker)
-		if !ok {
+		err = ss.updateBlockAndStatus(block, bc, worker)
+		if err != nil {
 			break
 		}
 		parentHash = block.Hash()
 	}
+
+	return err
 }
 
 // ProcessStateSync processes state sync from the blocks received but not yet processed so far
-// TODO: return error
-func (ss *StateSync) ProcessStateSync(startHash []byte, size uint32, bc *core.BlockChain, worker *worker.Worker) {
+func (ss *StateSync) ProcessStateSync(startHash []byte, size uint32, bc *core.BlockChain, worker *worker.Worker) error {
 	// Gets consensus hashes.
-	ss.GetConsensusHashes(startHash, size)
+	ss.getConsensusHashes(startHash, size)
 	ss.generateStateSyncTaskQueue(bc)
 	// Download blocks.
 	if ss.stateSyncTaskQueue.Len() > 0 {
 		ss.downloadBlocks(bc)
 	}
-	ss.generateNewState(bc, worker)
+	return ss.generateNewState(bc, worker)
 }
 
 func (peerConfig *SyncPeerConfig) registerToBroadcast(peerHash []byte, ip, port string) error {
@@ -738,17 +747,28 @@ Loop:
 
 		currentHeight := bc.CurrentBlock().NumberU64()
 		if currentHeight >= otherHeight {
-			utils.Logger().Info().Msgf("[SYNC] Node is now IN SYNC! (isBeacon: %t, ShardID: %d, otherHeight: %d, currentHeight: %d)", isBeacon, bc.ShardID(), otherHeight, currentHeight)
+			utils.Logger().Info().
+				Msgf("[SYNC] Node is now IN SYNC! (isBeacon: %t, ShardID: %d, otherHeight: %d, currentHeight: %d)",
+					isBeacon, bc.ShardID(), otherHeight, currentHeight)
 			break Loop
 		} else {
-			utils.Logger().Debug().Msgf("[SYNC] Node is Not in Sync (isBeacon: %t, ShardID: %d, otherHeight: %d, currentHeight: %d)", isBeacon, bc.ShardID(), otherHeight, currentHeight)
+			utils.Logger().Debug().
+				Msgf("[SYNC] Node is Not in Sync (isBeacon: %t, ShardID: %d, otherHeight: %d, currentHeight: %d)",
+					isBeacon, bc.ShardID(), otherHeight, currentHeight)
 		}
 		startHash := bc.CurrentBlock().Hash()
 		size := uint32(otherHeight - currentHeight)
-		if size > BatchSize {
-			size = BatchSize
+		if size > SyncLoopBatchSize {
+			size = SyncLoopBatchSize
+		}
+		err := ss.ProcessStateSync(startHash[:], size, bc, worker)
+		if err != nil {
+			utils.Logger().Error().Err(err).
+				Msgf("[SYNC] ProcessStateSync failed (isBeacon: %t, ShardID: %d, otherHeight: %d, currentHeight: %d)",
+					isBeacon, bc.ShardID(), otherHeight, currentHeight)
+			// should we still call UpdateConsensusInformation() upon state sync failure?
+			// how to handle error here?
 		}
-		ss.ProcessStateSync(startHash[:], size, bc, worker)
 		ss.purgeOldBlocksFromCache()
 		if consensus != nil {
 			consensus.UpdateConsensusInformation()
diff --git a/node/node_syncing.go b/node/node_syncing.go
index a1fdf67c4..359496716 100644
--- a/node/node_syncing.go
+++ b/node/node_syncing.go
@@ -332,7 +332,7 @@ func (node *Node) CalculateResponse(request *downloader_pb.DownloaderRequest, in
 	if request.BlockHash == nil {
 		return response, fmt.Errorf("[SYNC] GetBlockHashes Request BlockHash is NIL")
 	}
-	if request.Size == 0 || request.Size > syncing.BatchSize {
+	if request.Size == 0 || request.Size > syncing.SyncLoopBatchSize {
 		return response, fmt.Errorf("[SYNC] GetBlockHashes Request contains invalid Size %v", request.Size)
 	}
 	size := uint64(request.Size)

From 8331d52a17774742ddf79b7f933be9615e2fe96c Mon Sep 17 00:00:00 2001
From: Minh Doan
Date: Thu, 31 Oct 2019 00:49:11 -0700
Subject: [PATCH 06/11] add example for creating one address

---
 test/one_address/main.go | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)
 create mode 100644 test/one_address/main.go

diff --git a/test/one_address/main.go b/test/one_address/main.go
new file mode 100644
index 000000000..7c4639dca
--- /dev/null
+++ b/test/one_address/main.go
@@ -0,0 +1,25 @@
+package main
+
+import (
+	"encoding/hex"
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/crypto"
+	common2 "github.com/harmony-one/harmony/internal/common"
+)
+
+func main() {
+	// Create an account
+	key, _ := crypto.GenerateKey()
+
+	// Get the address
+	address := crypto.PubkeyToAddress(key.PublicKey)
+	// 0x8ee3333cDE801ceE9471ADf23370c48b011f82a6
+
+	// Get the private key
+	privateKey := hex.EncodeToString(key.D.Bytes())
+	// 05b14254a1d0c77a49eae3bdf080f926a2df17d8e2ebdf7af941ea001481e57f
+
+	fmt.Printf("account: %s\n", common2.MustAddressToBech32(address))
+	fmt.Printf("private Key : %s\n", privateKey)
+}

From fb77926d0fdc9b692a6a0973c4a7f4539e4ba6c8 Mon Sep 17 00:00:00 2001
From: Nye Liu
Date: Thu, 31 Oct 2019 23:57:06 +0000
Subject: [PATCH 07/11] Comment out "Dumping block" message

When running explorer, every time we restart it, this dumps megabytes of
messages into zerolog, overwhelming logstash.
---
 api/service/explorer/storage.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/api/service/explorer/storage.go b/api/service/explorer/storage.go
index 85b669719..f034fb966 100644
--- a/api/service/explorer/storage.go
+++ b/api/service/explorer/storage.go
@@ -89,7 +89,7 @@ func (storage *Storage) GetDB() *ethdb.LDBDatabase {
 
 // Dump extracts information from block and index them into lvdb for explorer.
 func (storage *Storage) Dump(block *types.Block, height uint64) {
-	utils.Logger().Info().Uint64("block height", height).Msg("Dumping block")
+	//utils.Logger().Debug().Uint64("block height", height).Msg("Dumping block")
 	if block == nil {
 		return
 	}

From 8a534ee88cb813f16c1fa9ec126ceeedfc97c3cc Mon Sep 17 00:00:00 2001
From: coolcottontail
Date: Thu, 31 Oct 2019 22:20:03 -0700
Subject: [PATCH 08/11] switch the order of validatoraddress in staking messages

---
 staking/types/messages.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/staking/types/messages.go b/staking/types/messages.go
index 3a25d6ce4..9fb487601 100644
--- a/staking/types/messages.go
+++ b/staking/types/messages.go
@@ -48,11 +48,11 @@ func (d Directive) String() string {
 
 // CreateValidator - type for creating a new validator
 type CreateValidator struct {
+	ValidatorAddress common.Address `json:"validator_address" yaml:"validator_address"`
 	Description        *Description `json:"description" yaml:"description"`
 	CommissionRates    `json:"commission" yaml:"commission"`
 	MinSelfDelegation  *big.Int `json:"min_self_delegation" yaml:"min_self_delegation"`
 	MaxTotalDelegation *big.Int `json:"max_total_delegation" yaml:"max_total_delegation"`
-	ValidatorAddress   common.Address `json:"validator_address" yaml:"validator_address"`
 	SlotPubKeys        []shard.BlsPublicKey `json:"slot_pub_keys" yaml:"slot_pub_keys"`
 	Amount             *big.Int `json:"amount" yaml:"amount"`
 }

From 0bf5c0925dc9b957d9f896a2c30a7a9668318f75 Mon Sep 17 00:00:00 2001
From: coolcottontail
Date: Fri, 1 Nov 2019 09:14:07 -0700
Subject: [PATCH 09/11] fixed goimports

---
 staking/types/messages.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/staking/types/messages.go b/staking/types/messages.go
index 9fb487601..0a1678ac0 100644
--- a/staking/types/messages.go
+++ b/staking/types/messages.go
@@ -48,8 +48,8 @@ func (d Directive) String() string {
 
 // CreateValidator - type for creating a new validator
 type CreateValidator struct {
-	ValidatorAddress common.Address `json:"validator_address" yaml:"validator_address"`
-	Description        *Description `json:"description" yaml:"description"`
+	ValidatorAddress   common.Address `json:"validator_address" yaml:"validator_address"`
+	Description        *Description   `json:"description" yaml:"description"`
 	CommissionRates    `json:"commission" yaml:"commission"`
 	MinSelfDelegation  *big.Int `json:"min_self_delegation" yaml:"min_self_delegation"`
 	MaxTotalDelegation *big.Int `json:"max_total_delegation" yaml:"max_total_delegation"`

From 7b0ce75a8996af787c5b178b35f665a04d2b12af Mon Sep 17 00:00:00 2001
From: Leo Chen
Date: Wed, 30 Oct 2019 03:25:07 +0000
Subject: [PATCH 10/11] [build] do not upload so files if build statically

Signed-off-by: Leo Chen
---
 scripts/go_executable_build.sh | 32 ++++++++++++++++++--------------
 1 file changed, 18 insertions(+), 14 deletions(-)

diff --git a/scripts/go_executable_build.sh b/scripts/go_executable_build.sh
index 9d4528417..8d3d34f74 100755
--- a/scripts/go_executable_build.sh
+++ b/scripts/go_executable_build.sh
@@ -138,13 +138,15 @@ function upload
      [ -e $BINDIR/$bin ] && $AWSCLI s3 cp $BINDIR/$bin s3://${BUCKET}$FOLDER/$bin --acl public-read
   done
 
-   for lib in "${!LIB[@]}"; do
-      if [ -e ${LIB[$lib]} ]; then
-         $AWSCLI s3 cp ${LIB[$lib]} s3://${BUCKET}$FOLDER/$lib --acl public-read
-      else
-         echo "!! MISSING ${LIB[$lib]} !!"
-      fi
-   done
+   if [ "$STATIC" != "true" ]; then
+      for lib in "${!LIB[@]}"; do
+         if [ -e ${LIB[$lib]} ]; then
+            $AWSCLI s3 cp ${LIB[$lib]} s3://${BUCKET}$FOLDER/$lib --acl public-read
+         else
+            echo "!! MISSING ${LIB[$lib]} !!"
+         fi
+      done
+   fi
 
    [ -e $BINDIR/md5sum.txt ] && $AWSCLI s3 cp $BINDIR/md5sum.txt s3://${BUCKET}$FOLDER/md5sum.txt --acl public-read
 }
@@ -177,13 +179,15 @@ function release
      fi
   done
 
-   for lib in "${!LIB[@]}"; do
-      if [ -e ${LIB[$lib]} ]; then
-         $AWSCLI s3 cp ${LIB[$lib]} s3://${PUBBUCKET}/$FOLDER/$lib --acl public-read
-      else
-         echo "!! MISSING ${LIB[$lib]} !!"
-      fi
-   done
+   if [ "$STATIC" != "true" ]; then
+      for lib in "${!LIB[@]}"; do
+         if [ -e ${LIB[$lib]} ]; then
+            $AWSCLI s3 cp ${LIB[$lib]} s3://${PUBBUCKET}/$FOLDER/$lib --acl public-read
+         else
+            echo "!! MISSING ${LIB[$lib]} !!"
+         fi
+      done
+   fi
 
    [ -e $BINDIR/md5sum.txt ] && $AWSCLI s3 cp $BINDIR/md5sum.txt s3://${PUBBUCKET}/$FOLDER/md5sum.txt --acl public-read
 }

From 005321f9e9da6eb2d3d8b03b2476a9c96216457a Mon Sep 17 00:00:00 2001
From: Leo Chen
Date: Fri, 1 Nov 2019 22:58:30 +0000
Subject: [PATCH 11/11] [static] exit if not running on Linux

Signed-off-by: Leo Chen
---
 scripts/go_executable_build.sh | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/scripts/go_executable_build.sh b/scripts/go_executable_build.sh
index 8d3d34f74..2a0300847 100755
--- a/scripts/go_executable_build.sh
+++ b/scripts/go_executable_build.sh
@@ -92,6 +92,11 @@ EOF
 
 function build_only
 {
+   if [[ "$STATIC" == "true" && "$GOOS" == "darwin" ]]; then
+      echo "static build only supported on Linux platform"
+      exit 2
+   fi
+
    VERSION=$(git rev-list --count HEAD)
    COMMIT=$(git describe --always --long --dirty)
    BUILTAT=$(date +%FT%T%z)