Merge branch 'master' of github.com:harmony-one/harmony into staking-part3

pull/1795/head
chao 5 years ago
commit 8a71ea671b
Changed files (lines changed in parentheses):

  1. Makefile (9)
  2. api/service/explorer/storage.go (2)
  3. api/service/syncing/errors.go (12)
  4. api/service/syncing/syncing.go (104)
  5. internal/configs/sharding/mainnet.go (4)
  6. internal/configs/sharding/shardingconfig_test.go (2)
  7. internal/genesis/foundational.go (4)
  8. internal/genesis/genesis_test.go (3)
  9. node/node_syncing.go (2)
  10. scripts/go_executable_build.sh (49)
  11. staking/types/messages.go (14)
  12. test/one_address/main.go (25)

@@ -12,11 +12,16 @@ all: libs
./scripts/go_executable_build.sh
libs:
make -C $(TOP)/mcl -j4
make -C $(TOP)/bls BLS_SWAP_G=1 -j4
make -C $(TOP)/mcl
make -C $(TOP)/bls BLS_SWAP_G=1
exe:
./scripts/go_executable_build.sh
test:
./test/debug.sh
linux_static:
make -C $(TOP)/mcl
make -C $(TOP)/bls minimised_static BLS_SWAP_G=1
./scripts/go_executable_build.sh -s
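The new linux_static target above rebuilds mcl and a minimised static bls, then hands off to ./scripts/go_executable_build.sh -s (the flag added later in this commit), so "make linux_static" appears to be the intended one-shot entry point for statically linked Linux binaries.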

@@ -89,7 +89,7 @@ func (storage *Storage) GetDB() *ethdb.LDBDatabase {
// Dump extracts information from a block and indexes it into lvdb for the explorer.
func (storage *Storage) Dump(block *types.Block, height uint64) {
utils.Logger().Info().Uint64("block height", height).Msg("Dumping block")
//utils.Logger().Debug().Uint64("block height", height).Msg("Dumping block")
if block == nil {
return
}

@@ -4,7 +4,13 @@ import "errors"
// Errors ...
var (
ErrRegistrationFail = errors.New("[SYNC]: registration failed")
ErrGetBlock = errors.New("[SYNC]: get block failed")
ErrGetBlockHash = errors.New("[SYNC]: get blockhash failed")
ErrRegistrationFail = errors.New("[SYNC]: registration failed")
ErrGetBlock = errors.New("[SYNC]: get block failed")
ErrGetBlockHash = errors.New("[SYNC]: get blockhash failed")
ErrProcessStateSync = errors.New("[SYNC]: process state sync failed")
ErrGetConsensusHashes = errors.New("[SYNC]: get consensus hashes failed")
ErrGenStateSyncTaskQueue = errors.New("[SYNC]: generate state sync task queue failed")
ErrDownloadBlocks = errors.New("[SYNC]: download blocks failed")
ErrUpdateBlockAndStatus = errors.New("[SYNC]: update block and status failed")
ErrGenerateNewState = errors.New("[SYNC]: generate new state failed")
)
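These sentinels are package-level values, so call sites can wrap them for context and callers can still match on the underlying cause. A minimal self-contained sketch of that pattern (Go 1.13+ error wrapping; the peer address and function names are made up):

package main

import (
	"errors"
	"fmt"
)

// Local stand-in for one of the sentinels above.
var ErrGetBlock = errors.New("[SYNC]: get block failed")

func fetchBlock() error {
	// Wrap the sentinel so the caller keeps both the cause and the context.
	return fmt.Errorf("peer 1.2.3.4:9000: %w", ErrGetBlock)
}

func main() {
	if err := fetchBlock(); errors.Is(err, ErrGetBlock) {
		fmt.Println("matched sentinel:", err)
	}
}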

@@ -26,13 +26,15 @@ import (
// Constants for syncing.
const (
TimesToFail = 5 // Downloadblocks service retry limit
RegistrationNumber = 3
SyncingPortDifference = 3000
inSyncThreshold = 0 // when peerBlockHeight - myBlockHeight <= inSyncThreshold, it's ready to join consensus
BatchSize uint32 = 1000 //maximum size for one query of block hashes
SyncLoopFrequency = 1 // unit in second
LastMileBlocksSize = 10
downloadBlocksRetryLimit = 5 // downloadBlocks service retry limit
TimesToFail = 5 // downloadBlocks service retry limit
RegistrationNumber = 3
SyncingPortDifference = 3000
inSyncThreshold = 0 // when peerBlockHeight - myBlockHeight <= inSyncThreshold, it's ready to join consensus
SyncLoopBatchSize uint32 = 1000 // maximum size for one query of block hashes
verifyHeaderBatchSize uint64 = 100 // block chain header verification batch size
SyncLoopFrequency = 1 // unit in second
LastMileBlocksSize = 10
)
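Two of these constants drive the sync loop below: SyncLoopBatchSize (the renamed export, also picked up in node_syncing.go) caps a single hash query, and verifyHeaderBatchSize sets how often a header gets full signature verification. A compressed, self-contained sketch of both uses, with the constants mirrored locally:

package main

import "fmt"

const (
	SyncLoopBatchSize     uint32 = 1000 // max block hashes per query
	verifyHeaderBatchSize uint64 = 100  // full signature check cadence
)

// querySize clamps the height gap to one batch, as SyncLoop does.
func querySize(otherHeight, currentHeight uint64) uint32 {
	size := uint32(otherHeight - currentHeight)
	if size > SyncLoopBatchSize {
		size = SyncLoopBatchSize
	}
	return size
}

func main() {
	fmt.Println(querySize(5200, 1200))          // 1000: clamped to one batch
	fmt.Println(300%verifyHeaderBatchSize == 0) // true: block 300 gets a full check
}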
// SyncPeerConfig is peer config to sync.
@@ -333,26 +335,27 @@ func (sc *SyncConfig) GetBlockHashesConsensusAndCleanUp() {
sc.cleanUpPeers(maxFirstID)
}
// GetConsensusHashes gets all hashes needed to download.
func (ss *StateSync) GetConsensusHashes(startHash []byte, size uint32) {
// getConsensusHashes gets all hashes needed to download.
func (ss *StateSync) getConsensusHashes(startHash []byte, size uint32) {
var wg sync.WaitGroup
ss.syncConfig.ForEachPeer(func(peerConfig *SyncPeerConfig) (brk bool) {
wg.Add(1)
go func() {
defer wg.Done()
response := peerConfig.client.GetBlockHashes(startHash, size, ss.selfip, ss.selfport)
if response == nil {
utils.Logger().Warn().
Str("peerIP", peerConfig.ip).
Str("peerPort", peerConfig.port).
Msg("[SYNC] GetConsensusHashes Nil Response")
Msg("[SYNC] getConsensusHashes Nil Response")
return
}
if len(response.Payload) > int(size+1) {
utils.Logger().Warn().
Uint32("requestSize", size).
Int("respondSize", len(response.Payload)).
Msg("[SYNC] GetConsensusHashes: receive more blockHahses than request!")
Msg("[SYNC] getConsensusHashes: receive more blockHahses than request!")
peerConfig.blockHashes = response.Payload[:size+1]
} else {
peerConfig.blockHashes = response.Payload
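The body of getConsensusHashes is a WaitGroup fan-out: one goroutine per peer, each writing only to its own peerConfig, with the wait presumably at the tail of the function outside this hunk. A self-contained sketch of the same shape, where peer and getBlockHashes are illustrative stand-ins:

package main

import (
	"fmt"
	"sync"
)

type peer struct{ ip, port string }

// getBlockHashes stands in for peerConfig.client.GetBlockHashes.
func (p peer) getBlockHashes() [][]byte { return [][]byte{{0x01}, {0x02}} }

func main() {
	peers := []peer{{"1.1.1.1", "6000"}, {"2.2.2.2", "6000"}}
	var wg sync.WaitGroup
	for i := range peers {
		p := peers[i] // per-iteration copy, safe to capture below
		wg.Add(1)
		go func() {
			defer wg.Done()
			hashes := p.getBlockHashes() // each goroutine touches only its own peer
			fmt.Printf("%s:%s returned %d hashes\n", p.ip, p.port, len(hashes))
		}()
	}
	wg.Wait() // block until every peer has answered
}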
@@ -404,7 +407,7 @@ func (ss *StateSync) downloadBlocks(bc *core.BlockChain) {
if err != nil || len(payload) == 0 {
count++
utils.Logger().Error().Err(err).Int("failNumber", count).Msg("[SYNC] downloadBlocks: GetBlocks failed")
if count > TimesToFail {
if count > downloadBlocksRetryLimit {
break
}
if err := ss.stateSyncTaskQueue.Put(syncTask); err != nil {
@@ -424,7 +427,7 @@ func (ss *StateSync) downloadBlocks(bc *core.BlockChain) {
if err != nil {
count++
utils.Logger().Error().Err(err).Msg("[SYNC] downloadBlocks: failed to DecodeBytes from received new block")
if count > TimesToFail {
if count > downloadBlocksRetryLimit {
break
}
if err := ss.stateSyncTaskQueue.Put(syncTask); err != nil {
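Both failure paths in downloadBlocks follow the same retry-with-requeue shape: bump a shared failure counter, stop once it exceeds downloadBlocksRetryLimit, otherwise put the task back on the queue. A sketch of that shape, with a buffered channel standing in for stateSyncTaskQueue:

package main

import "fmt"

const downloadBlocksRetryLimit = 5

// tryDownload stands in for a GetBlocks round trip that keeps failing.
func tryDownload(task int) error { return fmt.Errorf("block %d unavailable", task) }

func main() {
	tasks := make(chan int, 16)
	tasks <- 42
	count := 0
	for len(tasks) > 0 {
		task := <-tasks
		if err := tryDownload(task); err != nil {
			count++
			fmt.Println("attempt", count, "failed:", err)
			if count > downloadBlocksRetryLimit {
				break // retry budget exhausted, give up on this batch
			}
			tasks <- task // requeue the failed task for another attempt
		}
	}
}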
@@ -527,50 +530,55 @@ func (ss *StateSync) getBlockFromLastMileBlocksByParentHash(parentHash common.Ha
return nil
}
func (ss *StateSync) updateBlockAndStatus(block *types.Block, bc *core.BlockChain, worker *worker.Worker) bool {
utils.Logger().Info().Str("blockHex", bc.CurrentBlock().Hash().Hex()).Msg("[SYNC] Current Block")
func (ss *StateSync) updateBlockAndStatus(block *types.Block, bc *core.BlockChain, worker *worker.Worker) error {
utils.Logger().Info().Str("blockHex", bc.CurrentBlock().Hash().Hex()).Msg("[SYNC] updateBlockAndStatus: Current Block")
// Verify block signatures
if block.NumberU64() > 1 {
// Verify signature every 100 blocks
verifySig := block.NumberU64()%100 == 0
verifySig := block.NumberU64()%verifyHeaderBatchSize == 0
err := bc.Engine().VerifyHeader(bc, block.Header(), verifySig)
if err != nil {
utils.Logger().Error().Err(err).Msgf("[SYNC] failed verifying signatures for new block %d", block.NumberU64())
utils.Logger().Debug().Interface("block", bc.CurrentBlock()).Msg("[SYNC] Rolling back last 99 blocks!")
for i := 0; i < 99; i++ {
bc.Rollback([]common.Hash{bc.CurrentBlock().Hash()})
utils.Logger().Error().Err(err).Msgf("[SYNC] updateBlockAndStatus: failed verifying signatures for new block %d", block.NumberU64())
utils.Logger().Debug().Interface("block", bc.CurrentBlock()).Msg("[SYNC] updateBlockAndStatus: Rolling back last 99 blocks!")
var hashes []common.Hash
for i := uint64(0); i < verifyHeaderBatchSize-1; i++ {
hashes = append(hashes, bc.CurrentBlock().Hash())
}
return false
bc.Rollback(hashes)
return err
}
}
_, err := bc.InsertChain([]*types.Block{block}, false /* verifyHeaders */)
if err != nil {
utils.Logger().Error().Err(err).Msgf("[SYNC] Error adding new block to blockchain %d %d", block.NumberU64(), block.ShardID())
utils.Logger().Error().Err(err).Msgf("[SYNC] updateBlockAndStatus: Error adding new block to blockchain %d %d", block.NumberU64(), block.ShardID())
utils.Logger().Debug().Interface("block", bc.CurrentBlock()).Msg("[SYNC] Rolling back current block!")
utils.Logger().Debug().Interface("block", bc.CurrentBlock()).Msg("[SYNC] updateBlockAndStatus: Rolling back current block!")
bc.Rollback([]common.Hash{bc.CurrentBlock().Hash()})
return false
return err
}
utils.Logger().Info().
Uint64("blockHeight", bc.CurrentBlock().NumberU64()).
Str("blockHex", bc.CurrentBlock().Hash().Hex()).
Msg("[SYNC] new block added to blockchain")
return true
Msg("[SYNC] updateBlockAndStatus: new block added to blockchain")
return nil
}
// generateNewState will construct most recent state from downloaded blocks
func (ss *StateSync) generateNewState(bc *core.BlockChain, worker *worker.Worker) {
func (ss *StateSync) generateNewState(bc *core.BlockChain, worker *worker.Worker) error {
// update blocks created before node start sync
parentHash := bc.CurrentBlock().Hash()
var err error
for {
block := ss.getBlockFromOldBlocksByParentHash(parentHash)
if block == nil {
break
}
ok := ss.updateBlockAndStatus(block, bc, worker)
if !ok {
err = ss.updateBlockAndStatus(block, bc, worker)
if err != nil {
break
}
parentHash = block.Hash()
@@ -586,8 +594,8 @@ func (ss *StateSync) generateNewState(bc *core.BlockChain, worker *worker.Worker
if block == nil {
break
}
ok := ss.updateBlockAndStatus(block, bc, worker)
if !ok {
err = ss.updateBlockAndStatus(block, bc, worker)
if err != nil {
break
}
parentHash = block.Hash()
@@ -607,25 +615,26 @@ func (ss *StateSync) generateNewState(bc *core.BlockChain, worker *worker.Worker
if block == nil {
break
}
ok := ss.updateBlockAndStatus(block, bc, worker)
if !ok {
err = ss.updateBlockAndStatus(block, bc, worker)
if err != nil {
break
}
parentHash = block.Hash()
}
return err
}
// ProcessStateSync processes state sync from the blocks received but not yet processed so far
// TODO: return error
func (ss *StateSync) ProcessStateSync(startHash []byte, size uint32, bc *core.BlockChain, worker *worker.Worker) {
func (ss *StateSync) ProcessStateSync(startHash []byte, size uint32, bc *core.BlockChain, worker *worker.Worker) error {
// Gets consensus hashes.
ss.GetConsensusHashes(startHash, size)
ss.getConsensusHashes(startHash, size)
ss.generateStateSyncTaskQueue(bc)
// Download blocks.
if ss.stateSyncTaskQueue.Len() > 0 {
ss.downloadBlocks(bc)
}
ss.generateNewState(bc, worker)
return ss.generateNewState(bc, worker)
}
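The thread running through this hunk is a bool-to-error refactor: updateBlockAndStatus, generateNewState, and ProcessStateSync now surface the underlying failure instead of a bare false, which is what lets SyncLoop below log a cause. A toy sketch of that plumbing (names are illustrative):

package main

import (
	"errors"
	"fmt"
)

// step stands in for updateBlockAndStatus's failure path.
func step() error { return errors.New("header verification failed") }

// run mirrors generateNewState: stop on the first error and pass it up intact.
func run() error {
	if err := step(); err != nil {
		return fmt.Errorf("generateNewState: %w", err)
	}
	return nil
}

func main() {
	// The caller (SyncLoop's role) finally sees why the pass failed,
	// not just that it did.
	fmt.Println(run())
}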
func (peerConfig *SyncPeerConfig) registerToBroadcast(peerHash []byte, ip, port string) error {
@@ -738,17 +747,28 @@ Loop:
currentHeight := bc.CurrentBlock().NumberU64()
if currentHeight >= otherHeight {
utils.Logger().Info().Msgf("[SYNC] Node is now IN SYNC! (isBeacon: %t, ShardID: %d, otherHeight: %d, currentHeight: %d)", isBeacon, bc.ShardID(), otherHeight, currentHeight)
utils.Logger().Info().
Msgf("[SYNC] Node is now IN SYNC! (isBeacon: %t, ShardID: %d, otherHeight: %d, currentHeight: %d)",
isBeacon, bc.ShardID(), otherHeight, currentHeight)
break Loop
} else {
utils.Logger().Debug().Msgf("[SYNC] Node is Not in Sync (isBeacon: %t, ShardID: %d, otherHeight: %d, currentHeight: %d)", isBeacon, bc.ShardID(), otherHeight, currentHeight)
utils.Logger().Debug().
Msgf("[SYNC] Node is Not in Sync (isBeacon: %t, ShardID: %d, otherHeight: %d, currentHeight: %d)",
isBeacon, bc.ShardID(), otherHeight, currentHeight)
}
startHash := bc.CurrentBlock().Hash()
size := uint32(otherHeight - currentHeight)
if size > BatchSize {
size = BatchSize
if size > SyncLoopBatchSize {
size = SyncLoopBatchSize
}
err := ss.ProcessStateSync(startHash[:], size, bc, worker)
if err != nil {
utils.Logger().Error().Err(err).
Msgf("[SYNC] ProcessStateSync failed (isBeacon: %t, ShardID: %d, otherHeight: %d, currentHeight: %d)",
isBeacon, bc.ShardID(), otherHeight, currentHeight)
// should we still call UpdateConsensusInformation() upon state sync failure?
// how to handle error here?
}
ss.ProcessStateSync(startHash[:], size, bc, worker)
ss.purgeOldBlocksFromCache()
if consensus != nil {
consensus.UpdateConsensusInformation()
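One possible answer to the open questions above, not taken in this diff, would be to gate the consensus refresh on a clean pass: call consensus.UpdateConsensusInformation() only when ProcessStateSync returned nil and let the next SyncLoop iteration retry. As written, the loop logs the error and still falls through to the update, preserving the old behavior.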

@@ -27,7 +27,7 @@ const (
mainnetV1_2Epoch = 25
mainnetV1_3Epoch = 36
mainnetV1_4Epoch = 46
mainnetV1_5Epoch = 50
mainnetV1_5Epoch = 54
mainnetMaxTxAmountLimit = 1e3 // unit is in One
mainnetMaxNumRecentTxsPerAccountLimit = 1e2
@@ -50,7 +50,7 @@ type mainnetSchedule struct{}
func (mainnetSchedule) InstanceForEpoch(epoch *big.Int) Instance {
switch {
case epoch.Cmp(big.NewInt(mainnetV1_5Epoch)) >= 0:
// forty-nine resharding epoch (for shard 0) around 17/10/2019 4:05:16 PDT
// 54 resharding epoch (for shard 0) around 23/10/2019 ~10:05 PDT
return mainnetV1_5
case epoch.Cmp(big.NewInt(mainnetV1_4Epoch)) >= 0:
// forty-sixth resharding epoch around 10/10/2019 8:06pm PDT

@@ -36,7 +36,7 @@ func TestMainnetInstanceForEpoch(t *testing.T) {
mainnetV1_4,
},
{
big.NewInt(50),
big.NewInt(54),
mainnetV1_5,
},
}

@@ -2680,7 +2680,7 @@ var FoundationalNodeAccountsV1_4 = []DeployAccount{
{Index: "319", Address: "one19c4uqfzezuws7e4ka4kvc5r09suks2ghpyg6xw", BlsPublicKey: "51b2019b222df63fc99d202b03834dee09f1ef11e25a03592a96c1d01bca2bedfc25e0f26d88dcbb8a7176e30e1ec116"},
}
// FoundationalNodeAccountsV1_5 are the accounts for the foundational nodes from Epoch 50.
// FoundationalNodeAccountsV1_5 are the accounts for the foundational nodes from Epoch 54.
var FoundationalNodeAccountsV1_5 = []DeployAccount{
{Index: "0", Address: "one1y0xcf40fg65n2ehm8fx5vda4thrkymhpg45ecj", BlsPublicKey: "9e70e8d76851f6e8dc648255acdd57bb5c49cdae7571aed43f86e9f140a6343caed2ffa860919d03e0912411fee4850a"},
{Index: "1", Address: "one18lp2w7ghhuajdpzl8zqeddza97u92wtkfcwpjk", BlsPublicKey: "fce3097d9fc234d34d6eaef3eecd0365d435d1118f69f2da1ed2a69ba725270771572e40347c222aca784cb973307b11"},
@@ -2800,7 +2800,7 @@ var FoundationalNodeAccountsV1_5 = []DeployAccount{
{Index: "115", Address: "one14ajehwyxpzpzxhke77mhtt0z6k5z6cevgf6rfa", BlsPublicKey: "52ba9ca9d046ac237214e81438b054d42b17c16654b041562723d8e6e928f92a83e6373da28a821d285ebfe118e81884"},
{Index: "116", Address: "one1hxqhp9tls9r4v5hz208g93exhvz5ak258ut7d2", BlsPublicKey: "95bad32a857901a2eecf20aa516a6fc0c21d85015ba0dc70a966f0bd70b0f3bc0f5af356fac630ef53e5e1a329d7fe0a"},
{Index: "117", Address: "one1wt5darzj8wd385xl8stccj4sv6553hgckaypfr", BlsPublicKey: "9622f8a5590d6ef8ca94e6c866d663aa0398caf00a88b2dd059dc7a63daa8600828a85737eca4e595caa382b5d407205"},
{Index: "118", Address: "one19saqljg2w5n402p589y6xenjc6lan46a9l9tah", BlsPublicKey: "bcd24c722dc5dd3727bc3f027e3f681e4d1f5a552513d158645833eb8d8d39ec1076370b55e063aeed5a7825eb6aa20a"},
{Index: "118", Address: "one1k80wv3uvfw5r0qhzp9yxn94u4jxu8my2xwuk87", BlsPublicKey: "bcd24c722dc5dd3727bc3f027e3f681e4d1f5a552513d158645833eb8d8d39ec1076370b55e063aeed5a7825eb6aa20a"},
{Index: "119", Address: "one1kwqkyzq2pmhvufe9528g9nd966ur54v6auzruf", BlsPublicKey: "aaac4eb8260e6cee7f19fbcae721ce2d68f125461953a583adca44407194452e7ac41de0757e2921c8fed83469172f92"},
{Index: "120", Address: "one1gjas4xurmc0rguafq63ql65rwuxayukm74w2mn", BlsPublicKey: "d6c8cf5553fa77257d26ba6b201294a2a497d070d420ab76c044efc0f4325f40b5664e7a7f973940ef1ea57530215886"},
{Index: "121", Address: "one1pkw7wnplp077fn6phv2kfejw3u7wvx0m9vppzc", BlsPublicKey: "92d5e3fb5d3f1e64af4be7c0acbd457b68a2ec59cf34aaaa0bac04d0e0346b283a65e0227378a60e1fe7af2407d9c50a"},

@@ -57,6 +57,9 @@ func TestCommitteeAccounts(test *testing.T) {
testAccounts(test, FoundationalNodeAccountsV1)
testAccounts(test, FoundationalNodeAccountsV1_1)
testAccounts(test, FoundationalNodeAccountsV1_2)
testAccounts(test, FoundationalNodeAccountsV1_3)
testAccounts(test, FoundationalNodeAccountsV1_4)
testAccounts(test, FoundationalNodeAccountsV1_5)
testAccounts(test, HarmonyAccounts)
testAccounts(test, TNHarmonyAccounts)
testAccounts(test, TNFoundationalAccounts)

@@ -332,7 +332,7 @@ func (node *Node) CalculateResponse(request *downloader_pb.DownloaderRequest, in
if request.BlockHash == nil {
return response, fmt.Errorf("[SYNC] GetBlockHashes Request BlockHash is NIL")
}
if request.Size == 0 || request.Size > syncing.BatchSize {
if request.Size == 0 || request.Size > syncing.SyncLoopBatchSize {
return response, fmt.Errorf("[SYNC] GetBlockHashes Request contains invalid Size %v", request.Size)
}
size := uint64(request.Size)

@@ -21,6 +21,7 @@ RACE=
VERBOSE=
DEBUG=false
NETWORK=main
STATIC=false
unset -v progdir
case "${0}" in
@@ -62,6 +63,8 @@ OPTIONS:
-f folder set the upload folder name in the bucket (default: $FOLDER)
-r enable -race build option (default: $RACE)
-v verbose build process (default: $VERBOSE)
-s build static linux executable (default: $STATIC)
ACTION:
build build binaries only (default action)
@@ -89,6 +92,11 @@ EOF
function build_only
{
if [[ "$STATIC" == "true" && "$GOOS" == "darwin" ]]; then
echo "static build only supported on Linux platform"
exit 2
fi
VERSION=$(git rev-list --count HEAD)
COMMIT=$(git describe --always --long --dirty)
BUILTAT=$(date +%FT%T%z)
@@ -104,7 +112,11 @@ function build_only
if [ "$DEBUG" == "true" ]; then
env GOOS=$GOOS GOARCH=$GOARCH go build $VERBOSE -gcflags="all=-N -l -c 2" -ldflags="-X main.version=v${VERSION} -X main.commit=${COMMIT} -X main.builtAt=${BUILTAT} -X main.builtBy=${BUILTBY}" -o $BINDIR/$bin $RACE ${SRC[$bin]}
else
env GOOS=$GOOS GOARCH=$GOARCH go build $VERBOSE -gcflags="all=-c 2" -ldflags="-X main.version=v${VERSION} -X main.commit=${COMMIT} -X main.builtAt=${BUILTAT} -X main.builtBy=${BUILTBY}" -o $BINDIR/$bin $RACE ${SRC[$bin]}
if [ "$STATIC" == "true" ]; then
env GOOS=$GOOS GOARCH=$GOARCH go build $VERBOSE -gcflags="all=-c 2" -ldflags="-X main.version=v${VERSION} -X main.commit=${COMMIT} -X main.builtAt=${BUILTAT} -X main.builtBy=${BUILTBY} -w -extldflags \"-static\"" -o $BINDIR/$bin $RACE ${SRC[$bin]}
else
env GOOS=$GOOS GOARCH=$GOARCH go build $VERBOSE -gcflags="all=-c 2" -ldflags="-X main.version=v${VERSION} -X main.commit=${COMMIT} -X main.builtAt=${BUILTAT} -X main.builtBy=${BUILTBY}" -o $BINDIR/$bin $RACE ${SRC[$bin]}
fi
fi
if [ "$(uname -s)" == "Linux" ]; then
$BINDIR/$bin -version || $BINDIR/$bin version
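Two linker details do the work in the static branch: -w omits DWARF debug information from the binary, and -extldflags "-static" forwards -static to the external C linker so the mcl/bls C libraries are baked into the executable instead of resolved as shared objects at runtime. Both are standard Go toolchain behavior, not specific to this script.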
@@ -131,13 +143,15 @@ function upload
[ -e $BINDIR/$bin ] && $AWSCLI s3 cp $BINDIR/$bin s3://${BUCKET}$FOLDER/$bin --acl public-read
done
for lib in "${!LIB[@]}"; do
if [ -e ${LIB[$lib]} ]; then
$AWSCLI s3 cp ${LIB[$lib]} s3://${BUCKET}$FOLDER/$lib --acl public-read
else
echo "!! MISSING ${LIB[$lib]} !!"
fi
done
if [ "$STATIC" != "true" ]; then
for lib in "${!LIB[@]}"; do
if [ -e ${LIB[$lib]} ]; then
$AWSCLI s3 cp ${LIB[$lib]} s3://${BUCKET}$FOLDER/$lib --acl public-read
else
echo "!! MISSING ${LIB[$lib]} !!"
fi
done
fi
[ -e $BINDIR/md5sum.txt ] && $AWSCLI s3 cp $BINDIR/md5sum.txt s3://${BUCKET}$FOLDER/md5sum.txt --acl public-read
}
@@ -170,13 +184,15 @@ function release
fi
done
for lib in "${!LIB[@]}"; do
if [ -e ${LIB[$lib]} ]; then
$AWSCLI s3 cp ${LIB[$lib]} s3://${PUBBUCKET}/$FOLDER/$lib --acl public-read
else
echo "!! MISSING ${LIB[$lib]} !!"
fi
done
if [ "$STATIC" != "true" ]; then
for lib in "${!LIB[@]}"; do
if [ -e ${LIB[$lib]} ]; then
$AWSCLI s3 cp ${LIB[$lib]} s3://${PUBBUCKET}/$FOLDER/$lib --acl public-read
else
echo "!! MISSING ${LIB[$lib]} !!"
fi
done
fi
[ -e $BINDIR/md5sum.txt ] && $AWSCLI s3 cp $BINDIR/md5sum.txt s3://${PUBBUCKET}/$FOLDER/md5sum.txt --acl public-read
}
@@ -216,7 +232,7 @@ function upload_wallet
}
################################ MAIN FUNCTION ##############################
while getopts "hp:a:o:b:f:rvN:" option; do
while getopts "hp:a:o:b:f:rvsN:" option; do
case $option in
h) usage ;;
p) PROFILE=$OPTARG ;;
@@ -227,6 +243,7 @@ while getopts "hp:a:o:b:f:rvN:" option; do
r) RACE=-race ;;
v) VERBOSE='-v -x' ;;
d) DEBUG=true ;;
s) STATIC=true ;;
N) NETWORK=$OPTARG ;;
esac
done
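Usage note: ./scripts/go_executable_build.sh -s selects the static path end to end. With -s, build_only refuses to run on darwin, the binaries are linked statically, and the upload and release steps skip the shared-library copies that a static binary no longer needs.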

@@ -48,13 +48,13 @@ func (d Directive) String() string {
// CreateValidator - type for creating a new validator
type CreateValidator struct {
Description *Description `json:"description" yaml:"description" rlp:"nil"`
CommissionRates `json:"commission" yaml:"commission" rlp:"nil"`
MinSelfDelegation *big.Int `json:"min_self_delegation" yaml:"min_self_delegation" rlp:"nil"`
MaxTotalDelegation *big.Int `json:"max_total_delegation" yaml:"max_total_delegation" rlp:"nil"`
ValidatorAddress common.Address `json:"validator_address" yaml:"validator_address" rlp:"nil"`
SlotPubKeys []shard.BlsPublicKey `json:"slot_pub_keys" yaml:"slot_pub_keys" rlp:"nil"`
Amount *big.Int `json:"amount" yaml:"amount" rlp:"nil"`
ValidatorAddress common.Address `json:"validator_address" yaml:"validator_address"`
Description *Description `json:"description" yaml:"description"`
CommissionRates `json:"commission" yaml:"commission"`
MinSelfDelegation *big.Int `json:"min_self_delegation" yaml:"min_self_delegation"`
MaxTotalDelegation *big.Int `json:"max_total_delegation" yaml:"max_total_delegation"`
SlotPubKeys []shard.BlsPublicKey `json:"slot_pub_keys" yaml:"slot_pub_keys"`
Amount *big.Int `json:"amount" yaml:"amount"`
}
// EditValidator - type for edit existing validator
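Two things happen to CreateValidator here: the fields are reordered (ValidatorAddress now leads) and the rlp:"nil" tags come off. In go-ethereum's rlp package, struct fields encode strictly in declaration order, so the reorder is a wire-format change, and the "nil" tag is only meaningful on pointer fields, where it lets an empty item decode back to a nil pointer. A small round-trip sketch of both points (demo is illustrative, not a type from this repo):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/rlp"
)

// demo mirrors the shape of the change above: field order defines the
// encoding, and rlp:"nil" applies to the pointer-typed Amount field.
type demo struct {
	Name   string
	Amount *big.Int `rlp:"nil"`
}

func main() {
	enc, err := rlp.EncodeToBytes(demo{Name: "validator", Amount: big.NewInt(7)})
	if err != nil {
		panic(err)
	}
	var dec demo
	if err := rlp.DecodeBytes(enc, &dec); err != nil {
		panic(err)
	}
	fmt.Printf("round-trip ok: %s %v\n", dec.Name, dec.Amount)
}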

@@ -0,0 +1,25 @@
package main
import (
"encoding/hex"
"fmt"
"github.com/ethereum/go-ethereum/crypto"
common2 "github.com/harmony-one/harmony/internal/common"
)
func main() {
// Create an account
key, _ := crypto.GenerateKey()
// Get the address
address := crypto.PubkeyToAddress(key.PublicKey)
// 0x8ee3333cDE801ceE9471ADf23370c48b011f82a6
// Get the private key
privateKey := hex.EncodeToString(key.D.Bytes())
// 05b14254a1d0c77a49eae3bdf080f926a2df17d8e2ebdf7af941ea001481e57f
fmt.Printf("account: %s\n", common2.MustAddressToBech32(address))
fmt.Printf("private Key : %s\n", privateKey)
}
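The hex strings in the comments are sample outputs from a single run; go run test/one_address/main.go generates a fresh key each time, printing the bech32 account address (one1 prefix) and the matching private key.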