package main

import (
	"fmt"
	"math/big"
	"math/rand"
	_ "net/http/pprof"
	"os"
	"os/signal"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"syscall"
	"time"

	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/log"
	"github.com/harmony-one/bls/ffi/go/bls"
	"github.com/harmony-one/harmony/api/service"
	"github.com/harmony-one/harmony/api/service/crosslink_sending"
	"github.com/harmony-one/harmony/api/service/pprof"
	"github.com/harmony-one/harmony/api/service/prometheus"
	"github.com/harmony-one/harmony/api/service/stagedstreamsync"
	"github.com/harmony-one/harmony/api/service/synchronize"
	"github.com/harmony-one/harmony/common/fdlimit"
	"github.com/harmony-one/harmony/common/ntp"
	"github.com/harmony-one/harmony/consensus"
	"github.com/harmony-one/harmony/consensus/quorum"
	"github.com/harmony-one/harmony/core"
	"github.com/harmony-one/harmony/hmy/downloader"
	"github.com/harmony-one/harmony/internal/chain"
	"github.com/harmony-one/harmony/internal/cli"
	"github.com/harmony-one/harmony/internal/common"
	harmonyconfig "github.com/harmony-one/harmony/internal/configs/harmony"
	nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
	shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding"
	"github.com/harmony-one/harmony/internal/genesis"
	"github.com/harmony-one/harmony/internal/params"
	"github.com/harmony-one/harmony/internal/registry"
	"github.com/harmony-one/harmony/internal/shardchain"
	"github.com/harmony-one/harmony/internal/shardchain/tikv_manage"
	"github.com/harmony-one/harmony/internal/tikv/redis_helper"
	"github.com/harmony-one/harmony/internal/tikv/statedb_cache"
	"github.com/harmony-one/harmony/internal/utils"
	"github.com/harmony-one/harmony/multibls"
	"github.com/harmony-one/harmony/node"
	"github.com/harmony-one/harmony/numeric"
	"github.com/harmony-one/harmony/p2p"
	rosetta_common "github.com/harmony-one/harmony/rosetta/common"
	rpc_common "github.com/harmony-one/harmony/rpc/common"
	"github.com/harmony-one/harmony/shard"
	"github.com/harmony-one/harmony/webhooks"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

// Host
var (
	myHost          p2p.Host
	initialAccounts = []*genesis.DeployAccount{}
)

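// rootCmd is the entry command of the harmony binary; invoking it with no
// subcommand starts a node via runHarmonyNode.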
var rootCmd = &cobra.Command{
	Use:   "harmony",
	Short: "harmony is the Harmony node binary file",
	Long: `harmony is the Harmony node binary file

Example usage:

# start a validator node with default bls folder (default bls key files in ./.hmy/blskeys)
./harmony

# start a validator node with customized bls key folder
./harmony --bls.dir [bls_folder]

# start a validator node with open RPC endpoints and customized ports
./harmony --http.ip=0.0.0.0 --http.port=[http_port] --ws.ip=0.0.0.0 --ws.port=[ws_port]

# start an explorer node
./harmony --run=explorer --run.shard=[shard_id]

# start a harmony internal node on testnet
./harmony --run.legacy --network testnet
`,
	Run: runHarmonyNode,
}

var configFlag = cli.StringFlag{
	Name:      "config",
	Usage:     "load node config from the config toml file.",
	Shorthand: "c",
	DefValue:  "",
}

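// init seeds math/rand, wires up the subcommand tree, and registers all CLI
// flags; flag-registration failures abort the process with exit code 2.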
func init() {
	rand.Seed(time.Now().UnixNano())
	cli.SetParseErrorHandle(func(err error) {
		os.Exit(128) // 128 - invalid command line arguments
	})

	configCmd.AddCommand(dumpConfigCmd)
	configCmd.AddCommand(updateConfigCmd)
	rootCmd.AddCommand(configCmd)
	rootCmd.AddCommand(versionCmd)
	rootCmd.AddCommand(dumpConfigLegacyCmd)
	rootCmd.AddCommand(dumpDBCmd)
	rootCmd.AddCommand(inspectDBCmd)

	if err := registerRootCmdFlags(); err != nil {
		os.Exit(2)
	}
	if err := registerDumpConfigFlags(); err != nil {
		os.Exit(2)
	}
	if err := registerDumpDBFlags(); err != nil {
		os.Exit(2)
	}
	if err := registerInspectionFlags(); err != nil {
		os.Exit(2)
	}
}

func main() {
	rootCmd.Execute()
}

func registerRootCmdFlags() error {
	flags := getRootFlags()

	return cli.RegisterFlags(rootCmd, flags)
}

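// runHarmonyNode is the root command handler: it prints the version and
// exits when the version flag is set, otherwise it loads the node
// configuration, sets up logging, and boots the node.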
func runHarmonyNode(cmd *cobra.Command, args []string) {
	if cli.GetBoolFlagValue(cmd, versionFlag) {
		printVersion()
		os.Exit(0)
	}

	if err := prepareRootCmd(cmd); err != nil {
		fmt.Fprint(os.Stderr, err)
		os.Exit(128)
	}
	cfg, err := getHarmonyConfig(cmd)
	if err != nil {
		fmt.Fprint(os.Stderr, err)
		os.Exit(128)
	}

	setupNodeLog(cfg)
	setupNodeAndRun(cfg)
}

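// prepareRootCmd applies process-wide defaults (pure-Go DNS resolver,
// GOMAXPROCS, file descriptor limits) before the node starts.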
func prepareRootCmd(cmd *cobra.Command) error {
	// HACK Force usage of go implementation rather than the C based one. Do the right way, see the
	// notes on lines 66-67 of https://golang.org/src/net/net.go that say we can make the decision at
	// build time.
	os.Setenv("GODEBUG", "netdns=go")
	// Don't set higher than num of CPU. It will make go scheduler slower.
	runtime.GOMAXPROCS(runtime.NumCPU())
	// Raise fd limits
	return raiseFdLimits()
}

func raiseFdLimits() error {
	limit, err := fdlimit.Maximum()
	if err != nil {
		return errors.Wrap(err, "Failed to retrieve file descriptor allowance")
	}
	_, err = fdlimit.Raise(uint64(limit))
	if err != nil {
		return errors.Wrap(err, "Failed to raise file descriptor allowance")
	}
	return nil
}

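// getHarmonyConfig resolves the effective HarmonyConfig: it loads the file
// given via --config (offering a migration when the file is from an older
// config version) or falls back to the network-type defaults, then overlays
// command-line flags and validates the result.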
func getHarmonyConfig(cmd *cobra.Command) (harmonyconfig.HarmonyConfig, error) {
	var (
		config         harmonyconfig.HarmonyConfig
		err            error
		migratedFrom   string
		configFile     string
		isUsingDefault bool
	)
	if cli.IsFlagChanged(cmd, configFlag) {
		configFile = cli.GetStringFlagValue(cmd, configFlag)
		config, migratedFrom, err = loadHarmonyConfig(configFile)
	} else {
		nt := getNetworkType(cmd)
		config = getDefaultHmyConfigCopy(nt)
		isUsingDefault = true
	}
	if err != nil {
		return harmonyconfig.HarmonyConfig{}, err
	}
	if migratedFrom != defaultConfig.Version && !isUsingDefault {
		fmt.Printf("Old config version detected %s\n",
			migratedFrom)
		stat, _ := os.Stdin.Stat()
		// Only prompt to update the config when attached to a terminal
		if stat.Mode()&os.ModeCharDevice != 0 {
			if promptConfigUpdate() {
				err := updateConfigFile(configFile)
				if err != nil {
					fmt.Printf("Could not update config - %s\n", err.Error())
					fmt.Println("Update config manually with `./harmony config update [config_file]`")
				}
			}
		} else {
			fmt.Println("Update saved config with `./harmony config update [config_file]`")
		}
	}

	applyRootFlags(cmd, &config)

	if err := validateHarmonyConfig(config); err != nil {
		return harmonyconfig.HarmonyConfig{}, err
	}
	sanityFixHarmonyConfig(&config)
	return config, nil
}

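// applyRootFlags overlays command-line flag values onto the loaded config,
// one flag group at a time.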
func applyRootFlags(cmd *cobra.Command, config *harmonyconfig.HarmonyConfig) {
	// Misc flags shall be applied first since legacy ip / port is overwritten
	// by new ip / port flags
	applyLegacyMiscFlags(cmd, config)
	applyGeneralFlags(cmd, config)
	applyNetworkFlags(cmd, config)
	applyDNSSyncFlags(cmd, config)
	applyP2PFlags(cmd, config)
	applyHTTPFlags(cmd, config)
	applyWSFlags(cmd, config)
	applyRPCOptFlags(cmd, config)
	applyBLSFlags(cmd, config)
	applyConsensusFlags(cmd, config)
	applyTxPoolFlags(cmd, config)
	applyPprofFlags(cmd, config)
	applyLogFlags(cmd, config)
	applySysFlags(cmd, config)
	applyDevnetFlags(cmd, config)
	applyRevertFlags(cmd, config)
	applyPreimageFlags(cmd, config)
	applyPrometheusFlags(cmd, config)
	applySyncFlags(cmd, config)
	applyShardDataFlags(cmd, config)
	applyGPOFlags(cmd, config)
}

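// setupNodeLog configures log verbosity and context and, unless console
// logging is requested, attaches a rotating log file.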
func setupNodeLog(config harmonyconfig.HarmonyConfig) {
	logPath := filepath.Join(config.Log.Folder, config.Log.FileName)
	verbosity := config.Log.Verbosity

	utils.SetLogVerbosity(log.Lvl(verbosity))
	if config.Log.Context != nil {
		ip := config.Log.Context.IP
		port := config.Log.Context.Port
		utils.SetLogContext(ip, strconv.Itoa(port))
	}

	if !config.Log.Console {
		utils.AddLogFile(logPath, config.Log.RotateSize, config.Log.RotateCount, config.Log.RotateMaxAge)
	}
}

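// revert rolls the chain back block by block until the height drops below
// Revert.RevertTo, rewriting the commit signature for each removed block,
// and then exits the process.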
func revert(chain core.BlockChain, hc harmonyconfig.HarmonyConfig) {
	curNum := chain.CurrentBlock().NumberU64()
	if curNum < uint64(hc.Revert.RevertBefore) && curNum >= uint64(hc.Revert.RevertTo) {
		// Remove invalid blocks
		for chain.CurrentBlock().NumberU64() >= uint64(hc.Revert.RevertTo) {
			curBlock := chain.CurrentBlock()
			rollbacks := []ethCommon.Hash{curBlock.Hash()}
			if err := chain.Rollback(rollbacks); err != nil {
				fmt.Printf("Revert failed: %v\n", err)
				os.Exit(1)
			}
			lastSig := curBlock.Header().LastCommitSignature()
			sigAndBitMap := append(lastSig[:], curBlock.Header().LastCommitBitmap()...)
			chain.WriteCommitSig(curBlock.NumberU64()-1, sigAndBitMap)
		}
		fmt.Printf("Revert finished. Current block: %v\n", chain.CurrentBlock().NumberU64())
		utils.Logger().Warn().
			Uint64("Current Block", chain.CurrentBlock().NumberU64()).
			Msg("Revert finished.")
		os.Exit(1)
	}
}

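// setupNodeAndRun performs the full node bootstrap: account and shard
// resolution, global config creation, optional revert and pre-image
// handling, service registration, and finally the p2p host and consensus
// startup. It blocks forever once the node is running.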
func setupNodeAndRun(hc harmonyconfig.HarmonyConfig) {
	var err error

	nodeconfigSetShardSchedule(hc)
	nodeconfig.SetShardingSchedule(shard.Schedule)
	nodeconfig.SetVersion(getHarmonyVersion())

	if hc.General.NodeType == "validator" {
		var err error
		if hc.General.NoStaking {
			err = setupLegacyNodeAccount(hc)
		} else {
			err = setupStakingNodeAccount(hc)
		}
		if err != nil {
			fmt.Fprintf(os.Stderr, "cannot set up node account: %s\n", err)
			os.Exit(1)
		}
	}
	if hc.General.NodeType == "validator" {
		fmt.Printf("%s mode; node key %s -> shard %d\n",
			map[bool]string{false: "Legacy", true: "Staking"}[!hc.General.NoStaking],
			nodeconfig.GetDefaultConfig().ConsensusPriKey.GetPublicKeys().SerializeToHexStr(),
			initialAccounts[0].ShardID)
	}
	if hc.General.NodeType != "validator" && hc.General.ShardID >= 0 {
		for _, initialAccount := range initialAccounts {
			utils.Logger().Info().
				Uint32("original", initialAccount.ShardID).
				Int("override", hc.General.ShardID).
				Msg("ShardID Override")
			initialAccount.ShardID = uint32(hc.General.ShardID)
		}
	}

	nodeConfig, err := createGlobalConfig(hc)
	if err != nil {
		fmt.Fprintf(os.Stderr, "ERROR cannot configure node: %s\n", err)
		os.Exit(1)
	}

	if hc.General.RunElasticMode && hc.TiKV == nil {
		fmt.Fprintln(os.Stderr, "elastic mode requires a TiKV config section")
		os.Exit(1)
	}

	// Update ethereum compatible chain ids
	params.UpdateEthChainIDByShard(nodeConfig.ShardID)

	currentNode := setupConsensusAndNode(hc, nodeConfig, registry.New())
	nodeconfig.GetDefaultConfig().ShardID = nodeConfig.ShardID
	nodeconfig.GetDefaultConfig().IsOffline = nodeConfig.IsOffline
	nodeconfig.GetDefaultConfig().Downloader = nodeConfig.Downloader
	nodeconfig.GetDefaultConfig().StagedSync = nodeConfig.StagedSync

	// Check NTP configuration
	accurate, err := ntp.CheckLocalTimeAccurate(nodeConfig.NtpServer)
	if !accurate {
		if os.IsTimeout(err) {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			fmt.Fprintf(os.Stderr, "NTP query timed out. Continuing.\n")
		} else {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			fmt.Fprintf(os.Stderr, "Error: local clock is not accurate. Please configure NTP properly.\n")
		}
	}
	if err != nil {
		utils.Logger().Warn().Err(err).Msg("Check Local Time Accuracy Error")
	}

	// Parse RPC config
	nodeConfig.RPCServer = hc.ToRPCServerConfig()

	// Parse rosetta config
	nodeConfig.RosettaServer = nodeconfig.RosettaServerConfig{
		HTTPEnabled: hc.HTTP.RosettaEnabled,
		HTTPIp:      hc.HTTP.IP,
		HTTPPort:    hc.HTTP.RosettaPort,
	}

	if hc.Revert != nil && hc.Revert.RevertBefore != 0 && hc.Revert.RevertTo != 0 {
		chain := currentNode.Blockchain()
		if hc.Revert.RevertBeacon {
			chain = currentNode.Beaconchain()
		}
		revert(chain, hc)
	}

	// code to handle pre-image export, import and generation
	if hc.Preimage != nil {
		if hc.Preimage.ImportFrom != "" {
			if err := core.ImportPreimages(
				currentNode.Blockchain(),
				hc.Preimage.ImportFrom,
			); err != nil {
				fmt.Println("Error importing", err)
				os.Exit(1)
			}
			os.Exit(0)
		} else if exportPath := hc.Preimage.ExportTo; exportPath != "" {
			if err := core.ExportPreimages(
				currentNode.Blockchain(),
				exportPath,
			); err != nil {
				fmt.Println("Error exporting", err)
				os.Exit(1)
			}
			os.Exit(0)
			// both must be set
		} else if hc.Preimage.GenerateStart > 0 {
			chain := currentNode.Blockchain()
			end := hc.Preimage.GenerateEnd
			current := chain.CurrentBlock().NumberU64()
			if end > current {
				fmt.Printf(
					"Cropping generate endpoint from %d to %d\n",
					end, current,
				)
				end = current
			}

			if end == 0 {
				end = current
			}

			fmt.Println("Starting generation")
			if err := core.GeneratePreimages(
				chain,
				hc.Preimage.GenerateStart, end,
			); err != nil {
				fmt.Println("Error generating", err)
				os.Exit(1)
			}
			fmt.Println("Generation successful")
			os.Exit(0)
		}
		os.Exit(0)
	}

	startMsg := "==== New Harmony Node ===="
	if hc.General.NodeType == nodeTypeExplorer {
		startMsg = "==== New Explorer Node ===="
	}

	utils.Logger().Info().
		Str("BLSPubKey", nodeConfig.ConsensusPriKey.GetPublicKeys().SerializeToHexStr()).
		Uint32("ShardID", nodeConfig.ShardID).
		Str("ShardGroupID", nodeConfig.GetShardGroupID().String()).
		Str("BeaconGroupID", nodeConfig.GetBeaconGroupID().String()).
		Str("ClientGroupID", nodeConfig.GetClientGroupID().String()).
		Str("Role", currentNode.NodeConfig.Role().String()).
		Str("Version", getHarmonyVersion()).
		Str("multiaddress",
			fmt.Sprintf("/ip4/%s/tcp/%d/p2p/%s", hc.P2P.IP, hc.P2P.Port, myHost.GetID().Pretty()),
		).
		Msg(startMsg)

	nodeconfig.SetPeerID(myHost.GetID())

	if hc.Log.VerbosePrints.Config {
		utils.Logger().Info().Interface("config", rpc_common.Config{
			HarmonyConfig: hc,
			NodeConfig:    *nodeConfig,
			ChainConfig:   *currentNode.Blockchain().Config(),
		}).Msg("verbose prints config")
	}

	// Setup services
	if hc.Sync.Enabled {
		if hc.Sync.StagedSync {
			setupStagedSyncService(currentNode, myHost, hc)
		} else {
			setupSyncService(currentNode, myHost, hc)
		}
	}
	if currentNode.NodeConfig.Role() == nodeconfig.Validator {
		currentNode.RegisterValidatorServices()
	} else if currentNode.NodeConfig.Role() == nodeconfig.ExplorerNode {
		currentNode.RegisterExplorerServices()
	}
	currentNode.RegisterService(service.CrosslinkSending, crosslink_sending.New(currentNode, currentNode.Blockchain()))
	if hc.Pprof.Enabled {
		setupPprofService(currentNode, hc)
	}
	if hc.Prometheus.Enabled {
		setupPrometheusService(currentNode, hc, nodeConfig.ShardID)
	}

	if hc.DNSSync.Server && !hc.General.IsOffline {
		utils.Logger().Info().Msg("support gRPC sync server")
		currentNode.SupportGRPCSyncServer(hc.DNSSync.ServerPort)
	}
	if hc.DNSSync.Client && !hc.General.IsOffline {
		utils.Logger().Info().Msg("go with gRPC sync client")
		currentNode.StartGRPCSyncClient()
	}

	currentNode.NodeSyncing()

	if err := currentNode.StartServices(); err != nil {
		fmt.Fprint(os.Stderr, err.Error())
		os.Exit(-1)
	}

	if err := currentNode.StartRPC(); err != nil {
		utils.Logger().Warn().
			Err(err).
			Msg("StartRPC failed")
	}

	if err := currentNode.StartRosetta(); err != nil {
		utils.Logger().Warn().
			Err(err).
			Msg("Start Rosetta failed")
	}

	go core.WritePreimagesMetricsIntoPrometheus(
		currentNode.Blockchain(),
		currentNode.Consensus.UpdatePreimageGenerationMetrics,
	)

	go listenOSSigAndShutDown(currentNode)

	if !hc.General.IsOffline {
		if err := myHost.Start(); err != nil {
			utils.Logger().Fatal().
				Err(err).
				Msg("Start p2p host failed")
		}

		if err := currentNode.BootstrapConsensus(); err != nil {
			fmt.Fprintf(os.Stderr, "could not bootstrap consensus: %s\n", err.Error())
			if !currentNode.NodeConfig.IsOffline {
				os.Exit(-1)
			}
		}

		if err := currentNode.StartPubSub(); err != nil {
			fmt.Fprintf(os.Stderr, "could not begin network message handling for node: %s\n", err.Error())
			os.Exit(-1)
		}
	}

	// Block forever; the node runs in its service goroutines.
	select {}
}

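// nodeconfigSetShardSchedule selects the sharding schedule that matches the
// configured network type; for devnet it builds a fixed schedule from the
// devnet config.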
func nodeconfigSetShardSchedule(config harmonyconfig.HarmonyConfig) {
	switch config.Network.NetworkType {
	case nodeconfig.Mainnet:
		shard.Schedule = shardingconfig.MainnetSchedule
	case nodeconfig.Testnet:
		shard.Schedule = shardingconfig.TestnetSchedule
	case nodeconfig.Pangaea:
		shard.Schedule = shardingconfig.PangaeaSchedule
	case nodeconfig.Localnet:
		shard.Schedule = shardingconfig.LocalnetSchedule
	case nodeconfig.Partner:
		shard.Schedule = shardingconfig.PartnerSchedule
	case nodeconfig.Stressnet:
		shard.Schedule = shardingconfig.StressNetSchedule
	case nodeconfig.Devnet:
		var dnConfig harmonyconfig.DevnetConfig
		if config.Devnet != nil {
			dnConfig = *config.Devnet
		} else {
			dnConfig = getDefaultDevnetConfigCopy()
		}

		devnetConfig, err := shardingconfig.NewInstance(
			uint32(dnConfig.NumShards), dnConfig.ShardSize,
			dnConfig.HmyNodeSize, dnConfig.SlotsLimit,
			numeric.OneDec(), genesis.HarmonyAccounts,
			genesis.FoundationalNodeAccounts, shardingconfig.Allowlist{},
			nil, numeric.ZeroDec(), ethCommon.Address{},
			nil, shardingconfig.VLBPE,
		)
		if err != nil {
			_, _ = fmt.Fprintf(os.Stderr, "ERROR invalid devnet sharding config: %s",
				err)
			os.Exit(1)
		}
		shard.Schedule = shardingconfig.NewFixedSchedule(devnetConfig)
	}
}

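// findAccountsByPubKeys collects the genesis accounts whose BLS public keys
// match the given keys into the package-level initialAccounts slice.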
func findAccountsByPubKeys(config shardingconfig.Instance, pubKeys multibls.PublicKeys) {
	for _, key := range pubKeys {
		keyStr := key.Bytes.Hex()
		_, account := config.FindAccount(keyStr)
		if account != nil {
			initialAccounts = append(initialAccounts, account)
		}
	}
}

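// setupLegacyNodeAccount resolves the node's genesis/foundational account
// from its BLS keys, searching each resharding epoch's sharding instance,
// and exits with code 100 when no matching account is found.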
func setupLegacyNodeAccount(hc harmonyconfig.HarmonyConfig) error {
	genesisShardingConfig := shard.Schedule.InstanceForEpoch(big.NewInt(core.GenesisEpoch))
	multiBLSPubKey := setupConsensusKeys(hc, nodeconfig.GetDefaultConfig())

	reshardingEpoch := genesisShardingConfig.ReshardingEpoch()
	if len(reshardingEpoch) > 0 {
		for _, epoch := range reshardingEpoch {
			config := shard.Schedule.InstanceForEpoch(epoch)
			findAccountsByPubKeys(config, multiBLSPubKey)
			if len(initialAccounts) != 0 {
				break
			}
		}
	} else {
		findAccountsByPubKeys(genesisShardingConfig, multiBLSPubKey)
	}

	if len(initialAccounts) == 0 {
		fmt.Fprintf(
			os.Stderr,
			"ERROR cannot find your BLS key in the genesis/FN tables: %s\n",
			multiBLSPubKey.SerializeToHexStr(),
		)
		os.Exit(100)
	}

	for _, account := range initialAccounts {
		fmt.Printf("My Genesis Account: %v\n", *account)
	}
	return nil
}

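// setupStakingNodeAccount derives the shard from the consensus BLS keys,
// verifies that all keys belong to that shard, and registers one initial
// account per key.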
func setupStakingNodeAccount(hc harmonyconfig.HarmonyConfig) error {
	pubKeys := setupConsensusKeys(hc, nodeconfig.GetDefaultConfig())
	shardID, err := nodeconfig.GetDefaultConfig().ShardIDFromConsensusKey()
	if err != nil {
		return errors.Wrap(err, "cannot determine shard to join")
	}
	if err := nodeconfig.GetDefaultConfig().ValidateConsensusKeysForSameShard(
		pubKeys, shardID,
	); err != nil {
		return err
	}
	for _, blsKey := range pubKeys {
		initialAccount := &genesis.DeployAccount{}
		initialAccount.ShardID = shardID
		initialAccount.BLSPublicKey = blsKey.Bytes.Hex()
		initialAccount.Address = ""
		initialAccounts = append(initialAccounts, initialAccount)
	}
	return nil
}

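// createGlobalConfig builds the per-shard node configuration from the
// HarmonyConfig: consensus keys, network type, staged-sync knobs, the P2P
// key and host, web hooks, and NTP settings.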
func createGlobalConfig(hc harmonyconfig.HarmonyConfig) (*nodeconfig.ConfigType, error) {
	var err error

	if len(initialAccounts) == 0 {
		initialAccounts = append(initialAccounts, &genesis.DeployAccount{ShardID: uint32(hc.General.ShardID)})
	}
	nodeConfig := nodeconfig.GetShardConfig(initialAccounts[0].ShardID)
	if hc.General.NodeType == nodeTypeValidator {
		// Set up consensus keys.
		setupConsensusKeys(hc, nodeConfig)
	} else {
		// set dummy bls key for consensus object
		nodeConfig.ConsensusPriKey = multibls.GetPrivateKeys(&bls.SecretKey{})
	}

	// Set network type
	netType := nodeconfig.NetworkType(hc.Network.NetworkType)
	nodeconfig.SetNetworkType(netType)                // sets for both global and shard configs
	nodeConfig.SetShardID(initialAccounts[0].ShardID) // sets shard ID
	nodeConfig.SetArchival(hc.General.IsBeaconArchival, hc.General.IsArchival)
	nodeConfig.IsOffline = hc.General.IsOffline
	nodeConfig.Downloader = hc.Sync.Downloader
	nodeConfig.StagedSync = hc.Sync.StagedSync
	nodeConfig.StagedSyncTurboMode = hc.Sync.StagedSyncCfg.TurboMode
	nodeConfig.UseMemDB = hc.Sync.StagedSyncCfg.UseMemDB
	nodeConfig.DoubleCheckBlockHashes = hc.Sync.StagedSyncCfg.DoubleCheckBlockHashes
	nodeConfig.MaxBlocksPerSyncCycle = hc.Sync.StagedSyncCfg.MaxBlocksPerSyncCycle
	nodeConfig.MaxBackgroundBlocks = hc.Sync.StagedSyncCfg.MaxBackgroundBlocks
	nodeConfig.MaxMemSyncCycleSize = hc.Sync.StagedSyncCfg.MaxMemSyncCycleSize
	nodeConfig.VerifyAllSig = hc.Sync.StagedSyncCfg.VerifyAllSig
	nodeConfig.VerifyHeaderBatchSize = hc.Sync.StagedSyncCfg.VerifyHeaderBatchSize
	nodeConfig.InsertChainBatchSize = hc.Sync.StagedSyncCfg.InsertChainBatchSize
	nodeConfig.LogProgress = hc.Sync.StagedSyncCfg.LogProgress
	nodeConfig.DebugMode = hc.Sync.StagedSyncCfg.DebugMode
	// P2P private key is used for secure message transfer between p2p nodes.
	nodeConfig.P2PPriKey, _, err = utils.LoadKeyFromFile(hc.P2P.KeyFile)
	if err != nil {
		return nil, errors.Wrapf(err, "cannot load or create P2P key at %#v",
			hc.P2P.KeyFile)
	}

	selfPeer := p2p.Peer{
		IP:              hc.P2P.IP,
		Port:            strconv.Itoa(hc.P2P.Port),
		ConsensusPubKey: nodeConfig.ConsensusPriKey[0].Pub.Object,
	}

	// for local-net the node has to be forced to assume it is public reachable
	forceReachabilityPublic := false
	if hc.Network.NetworkType == nodeconfig.Localnet {
		forceReachabilityPublic = true
	}

	myHost, err = p2p.NewHost(p2p.HostConfig{
		Self:                     &selfPeer,
		BLSKey:                   nodeConfig.P2PPriKey,
		BootNodes:                hc.Network.BootNodes,
		DataStoreFile:            hc.P2P.DHTDataStore,
		DiscConcurrency:          hc.P2P.DiscConcurrency,
		MaxConnPerIP:             hc.P2P.MaxConnsPerIP,
		DisablePrivateIPScan:     hc.P2P.DisablePrivateIPScan,
		MaxPeers:                 hc.P2P.MaxPeers,
		ConnManagerLowWatermark:  hc.P2P.ConnManagerLowWatermark,
		ConnManagerHighWatermark: hc.P2P.ConnManagerHighWatermark,
		WaitForEachPeerToConnect: hc.P2P.WaitForEachPeerToConnect,
		ForceReachabilityPublic:  forceReachabilityPublic,
	})
	if err != nil {
		return nil, errors.Wrap(err, "cannot create P2P network host")
	}

	nodeConfig.DBDir = hc.General.DataDir

	if hc.Legacy != nil && hc.Legacy.WebHookConfig != nil && len(*hc.Legacy.WebHookConfig) != 0 {
		p := *hc.Legacy.WebHookConfig
		config, err := webhooks.NewWebHooksFromPath(p)
		if err != nil {
			fmt.Fprintf(
				os.Stderr, "yaml path is bad: %s\n", p,
			)
			os.Exit(1)
		}
		nodeConfig.WebHooks.Hooks = config
|
|
|
	}

	nodeConfig.NtpServer = hc.Sys.NtpServer

	nodeConfig.TraceEnable = hc.General.TraceEnable

	return nodeConfig, nil
}
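// setupChain picks the shard chain DB factory for the configured run mode
// (TiKV in elastic mode, sharded LevelDB when shard data is enabled, plain
// LevelDB otherwise), builds the shard chain collection, and registers the
// engine, beacon chain, and block chain on the registry.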
func setupChain(hc harmonyconfig.HarmonyConfig, nodeConfig *nodeconfig.ConfigType, registry *registry.Registry) *registry.Registry {

	// Current node.
	var chainDBFactory shardchain.DBFactory
	if hc.General.RunElasticMode {
		chainDBFactory = setupTiKV(hc)
	} else if hc.ShardData.EnableShardData {
		chainDBFactory = &shardchain.LDBShardFactory{
			RootDir:    nodeConfig.DBDir,
			DiskCount:  hc.ShardData.DiskCount,
			ShardCount: hc.ShardData.ShardCount,
			CacheTime:  hc.ShardData.CacheTime,
			CacheSize:  hc.ShardData.CacheSize,
		}
	} else {
		chainDBFactory = &shardchain.LDBFactory{RootDir: nodeConfig.DBDir}
	}
	engine := chain.NewEngine()
	registry.SetEngine(engine)

	chainConfig := nodeConfig.GetNetworkType().ChainConfig()
	collection := shardchain.NewCollection(
		&hc, chainDBFactory, &core.GenesisInitializer{NetworkType: nodeConfig.GetNetworkType()}, engine, &chainConfig,
	)
	for shardID, archival := range nodeConfig.ArchiveModes() {
		if archival {
			collection.DisableCache(shardID)
		}
	}
	registry.SetShardChainCollection(collection)

	var blockchain core.BlockChain

	// If we are not the beacon chain, make sure the beacon chain is initialized first.
	if nodeConfig.ShardID != shard.BeaconChainShardID {
		beacon, err := collection.ShardChain(shard.BeaconChainShardID, core.Options{EpochChain: true})
		if err != nil {
			_, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			os.Exit(1)
		}
		registry.SetBeaconchain(beacon)
	}

	blockchain, err := collection.ShardChain(nodeConfig.ShardID)
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
	registry.SetBlockchain(blockchain)
	if registry.GetBeaconchain() == nil {
		registry.SetBeaconchain(registry.GetBlockchain())
	}
	return registry
}
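// setupConsensusAndNode wires consensus and node together: it resolves the
// consensus parameters, loads the blacklist, allowed-transactions, and local
// accounts files, sets up the chains via setupChain, creates the consensus
// object, and configures syncing, group IDs, and view IDs on the new node.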
func setupConsensusAndNode(hc harmonyconfig.HarmonyConfig, nodeConfig *nodeconfig.ConfigType, registry *registry.Registry) *node.Node {
	decider := quorum.NewDecider(quorum.SuperMajorityVote, uint32(hc.General.ShardID))

	// Parse minPeers from harmonyconfig.HarmonyConfig
	var minPeers int
	var aggregateSig bool
	if hc.Consensus != nil {
		minPeers = hc.Consensus.MinPeers
		aggregateSig = hc.Consensus.AggregateSig
	} else {
		minPeers = defaultConsensusConfig.MinPeers
		aggregateSig = defaultConsensusConfig.AggregateSig
	}

	blacklist, err := setupBlacklist(hc)
	if err != nil {
		utils.Logger().Warn().Msgf("Blacklist setup error: %s", err.Error())
	}
	allowedTxs, err := setupAllowedTxs(hc)
	if err != nil {
		utils.Logger().Warn().Msgf("AllowedTxs setup error: %s", err.Error())
	}
	localAccounts, err := setupLocalAccounts(hc, blacklist)
	if err != nil {
		utils.Logger().Warn().Msgf("local accounts setup error: %s", err.Error())
	}

	registry = setupChain(hc, nodeConfig, registry)
	if registry.GetShardChainCollection() == nil {
		panic("shard chain collection is nil")
	}
	registry.SetWebHooks(nodeConfig.WebHooks.Hooks)
	cxPool := core.NewCxPool(core.CxPoolSize)
	registry.SetCxPool(cxPool)

	// Consensus object.
	registry.SetIsBackup(isBackup(hc))
	currentConsensus, err := consensus.New(
		myHost, nodeConfig.ShardID, nodeConfig.ConsensusPriKey, registry, decider, minPeers, aggregateSig)

	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}

	currentNode := node.New(myHost, currentConsensus, blacklist, allowedTxs, localAccounts, &hc, registry)

	if hc.Legacy != nil && hc.Legacy.TPBroadcastInvalidTxn != nil {
		currentNode.BroadcastInvalidTx = *hc.Legacy.TPBroadcastInvalidTxn
	} else {
		currentNode.BroadcastInvalidTx = defaultBroadcastInvalidTx
	}
	// The syncing peer provider is chosen by the following rules:
	// 1. If starting with a localnet or offline, use local sync peers.
	// 2. If specified with --dns=false, use legacy syncing, which discovers
	//    peers by itself.
	// 3. Otherwise, use DNS for syncing.
	if hc.Network.NetworkType == nodeconfig.Localnet || hc.General.IsOffline {
		epochConfig := shard.Schedule.InstanceForEpoch(ethCommon.Big0)
		selfPort := hc.P2P.Port
		currentNode.SyncingPeerProvider = node.NewLocalSyncingPeerProvider(
			6000, uint16(selfPort), epochConfig.NumShards(), uint32(epochConfig.NumNodesPerShard()))
	} else {
		addrs := myHost.GetP2PHost().Addrs()
		currentNode.SyncingPeerProvider = node.NewDNSSyncingPeerProvider(hc.DNSSync.Zone, strconv.Itoa(hc.DNSSync.Port), addrs)
	}
	currentNode.NodeConfig.DNSZone = hc.DNSSync.Zone
	currentNode.NodeConfig.SetBeaconGroupID(
		nodeconfig.NewGroupIDByShardID(shard.BeaconChainShardID),
	)

	nodeconfig.GetDefaultConfig().DBDir = nodeConfig.DBDir
	processNodeType(hc, currentNode.NodeConfig)
	currentNode.NodeConfig.SetShardGroupID(nodeconfig.NewGroupIDByShardID(nodeconfig.ShardID(nodeConfig.ShardID)))
	currentNode.NodeConfig.SetClientGroupID(nodeconfig.NewClientGroupIDByShardID(shard.BeaconChainShardID))
	currentNode.NodeConfig.ConsensusPriKey = nodeConfig.ConsensusPriKey

	// This needs to be executed after consensus setup.
	if err := currentConsensus.InitConsensusWithValidators(); err != nil {
		utils.Logger().Warn().
			Int("shardID", hc.General.ShardID).
			Err(err).
			Msg("InitConsensusWithValidators failed")
	}

	// Set the consensus view ID from the current block number.
	viewID := currentNode.Blockchain().CurrentBlock().Header().ViewID().Uint64()
	currentConsensus.SetViewIDs(viewID + 1)
	utils.Logger().Info().
		Uint64("viewID", viewID).
		Msg("Init Blockchain")

	currentConsensus.PostConsensusJob = currentNode.PostConsensusProcessing
	// Update consensus information based on the blockchain.
	currentConsensus.SetMode(currentConsensus.UpdateConsensusInformation())
	currentConsensus.NextBlockDue = time.Now()
	return currentNode
}
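// setupTiKV initializes the Redis helper backing the state DB cache and
// installs a TiKV-backed DB factory as the process-wide default; it panics
// if Redis is unreachable.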
func setupTiKV(hc harmonyconfig.HarmonyConfig) shardchain.DBFactory {
	err := redis_helper.Init(hc.TiKV.StateDBRedisServerAddr)
	if err != nil {
		panic("cannot connect to redis: " + err.Error())
	}

	factory := &shardchain.TiKvFactory{
		PDAddr: hc.TiKV.PDAddr,
		Role:   hc.TiKV.Role,
		CacheConfig: statedb_cache.StateDBCacheConfig{
			CacheSizeInMB:        hc.TiKV.StateDBCacheSizeInMB,
			CachePersistencePath: hc.TiKV.StateDBCachePersistencePath,
			RedisServerAddr:      hc.TiKV.StateDBRedisServerAddr,
			RedisLRUTimeInDay:    hc.TiKV.StateDBRedisLRUTimeInDay,
			DebugHitRate:         hc.TiKV.Debug,
		},
	}

	tikv_manage.SetDefaultTiKVFactory(factory)
	return factory
}
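// processNodeType propagates the configured node type (explorer or
// validator) into both the package-level default role and this node's
// config.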
func processNodeType(hc harmonyconfig.HarmonyConfig, nodeConfig *nodeconfig.ConfigType) {
	switch hc.General.NodeType {
	case nodeTypeExplorer:
		nodeconfig.SetDefaultRole(nodeconfig.ExplorerNode)
		nodeConfig.SetRole(nodeconfig.ExplorerNode)

	case nodeTypeValidator:
		nodeconfig.SetDefaultRole(nodeconfig.Validator)
		nodeConfig.SetRole(nodeconfig.Validator)
	}
}
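// isBackup reports whether this node should run in backup mode; only
// validator nodes honor the General.IsBackup flag, explorer nodes never do.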
func isBackup(hc harmonyconfig.HarmonyConfig) (isBackup bool) {
	switch hc.General.NodeType {
	case nodeTypeExplorer:

	case nodeTypeValidator:
		return hc.General.IsBackup
	}
	return false
}
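// setupPprofService registers the pprof profiling service configured from
// the Pprof section of the config (hc.Pprof).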
func setupPprofService(node *node.Node, hc harmonyconfig.HarmonyConfig) {
	pprofConfig := pprof.Config{
		Enabled:            hc.Pprof.Enabled,
		ListenAddr:         hc.Pprof.ListenAddr,
		Folder:             hc.Pprof.Folder,
		ProfileNames:       hc.Pprof.ProfileNames,
		ProfileIntervals:   hc.Pprof.ProfileIntervals,
		ProfileDebugValues: hc.Pprof.ProfileDebugValues,
	}
	s := pprof.NewService(pprofConfig)
	node.RegisterService(service.Pprof, s)
}
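// setupPrometheusService registers the Prometheus metrics service, labeling
// metrics with network, node type, shard, and the p2p host identity; the
// TiKV role is added when running in elastic mode.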
func setupPrometheusService(node *node.Node, hc harmonyconfig.HarmonyConfig, sid uint32) {
	prometheusConfig := prometheus.Config{
		Enabled:    hc.Prometheus.Enabled,
		IP:         hc.Prometheus.IP,
		Port:       hc.Prometheus.Port,
		EnablePush: hc.Prometheus.EnablePush,
		Gateway:    hc.Prometheus.Gateway,
		Network:    hc.Network.NetworkType,
		Legacy:     hc.General.NoStaking,
		NodeType:   hc.General.NodeType,
		Shard:      sid,
		Instance:   myHost.GetID().Pretty(),
	}

	if hc.General.RunElasticMode {
		prometheusConfig.TikvRole = hc.TiKV.Role
	}

	p := prometheus.NewService(prometheusConfig)
	node.RegisterService(service.Prometheus, p)
}
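// setupSyncService registers the stream downloader service. Nodes on
// non-beacon shards also sync the beacon epoch chain, so the beacon helper
// (block channel plus insert hook) is attached for them.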
func setupSyncService(node *node.Node, host p2p.Host, hc harmonyconfig.HarmonyConfig) {
	blockchains := []core.BlockChain{node.Blockchain()}
	if node.Blockchain().ShardID() != shard.BeaconChainShardID {
		blockchains = append(blockchains, node.EpochChain())
	}

	dConfig := downloader.Config{
		ServerOnly:   !hc.Sync.Downloader,
		Network:      nodeconfig.NetworkType(hc.Network.NetworkType),
		Concurrency:  hc.Sync.Concurrency,
		MinStreams:   hc.Sync.MinPeers,
		InitStreams:  hc.Sync.InitStreams,
		SmSoftLowCap: hc.Sync.DiscSoftLowCap,
		SmHardLowCap: hc.Sync.DiscHardLowCap,
		SmHiCap:      hc.Sync.DiscHighCap,
		SmDiscBatch:  hc.Sync.DiscBatch,
	}
	// If we are running a side chain, we need to do some extra work for
	// beacon sync.
	if !node.IsRunningBeaconChain() {
		dConfig.BHConfig = &downloader.BeaconHelperConfig{
			BlockC:     node.BeaconBlockChannel,
			InsertHook: node.BeaconSyncHook,
		}
	}
	s := synchronize.NewService(host, blockchains, dConfig)

	node.RegisterService(service.Synchronize, s)

	d := s.Downloaders.GetShardDownloader(node.Blockchain().ShardID())
	if hc.Sync.Downloader && hc.General.NodeType != nodeTypeExplorer {
		node.Consensus.SetDownloader(d) // Set downloader when stream client is active
	}
}
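// setupStagedSyncService registers the staged stream sync service, the
// staged counterpart of setupSyncService, adding stage-specific knobs (sync
// mode, in-memory DB, progress logging) from the StagedSyncCfg section.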
func setupStagedSyncService(node *node.Node, host p2p.Host, hc harmonyconfig.HarmonyConfig) {
	blockchains := []core.BlockChain{node.Blockchain()}
	if node.Blockchain().ShardID() != shard.BeaconChainShardID {
		blockchains = append(blockchains, node.EpochChain())
	}

	sConfig := stagedstreamsync.Config{
		ServerOnly:           !hc.Sync.Downloader,
		SyncMode:             stagedstreamsync.SyncMode(hc.Sync.SyncMode),
		Network:              nodeconfig.NetworkType(hc.Network.NetworkType),
		Concurrency:          hc.Sync.Concurrency,
		MinStreams:           hc.Sync.MinPeers,
		InitStreams:          hc.Sync.InitStreams,
		MaxAdvertiseWaitTime: hc.Sync.MaxAdvertiseWaitTime,
		SmSoftLowCap:         hc.Sync.DiscSoftLowCap,
		SmHardLowCap:         hc.Sync.DiscHardLowCap,
		SmHiCap:              hc.Sync.DiscHighCap,
		SmDiscBatch:          hc.Sync.DiscBatch,
		UseMemDB:             hc.Sync.StagedSyncCfg.UseMemDB,
		LogProgress:          hc.Sync.StagedSyncCfg.LogProgress,
		DebugMode:            hc.Sync.StagedSyncCfg.DebugMode,
	}

	// If we are running a side chain, we need to do some extra work for
	// beacon sync.
	if !node.IsRunningBeaconChain() {
		sConfig.BHConfig = &stagedstreamsync.BeaconHelperConfig{
			BlockC:     node.BeaconBlockChannel,
			InsertHook: node.BeaconSyncHook,
		}
	}
	// Set up the stream sync service.
	s := stagedstreamsync.NewService(host, blockchains, node.Consensus, sConfig, hc.General.DataDir)

	node.RegisterService(service.StagedStreamSync, s)

	d := s.Downloaders.GetShardDownloader(node.Blockchain().ShardID())
	if hc.Sync.Downloader && hc.General.NodeType != nodeTypeExplorer {
		node.Consensus.SetDownloader(d) // Set downloader when stream client is active
	}
}
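// setupBlacklist loads the transaction-pool blacklist from the configured
// file. Each non-empty line carries one address, and anything after a '#'
// is a comment, so a line may look like this sketch (address hypothetical):
//
//	one1spshr72utf6rwxseaz339j09ed8p6f8ke370zj # double spender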
func setupBlacklist(hc harmonyconfig.HarmonyConfig) (map[ethCommon.Address]struct{}, error) {
	rosetta_common.InitRosettaFile(hc.TxPool.RosettaFixFile)

	utils.Logger().Debug().Msgf("Using blacklist file at `%s`", hc.TxPool.BlacklistFile)
	dat, err := os.ReadFile(hc.TxPool.BlacklistFile)
	if err != nil {
		return nil, err
	}
	addrMap := make(map[ethCommon.Address]struct{})
	for _, line := range strings.Split(string(dat), "\n") {
		if len(line) != 0 { // the blacklist file may end with empty lines
			b32 := strings.TrimSpace(strings.Split(line, "#")[0])
			if len(b32) == 0 { // skip comment-only lines
				continue
			}
			addr, err := common.ParseAddr(b32)
			if err != nil {
				return nil, err
			}
			addrMap[addr] = struct{}{}
		}
	}
	return addrMap, nil
}
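// parseAllowedTxs parses the allowed-transactions file. Judging from the
// splits below, each non-empty line has the shape
//
//	<from address> -> <to address>:<0x-prefixed calldata>
//
// mapping a sender to the single destination and payload it may submit.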
func parseAllowedTxs(data []byte) (map[ethCommon.Address]core.AllowedTxData, error) {
	allowedTxs := make(map[ethCommon.Address]core.AllowedTxData)
	for _, line := range strings.Split(string(data), "\n") {
		line = strings.TrimSpace(line)
		if len(line) != 0 { // the AllowedTxs file may end with empty lines
			substrings := strings.Split(line, "->")
			if len(substrings) < 2 {
				return nil, errors.Errorf("AllowedTxs: invalid line %q, expected <from> -> <to>:<data>", line)
			}
			fromStr := strings.TrimSpace(substrings[0])
			txSubstrings := strings.Split(substrings[1], ":")
			if len(txSubstrings) < 2 {
				return nil, errors.Errorf("AllowedTxs: invalid line %q, expected <from> -> <to>:<data>", line)
			}
			toStr := strings.TrimSpace(txSubstrings[0])
			dataStr := strings.TrimSpace(txSubstrings[1])
			from, err := common.ParseAddr(fromStr)
			if err != nil {
				return nil, err
			}
			to, err := common.ParseAddr(toStr)
			if err != nil {
				return nil, err
			}
			txData, err := hexutil.Decode(dataStr)
			if err != nil {
				return nil, err
			}
			allowedTxs[from] = core.AllowedTxData{
				To:   to,
				Data: txData,
			}
		}
	}
	return allowedTxs, nil
}
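// setupAllowedTxs reads the AllowedTxs file configured for the tx pool and
// hands its contents to parseAllowedTxs.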
func setupAllowedTxs(hc harmonyconfig.HarmonyConfig) (map[ethCommon.Address]core.AllowedTxData, error) {
	utils.Logger().Debug().Msgf("Using AllowedTxs file at `%s`", hc.TxPool.AllowedTxsFile)
	data, err := os.ReadFile(hc.TxPool.AllowedTxsFile)
	if err != nil {
		return nil, err
	}
	return parseAllowedTxs(data)
}
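// setupLocalAccounts loads locally-favored tx-pool addresses from an
// optional file (one address per line, '#' comments allowed). A missing
// file is not an error; duplicates and blacklisted addresses are dropped.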
func setupLocalAccounts(hc harmonyconfig.HarmonyConfig, blacklist map[ethCommon.Address]struct{}) ([]ethCommon.Address, error) {
	file := hc.TxPool.LocalAccountsFile
	// Check whether the file exists.
	var fileData string
	if _, err := os.Stat(file); err == nil {
		b, err := os.ReadFile(file)
		if err != nil {
			return nil, err
		}
		fileData = string(b)
	} else if errors.Is(err, os.ErrNotExist) {
		// The file does not exist; treat it as an empty local accounts list.
		return []ethCommon.Address{}, nil
	} else {
		// Some other error happened while statting the file.
		return nil, err
	}

	localAccounts := make(map[ethCommon.Address]struct{})
	lines := strings.Split(fileData, "\n")
	for _, line := range lines {
		if len(line) == 0 { // the file may end with empty lines
			continue
		}
		addrPart := strings.TrimSpace(strings.Split(line, "#")[0])
		if len(addrPart) == 0 { // skip comment-only lines
			continue
		}
		addr, err := common.ParseAddr(addrPart)
		if err != nil {
			return nil, err
		}
		// Skip blacklisted addresses.
		if _, exists := blacklist[addr]; exists {
			utils.Logger().Warn().Msgf("local account with address %s is blacklisted", addr.String())
			continue
		}
		localAccounts[addr] = struct{}{}
	}
	uniqueAddresses := make([]ethCommon.Address, 0, len(localAccounts))
	for addr := range localAccounts {
		uniqueAddresses = append(uniqueAddresses, addr)
	}

	return uniqueAddresses, nil
}
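// listenOSSigAndShutDown blocks until SIGINT or SIGTERM arrives, kicks off
// a graceful shutdown, and force-quits after ten further interrupts.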
func listenOSSigAndShutDown(node *node.Node) {
	// Prepare for graceful shutdown from OS signals.
	osSignal := make(chan os.Signal, 1)
	signal.Notify(osSignal, syscall.SIGINT, syscall.SIGTERM)
	sig := <-osSignal
	utils.Logger().Warn().Str("signal", sig.String()).Msg("Gracefully shutting down...")
	const msg = "Got %s signal. Gracefully shutting down...\n"
	fmt.Fprintf(os.Stderr, msg, sig)

	go node.ShutDown()

	for i := 10; i > 0; i-- {
		<-osSignal
		if i > 1 {
			fmt.Printf("Already shutting down, interrupt more to force quit: (times=%v)\n", i-1)
		}
	}
	fmt.Println("Forced QUIT.")
	os.Exit(-1)
}