package main

import (
	"fmt"
	"io/ioutil"
	"math/big"
	"math/rand"
	"net/http"
	_ "net/http/pprof"
	"os"
	"os/signal"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"syscall"
	"time"

	ethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/harmony-one/bls/ffi/go/bls"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"

	"github.com/harmony-one/harmony/api/service"
	"github.com/harmony-one/harmony/api/service/prometheus"
	"github.com/harmony-one/harmony/api/service/synchronize"
	"github.com/harmony-one/harmony/common/fdlimit"
	"github.com/harmony-one/harmony/common/ntp"
	"github.com/harmony-one/harmony/consensus"
	"github.com/harmony-one/harmony/consensus/quorum"
	"github.com/harmony-one/harmony/core"
	"github.com/harmony-one/harmony/hmy/downloader"
	"github.com/harmony-one/harmony/internal/cli"
	"github.com/harmony-one/harmony/internal/common"
	nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
	shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding"
	"github.com/harmony-one/harmony/internal/genesis"
	"github.com/harmony-one/harmony/internal/params"
	"github.com/harmony-one/harmony/internal/shardchain"
	"github.com/harmony-one/harmony/internal/utils"
	"github.com/harmony-one/harmony/multibls"
	"github.com/harmony-one/harmony/node"
	"github.com/harmony-one/harmony/numeric"
	"github.com/harmony-one/harmony/p2p"
	"github.com/harmony-one/harmony/shard"
	"github.com/harmony-one/harmony/webhooks"
)

// Global p2p host and initial genesis accounts, shared by the setup routines below.
var (
	myHost          p2p.Host
	initialAccounts = []*genesis.DeployAccount{}
)

var rootCmd = &cobra.Command{
	Use:   "harmony",
	Short: "harmony is the Harmony node binary file",
	Long: `harmony is the Harmony node binary file

Example usage:

# start a validator node with default bls folder (default bls key files in ./.hmy/blskeys)
./harmony

# start a validator node with customized bls key folder
./harmony --bls.dir [bls_folder]

# start a validator node with open RPC endpoints and customized ports
./harmony --http.ip=0.0.0.0 --http.port=[http_port] --ws.ip=0.0.0.0 --ws.port=[ws_port]

# start an explorer node
./harmony --run=explorer --run.shard=[shard_id]

# start a harmony internal node on testnet
./harmony --run.legacy --network testnet
`,
	Run: runHarmonyNode,
}

var configFlag = cli.StringFlag{
	Name:      "config",
	Usage:     "load node config from the config toml file.",
	Shorthand: "c",
	DefValue:  "",
}

func init() {
	rand.Seed(time.Now().UnixNano())
	cli.SetParseErrorHandle(func(err error) {
		os.Exit(128) // 128 - invalid command line arguments
	})

	configCmd.AddCommand(dumpConfigCmd)
	configCmd.AddCommand(updateConfigCmd)
	rootCmd.AddCommand(configCmd)
	rootCmd.AddCommand(versionCmd)

	if err := registerRootCmdFlags(); err != nil {
		os.Exit(2)
	}
	if err := registerDumpConfigFlags(); err != nil {
		os.Exit(2)
	}
}

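// main runs the cobra root command defined above.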
func main() {
	rootCmd.Execute()
}

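// registerRootCmdFlags registers the root command's flag set with the cli helper.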
func registerRootCmdFlags() error {
	flags := getRootFlags()

	return cli.RegisterFlags(rootCmd, flags)
}

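// runHarmonyNode is the root command handler: it prints the version and exits when
// requested, loads and validates the node configuration, then sets up logging,
// pprof, and the node itself.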
func runHarmonyNode(cmd *cobra.Command, args []string) {
	if cli.GetBoolFlagValue(cmd, versionFlag) {
		printVersion()
		os.Exit(0)
	}

	if err := prepareRootCmd(cmd); err != nil {
		fmt.Fprint(os.Stderr, err)
		os.Exit(128)
	}
	cfg, err := getHarmonyConfig(cmd)
	if err != nil {
		fmt.Fprint(os.Stderr, err)
		fmt.Println()
		cmd.Help()
		os.Exit(128)
	}

	setupNodeLog(cfg)
	setupPprof(cfg)
	setupNodeAndRun(cfg)
}

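// prepareRootCmd applies process-wide settings (Go DNS resolver, GOMAXPROCS,
// file descriptor limits) before the node starts.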
func prepareRootCmd(cmd *cobra.Command) error {
	// HACK Force usage of the Go DNS implementation rather than the C based one. Do it the right
	// way eventually; see the notes on lines 66-67 of https://golang.org/src/net/net.go, which say
	// the decision can be made at build time.
	os.Setenv("GODEBUG", "netdns=go")
	// Don't set higher than num of CPU. It will make go scheduler slower.
	runtime.GOMAXPROCS(runtime.NumCPU())
	// Raise fd limits
	return raiseFdLimits()
}

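// raiseFdLimits raises the process file descriptor limit to the allowed maximum.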
func raiseFdLimits() error {
	limit, err := fdlimit.Maximum()
	if err != nil {
		return errors.Wrap(err, "Failed to retrieve file descriptor allowance")
	}
	_, err = fdlimit.Raise(uint64(limit))
	if err != nil {
		return errors.Wrap(err, "Failed to raise file descriptor allowance")
	}
	return nil
}

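// getHarmonyConfig loads the node configuration either from the file given with
// --config or from the built-in defaults for the selected network, offers a config
// file migration when an older version is detected, then applies command line flag
// overrides and validates the result.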
func getHarmonyConfig(cmd *cobra.Command) (harmonyConfig, error) {
	var (
		config         harmonyConfig
		err            error
		migratedFrom   string
		configFile     string
		isUsingDefault bool
	)
	if cli.IsFlagChanged(cmd, configFlag) {
		configFile = cli.GetStringFlagValue(cmd, configFlag)
		config, migratedFrom, err = loadHarmonyConfig(configFile)
	} else {
		nt := getNetworkType(cmd)
		config = getDefaultHmyConfigCopy(nt)
		isUsingDefault = true
	}
	if err != nil {
		return harmonyConfig{}, err
	}
	if migratedFrom != defaultConfig.Version && !isUsingDefault {
fmt.Printf("Old config version detected %s\n",
|
|
|
|
migratedFrom)
|
|
|
|
stat, _ := os.Stdin.Stat()
|
|
|
|
// Ask to update if only using terminal
|
|
|
|
if stat.Mode()&os.ModeCharDevice != 0 {
|
|
|
|
if promptConfigUpdate() {
|
|
|
|
err := updateConfigFile(configFile)
|
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("Could not update config - %s", err.Error())
|
|
|
|
fmt.Println("Update config manually with `./harmony config update [config_file]`")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
} else {
|
|
|
|
fmt.Println("Update saved config with `./harmony config update [config_file]`")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
applyRootFlags(cmd, &config)
|
|
|
|
|
|
|
|
if err := validateHarmonyConfig(config); err != nil {
|
|
|
|
return harmonyConfig{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return config, nil
|
|
|
|
}
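// applyRootFlags overlays every command line flag group onto the loaded config.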
func applyRootFlags(cmd *cobra.Command, config *harmonyConfig) {
	// Misc flags shall be applied first since legacy ip / port is overwritten
	// by new ip / port flags
	applyLegacyMiscFlags(cmd, config)
	applyGeneralFlags(cmd, config)
	applyNetworkFlags(cmd, config)
	applyDNSSyncFlags(cmd, config)
	applyP2PFlags(cmd, config)
	applyHTTPFlags(cmd, config)
	applyWSFlags(cmd, config)
	applyRPCOptFlags(cmd, config)
	applyBLSFlags(cmd, config)
	applyConsensusFlags(cmd, config)
	applyTxPoolFlags(cmd, config)
	applyPprofFlags(cmd, config)
	applyLogFlags(cmd, config)
	applySysFlags(cmd, config)
	applyDevnetFlags(cmd, config)
	applyRevertFlags(cmd, config)
	applyPrometheusFlags(cmd, config)
	applySyncFlags(cmd, config)
}

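// setupNodeLog configures the rotating log file, verbosity, and optional log context.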
func setupNodeLog(config harmonyConfig) {
	logPath := filepath.Join(config.Log.Folder, config.Log.FileName)
	rotateSize := config.Log.RotateSize
	verbosity := config.Log.Verbosity

	utils.AddLogFile(logPath, rotateSize)
	utils.SetLogVerbosity(log.Lvl(verbosity))
	if config.Log.Context != nil {
		ip := config.Log.Context.IP
		port := config.Log.Context.Port
		utils.SetLogContext(ip, strconv.Itoa(port))
	}
}

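// setupPprof starts the pprof HTTP listener when profiling is enabled in the config.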
func setupPprof(config harmonyConfig) {
	enabled := config.Pprof.Enabled
	addr := config.Pprof.ListenAddr

	if enabled {
		go func() {
			http.ListenAndServe(addr, nil)
		}()
	}
}

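// setupNodeAndRun is the main bring-up path: it resolves the sharding schedule and
// node account, builds the global node config, creates the node, optionally reverts
// the chain, registers services, and finally starts networking and consensus. It
// blocks forever once the node is running.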
func setupNodeAndRun(hc harmonyConfig) {
	var err error

	nodeconfigSetShardSchedule(hc)
	nodeconfig.SetShardingSchedule(shard.Schedule)
	nodeconfig.SetVersion(getHarmonyVersion())

	if hc.General.NodeType == "validator" {
		var err error
		if hc.General.NoStaking {
			err = setupLegacyNodeAccount(hc)
		} else {
			err = setupStakingNodeAccount(hc)
		}
		if err != nil {
			fmt.Fprintf(os.Stderr, "cannot set up node account: %s\n", err)
			os.Exit(1)
		}
	}
	if hc.General.NodeType == "validator" {
		fmt.Printf("%s mode; node key %s -> shard %d\n",
			map[bool]string{false: "Legacy", true: "Staking"}[!hc.General.NoStaking],
			nodeconfig.GetDefaultConfig().ConsensusPriKey.GetPublicKeys().SerializeToHexStr(),
			initialAccounts[0].ShardID)
	}
	if hc.General.NodeType != "validator" && hc.General.ShardID >= 0 {
		for _, initialAccount := range initialAccounts {
			utils.Logger().Info().
				Uint32("original", initialAccount.ShardID).
				Int("override", hc.General.ShardID).
				Msg("ShardID Override")
			initialAccount.ShardID = uint32(hc.General.ShardID)
		}
	}

	nodeConfig, err := createGlobalConfig(hc)
	if err != nil {
		fmt.Fprintf(os.Stderr, "ERROR cannot configure node: %s\n", err)
		os.Exit(1)
	}

	// Update ethereum compatible chain ids
	params.UpdateEthChainIDByShard(nodeConfig.ShardID)

	currentNode := setupConsensusAndNode(hc, nodeConfig)
	nodeconfig.GetDefaultConfig().ShardID = nodeConfig.ShardID
	nodeconfig.GetDefaultConfig().IsOffline = nodeConfig.IsOffline
	nodeconfig.GetDefaultConfig().Downloader = nodeConfig.Downloader

	// Check NTP configuration
	accurate, err := ntp.CheckLocalTimeAccurate(nodeConfig.NtpServer)
	if !accurate {
		if os.IsTimeout(err) {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			fmt.Fprintf(os.Stderr, "NTP query timed out. Continuing.\n")
		} else {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			fmt.Fprintf(os.Stderr, "Error: local timeclock is not accurate. Please config NTP properly.\n")
		}
	}
	if err != nil {
		utils.Logger().Warn().Err(err).Msg("Check Local Time Accuracy Error")
	}

	// Parse RPC config
	nodeConfig.RPCServer = nodeconfig.RPCServerConfig{
		HTTPEnabled:  hc.HTTP.Enabled,
		HTTPIp:       hc.HTTP.IP,
		HTTPPort:     hc.HTTP.Port,
		WSEnabled:    hc.WS.Enabled,
		WSIp:         hc.WS.IP,
		WSPort:       hc.WS.Port,
		DebugEnabled: hc.RPCOpt.DebugEnabled,
	}

	// Parse rosetta config
	nodeConfig.RosettaServer = nodeconfig.RosettaServerConfig{
		HTTPEnabled: hc.HTTP.RosettaEnabled,
		HTTPIp:      hc.HTTP.IP,
		HTTPPort:    hc.HTTP.RosettaPort,
	}

	if hc.Revert != nil && hc.Revert.RevertBefore != 0 && hc.Revert.RevertTo != 0 {
		chain := currentNode.Blockchain()
		if hc.Revert.RevertBeacon {
			chain = currentNode.Beaconchain()
		}
		curNum := chain.CurrentBlock().NumberU64()
		if curNum < uint64(hc.Revert.RevertBefore) && curNum >= uint64(hc.Revert.RevertTo) {
			// Remove invalid blocks
			for chain.CurrentBlock().NumberU64() >= uint64(hc.Revert.RevertTo) {
				curBlock := chain.CurrentBlock()
				rollbacks := []ethCommon.Hash{curBlock.Hash()}
				if err := chain.Rollback(rollbacks); err != nil {
					fmt.Printf("Revert failed: %v\n", err)
					os.Exit(1)
				}
				lastSig := curBlock.Header().LastCommitSignature()
				sigAndBitMap := append(lastSig[:], curBlock.Header().LastCommitBitmap()...)
				chain.WriteCommitSig(curBlock.NumberU64()-1, sigAndBitMap)
			}
		}
	}

	startMsg := "==== New Harmony Node ===="
	if hc.General.NodeType == nodeTypeExplorer {
		startMsg = "==== New Explorer Node ===="
	}

	utils.Logger().Info().
		Str("BLSPubKey", nodeConfig.ConsensusPriKey.GetPublicKeys().SerializeToHexStr()).
		Uint32("ShardID", nodeConfig.ShardID).
		Str("ShardGroupID", nodeConfig.GetShardGroupID().String()).
		Str("BeaconGroupID", nodeConfig.GetBeaconGroupID().String()).
		Str("ClientGroupID", nodeConfig.GetClientGroupID().String()).
		Str("Role", currentNode.NodeConfig.Role().String()).
		Str("Version", getHarmonyVersion()).
		Str("multiaddress",
			fmt.Sprintf("/ip4/%s/tcp/%d/p2p/%s", hc.P2P.IP, hc.P2P.Port, myHost.GetID().Pretty()),
		).
		Msg(startMsg)

	nodeconfig.SetPeerID(myHost.GetID())

	// Setup services
	setupSyncService(currentNode, myHost, hc)

	if currentNode.NodeConfig.Role() == nodeconfig.Validator {
		currentNode.RegisterValidatorServices()
	} else if currentNode.NodeConfig.Role() == nodeconfig.ExplorerNode {
		currentNode.RegisterExplorerServices()
	}
	if hc.Prometheus.Enabled {
		setupPrometheusService(currentNode, hc, nodeConfig.ShardID)
	}

	if hc.DNSSync.Server && !hc.General.IsOffline {
		utils.Logger().Info().Msg("support gRPC sync server")
		currentNode.SupportGRPCSyncServer(hc.DNSSync.ServerPort)
	}

	if hc.DNSSync.Client && !hc.General.IsOffline {
		utils.Logger().Info().Msg("go with gRPC sync client")
		currentNode.StartGRPCSyncClient()
	}

	if err := currentNode.StartServices(); err != nil {
		fmt.Fprint(os.Stderr, err.Error())
		os.Exit(-1)
	}

	if err := currentNode.StartRPC(); err != nil {
		utils.Logger().Warn().
			Err(err).
			Msg("StartRPC failed")
	}

	if err := currentNode.StartRosetta(); err != nil {
		utils.Logger().Warn().
			Err(err).
			Msg("Start Rosetta failed")
	}

	go listenOSSigAndShutDown(currentNode)

	if !hc.General.IsOffline {
		if err := myHost.Start(); err != nil {
			utils.Logger().Fatal().
				Err(err).
				Msg("Start p2p host failed")
		}

		if err := currentNode.BootstrapConsensus(); err != nil {
			fmt.Fprint(os.Stderr, "could not bootstrap consensus", err.Error())
			if !currentNode.NodeConfig.IsOffline {
				os.Exit(-1)
			}
		}

		if err := currentNode.StartPubSub(); err != nil {
			fmt.Fprint(os.Stderr, "could not begin network message handling for node", err.Error())
			os.Exit(-1)
		}
	}

	select {}
}

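// nodeconfigSetShardSchedule picks the sharding schedule matching the configured
// network type; for devnet it builds a fixed schedule from the devnet config.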
func nodeconfigSetShardSchedule(config harmonyConfig) {
	switch config.Network.NetworkType {
	case nodeconfig.Mainnet:
		shard.Schedule = shardingconfig.MainnetSchedule
	case nodeconfig.Testnet:
		shard.Schedule = shardingconfig.TestnetSchedule
	case nodeconfig.Pangaea:
		shard.Schedule = shardingconfig.PangaeaSchedule
	case nodeconfig.Localnet:
		shard.Schedule = shardingconfig.LocalnetSchedule
	case nodeconfig.Partner:
		shard.Schedule = shardingconfig.PartnerSchedule
	case nodeconfig.Stressnet:
		shard.Schedule = shardingconfig.StressNetSchedule
	case nodeconfig.Devnet:
		var dnConfig devnetConfig
		if config.Devnet != nil {
			dnConfig = *config.Devnet
		} else {
			dnConfig = getDefaultDevnetConfigCopy()
		}

		devnetConfig, err := shardingconfig.NewInstance(
			uint32(dnConfig.NumShards), dnConfig.ShardSize, dnConfig.HmyNodeSize, numeric.OneDec(), genesis.HarmonyAccounts, genesis.FoundationalNodeAccounts, nil, shardingconfig.VLBPE)
		if err != nil {
			_, _ = fmt.Fprintf(os.Stderr, "ERROR invalid devnet sharding config: %s",
				err)
			os.Exit(1)
		}
		shard.Schedule = shardingconfig.NewFixedSchedule(devnetConfig)
	}
}

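// findAccountsByPubKeys collects the genesis accounts matching the given BLS public
// keys into the package-level initialAccounts slice.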
func findAccountsByPubKeys(config shardingconfig.Instance, pubKeys multibls.PublicKeys) {
	for _, key := range pubKeys {
		keyStr := key.Bytes.Hex()
		_, account := config.FindAccount(keyStr)
		if account != nil {
			initialAccounts = append(initialAccounts, account)
		}
	}
}

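// setupLegacyNodeAccount resolves the node account from the genesis / foundational
// node tables for a non-staking (legacy) validator and exits if no match is found.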
func setupLegacyNodeAccount(hc harmonyConfig) error {
	genesisShardingConfig := shard.Schedule.InstanceForEpoch(big.NewInt(core.GenesisEpoch))
	multiBLSPubKey := setupConsensusKeys(hc, nodeconfig.GetDefaultConfig())

	reshardingEpoch := genesisShardingConfig.ReshardingEpoch()
	if len(reshardingEpoch) > 0 {
		for _, epoch := range reshardingEpoch {
			config := shard.Schedule.InstanceForEpoch(epoch)
			findAccountsByPubKeys(config, multiBLSPubKey)
			if len(initialAccounts) != 0 {
				break
			}
		}
	} else {
		findAccountsByPubKeys(genesisShardingConfig, multiBLSPubKey)
	}

	if len(initialAccounts) == 0 {
		fmt.Fprintf(
			os.Stderr,
			"ERROR cannot find your BLS key in the genesis/FN tables: %s\n",
			multiBLSPubKey.SerializeToHexStr(),
		)
		os.Exit(100)
	}

	for _, account := range initialAccounts {
		fmt.Printf("My Genesis Account: %v\n", *account)
	}
	return nil
}

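// setupStakingNodeAccount derives the shard from the loaded consensus keys, checks
// that all keys belong to the same shard, and records one initial account per key.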
func setupStakingNodeAccount(hc harmonyConfig) error {
	pubKeys := setupConsensusKeys(hc, nodeconfig.GetDefaultConfig())
	shardID, err := nodeconfig.GetDefaultConfig().ShardIDFromConsensusKey()
	if err != nil {
		return errors.Wrap(err, "cannot determine shard to join")
	}
	if err := nodeconfig.GetDefaultConfig().ValidateConsensusKeysForSameShard(
		pubKeys, shardID,
	); err != nil {
		return err
	}
	for _, blsKey := range pubKeys {
		initialAccount := &genesis.DeployAccount{}
		initialAccount.ShardID = shardID
		initialAccount.BLSPublicKey = blsKey.Bytes.Hex()
		initialAccount.Address = ""
		initialAccounts = append(initialAccounts, initialAccount)
	}
	return nil
}

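// createGlobalConfig builds the per-shard nodeconfig: consensus keys, network type,
// archival flags, the P2P key and host, database directory, and optional webhooks.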
func createGlobalConfig(hc harmonyConfig) (*nodeconfig.ConfigType, error) {
	var err error

	if len(initialAccounts) == 0 {
		initialAccounts = append(initialAccounts, &genesis.DeployAccount{ShardID: uint32(hc.General.ShardID)})
	}
	nodeConfig := nodeconfig.GetShardConfig(initialAccounts[0].ShardID)
	if hc.General.NodeType == nodeTypeValidator {
		// Set up consensus keys.
		setupConsensusKeys(hc, nodeConfig)
	} else {
		// set dummy bls key for consensus object
		nodeConfig.ConsensusPriKey = multibls.GetPrivateKeys(&bls.SecretKey{})
	}

	// Set network type
	netType := nodeconfig.NetworkType(hc.Network.NetworkType)
	nodeconfig.SetNetworkType(netType)                // sets for both global and shard configs
	nodeConfig.SetShardID(initialAccounts[0].ShardID) // sets shard ID
	nodeConfig.SetArchival(hc.General.IsBeaconArchival, hc.General.IsArchival)
	nodeConfig.IsOffline = hc.General.IsOffline
	nodeConfig.Downloader = hc.Sync.Downloader

	// P2P private key is used for secure message transfer between p2p nodes.
	nodeConfig.P2PPriKey, _, err = utils.LoadKeyFromFile(hc.P2P.KeyFile)
	if err != nil {
		return nil, errors.Wrapf(err, "cannot load or create P2P key at %#v",
			hc.P2P.KeyFile)
	}

	selfPeer := p2p.Peer{
		IP:              hc.P2P.IP,
		Port:            strconv.Itoa(hc.P2P.Port),
		ConsensusPubKey: nodeConfig.ConsensusPriKey[0].Pub.Object,
	}

	myHost, err = p2p.NewHost(p2p.HostConfig{
		Self:          &selfPeer,
		BLSKey:        nodeConfig.P2PPriKey,
		BootNodes:     hc.Network.BootNodes,
		DataStoreFile: hc.P2P.DHTDataStore,
	})
	if err != nil {
		return nil, errors.Wrap(err, "cannot create P2P network host")
	}

	nodeConfig.DBDir = hc.General.DataDir

	if hc.Legacy != nil && hc.Legacy.WebHookConfig != nil && len(*hc.Legacy.WebHookConfig) != 0 {
		p := *hc.Legacy.WebHookConfig
		config, err := webhooks.NewWebHooksFromPath(p)
		if err != nil {
			fmt.Fprintf(
				os.Stderr, "yaml path is bad: %s", p,
			)
			os.Exit(1)
		}
		nodeConfig.WebHooks.Hooks = config
	}

	nodeConfig.NtpServer = hc.Sys.NtpServer

	return nodeConfig, nil
}

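// setupConsensusAndNode creates the consensus object and the node instance and
// applies the consensus- and tx-pool-related settings from the config.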
func setupConsensusAndNode(hc harmonyConfig, nodeConfig *nodeconfig.ConfigType) *node.Node {
	// Consensus object.
	// TODO: consensus object shouldn't start here
	decider := quorum.NewDecider(quorum.SuperMajorityVote, uint32(hc.General.ShardID))

	currentConsensus, err := consensus.New(
		myHost, nodeConfig.ShardID, p2p.Peer{}, nodeConfig.ConsensusPriKey, decider,
	)
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "Error :%v \n", err)
		os.Exit(1)
	}
	currentConsensus.Decider.SetMyPublicKeyProvider(func() (multibls.PublicKeys, error) {
		return currentConsensus.GetPublicKeys(), nil
	})

	// Parse minPeers from harmonyConfig
	var minPeers int
	var aggregateSig bool
	if hc.Consensus != nil {
		minPeers = hc.Consensus.MinPeers
		aggregateSig = hc.Consensus.AggregateSig
	} else {
		minPeers = defaultConsensusConfig.MinPeers
		aggregateSig = defaultConsensusConfig.AggregateSig
	}
	currentConsensus.MinPeers = minPeers
	currentConsensus.AggregateSig = aggregateSig

	blacklist, err := setupBlacklist(hc)
	if err != nil {
		utils.Logger().Warn().Msgf("Blacklist setup error: %s", err.Error())
	}

	// Current node.
	chainDBFactory := &shardchain.LDBFactory{RootDir: nodeConfig.DBDir}
    currentNode := node.New(myHost, currentConsensus, chainDBFactory, blacklist, nodeConfig.ArchiveModes())

    if hc.Legacy != nil && hc.Legacy.TPBroadcastInvalidTxn != nil {
        currentNode.BroadcastInvalidTx = *hc.Legacy.TPBroadcastInvalidTxn
    } else {
        currentNode.BroadcastInvalidTx = defaultBroadcastInvalidTx
    }
    // The syncing peer provider is chosen by the following rules:
    // 1. If running a localnet or in offline mode, use local sync peers.
    // 2. If --dns=false is specified, use legacy syncing, which discovers
    //    peers by itself.
    // 3. Otherwise, use DNS for syncing.
    if hc.Network.NetworkType == nodeconfig.Localnet || hc.General.IsOffline {
        epochConfig := shard.Schedule.InstanceForEpoch(ethCommon.Big0)
        selfPort := hc.P2P.Port
        currentNode.SyncingPeerProvider = node.NewLocalSyncingPeerProvider(
            6000, uint16(selfPort), epochConfig.NumShards(), uint32(epochConfig.NumNodesPerShard()))
    } else if hc.DNSSync.LegacySyncing {
        currentNode.SyncingPeerProvider = node.NewLegacySyncingPeerProvider(currentNode)
    } else {
        currentNode.SyncingPeerProvider = node.NewDNSSyncingPeerProvider(hc.DNSSync.Zone, strconv.Itoa(hc.DNSSync.Port))
    }

    // TODO: refactor the creation of blockchain out of node.New()
    currentConsensus.Blockchain = currentNode.Blockchain()
    currentNode.NodeConfig.DNSZone = hc.DNSSync.Zone
    currentNode.NodeConfig.SetBeaconGroupID(
        nodeconfig.NewGroupIDByShardID(shard.BeaconChainShardID),
    )

    nodeconfig.GetDefaultConfig().DBDir = nodeConfig.DBDir
    switch hc.General.NodeType {
    case nodeTypeExplorer:
        nodeconfig.SetDefaultRole(nodeconfig.ExplorerNode)
        currentNode.NodeConfig.SetRole(nodeconfig.ExplorerNode)
    case nodeTypeValidator:
        nodeconfig.SetDefaultRole(nodeconfig.Validator)
        currentNode.NodeConfig.SetRole(nodeconfig.Validator)
    }
    currentNode.NodeConfig.SetShardGroupID(nodeconfig.NewGroupIDByShardID(nodeconfig.ShardID(nodeConfig.ShardID)))
    currentNode.NodeConfig.SetClientGroupID(nodeconfig.NewClientGroupIDByShardID(shard.BeaconChainShardID))
    currentNode.NodeConfig.ConsensusPriKey = nodeConfig.ConsensusPriKey
    // This needs to be executed after consensus setup.
    if err := currentNode.InitConsensusWithValidators(); err != nil {
        utils.Logger().Warn().
            Int("shardID", hc.General.ShardID).
            Err(err).
            Msg("InitConsensusWithValidators failed")
    }
    // Seed the consensus view ID from the current block's view ID.
    viewID := currentNode.Blockchain().CurrentBlock().Header().ViewID().Uint64()
    currentConsensus.SetViewIDs(viewID + 1)
    utils.Logger().Info().
        Uint64("viewID", viewID).
        Msg("Init Blockchain")
    // Assign closure functions to the consensus object
    currentConsensus.SetBlockVerifier(currentNode.VerifyNewBlock)
    currentConsensus.PostConsensusJob = currentNode.PostConsensusProcessing
    // update consensus information based on the blockchain
    currentConsensus.SetMode(currentConsensus.UpdateConsensusInformation())
    currentConsensus.NextBlockDue = time.Now()
    return currentNode
}
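
// setupPrometheusService registers the Prometheus metrics service on the node,
// built from the Prometheus section of the harmony config.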
func setupPrometheusService(node *node.Node, hc harmonyConfig, sid uint32) {
    prometheusConfig := prometheus.Config{
        Enabled:    hc.Prometheus.Enabled,
        IP:         hc.Prometheus.IP,
        Port:       hc.Prometheus.Port,
        EnablePush: hc.Prometheus.EnablePush,
        Gateway:    hc.Prometheus.Gateway,
        Network:    hc.Network.NetworkType,
        Legacy:     hc.General.NoStaking,
        NodeType:   hc.General.NodeType,
        Shard:      sid,
        Instance:   myHost.GetID().Pretty(),
    }
    p := prometheus.NewService(prometheusConfig)
    node.RegisterService(service.Prometheus, p)
}
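
// setupSyncService creates the downloader-based synchronize service for the node's
// chains (plus the beacon-chain helper on nodes not running the beacon chain) and
// registers it on the node.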
func setupSyncService(node *node.Node, host p2p.Host, hc harmonyConfig) {
    blockchains := []*core.BlockChain{node.Blockchain()}
    if !node.IsRunningBeaconChain() {
        blockchains = append(blockchains, node.Beaconchain())
    }

    dConfig := downloader.Config{
        ServerOnly:   !hc.Sync.Downloader,
        Network:      nodeconfig.NetworkType(hc.Network.NetworkType),
        Concurrency:  hc.Sync.Concurrency,
        MinStreams:   hc.Sync.MinPeers,
        InitStreams:  hc.Sync.InitStreams,
        SmSoftLowCap: hc.Sync.DiscSoftLowCap,
        SmHardLowCap: hc.Sync.DiscHardLowCap,
        SmHiCap:      hc.Sync.DiscHighCap,
        SmDiscBatch:  hc.Sync.DiscBatch,
    }
    // If we are not running the beacon chain ourselves, we need some extra
    // configuration for beacon sync.
    if !node.IsRunningBeaconChain() {
        dConfig.BHConfig = &downloader.BeaconHelperConfig{
            BlockC:     node.BeaconBlockChannel,
            InsertHook: node.BeaconSyncHook,
        }
    }
    s := synchronize.NewService(host, blockchains, dConfig)

    node.RegisterService(service.Synchronize, s)

    d := s.Downloaders.GetShardDownloader(node.Blockchain().ShardID())
    node.Consensus.SetDownloader(d)
}
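
// setupBlacklist loads the transaction-pool blacklist file and parses each
// non-empty line as a bech32 address; anything after a '#' is treated as a
// comment. For example (illustrative address only), a line might look like:
//
//     one1exampleaddress0000000000000000000000000  # reason for ban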
func setupBlacklist(hc harmonyConfig) (map[ethCommon.Address]struct{}, error) {
    utils.Logger().Debug().Msgf("Using blacklist file at `%s`", hc.TxPool.BlacklistFile)
    dat, err := ioutil.ReadFile(hc.TxPool.BlacklistFile)
    if err != nil {
        return nil, err
    }
    addrMap := make(map[ethCommon.Address]struct{})
    for _, line := range strings.Split(string(dat), "\n") {
        if len(line) != 0 { // blacklist file may have a trailing empty line
            b32 := strings.TrimSpace(strings.Split(line, "#")[0])
            if len(b32) == 0 { // skip comment-only or whitespace-only lines
                continue
            }
            addr, err := common.Bech32ToAddress(b32)
            if err != nil {
                return nil, err
            }
            addrMap[addr] = struct{}{}
        }
    }
    return addrMap, nil
}
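
// listenOSSigAndShutDown blocks until SIGINT or SIGTERM arrives, kicks off a
// graceful shutdown, and force-quits if the user keeps interrupting.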
func listenOSSigAndShutDown(node *node.Node) {
    // Prepare for graceful shutdown from os signals.
    // The channel must be buffered: signal.Notify does not block on delivery.
    osSignal := make(chan os.Signal, 1)
    signal.Notify(osSignal, syscall.SIGINT, syscall.SIGTERM)
    sig := <-osSignal
    utils.Logger().Warn().Str("signal", sig.String()).Msg("Gracefully shutting down...")
    const msg = "Got %s signal. Gracefully shutting down...\n"
    fmt.Fprintf(os.Stderr, msg, sig)

    go node.ShutDown()

    for i := 10; i > 0; i-- {
        <-osSignal
        if i > 1 {
            fmt.Printf("Already shutting down, interrupt more to force quit: (times=%v)\n", i-1)
        }
    }
    fmt.Println("Forced QUIT.")
    os.Exit(-1)
}