setup for having staking service working

pull/506/head
Minh Doan 6 years ago committed by Minh Doan
parent 337e19d2b4
commit 35ff532f2f
  1. cmd/harmony.go (68 lines changed)
  2. test/deploy_newnode.sh (121 lines added)

cmd/harmony.go

@@ -9,18 +9,13 @@ import (
 	"runtime"
 	"time"
-	"github.com/harmony-one/harmony/drand"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
-	peerstore "github.com/libp2p/go-libp2p-peerstore"
-	multiaddr "github.com/multiformats/go-multiaddr"
 	"github.com/harmony-one/harmony/consensus"
-	"github.com/harmony-one/harmony/internal/attack"
-	pkg_newnode "github.com/harmony-one/harmony/internal/newnode"
+	"github.com/harmony-one/harmony/drand"
 	"github.com/harmony-one/harmony/internal/profiler"
 	"github.com/harmony-one/harmony/internal/utils"
+	contract_constants "github.com/harmony-one/harmony/internal/utils/contract"
 	"github.com/harmony-one/harmony/node"
 	"github.com/harmony-one/harmony/p2p"
 	"github.com/harmony-one/harmony/p2p/p2pimpl"
@@ -90,7 +85,6 @@ func main() {
 	ip := flag.String("ip", "127.0.0.1", "IP of the node")
 	port := flag.String("port", "9000", "port of the node.")
 	logFolder := flag.String("log_folder", "latest", "the folder collecting the logs of this execution")
-	attackedMode := flag.Int("attacked_mode", 0, "0 means not attacked, 1 means attacked, 2 means being open to be selected as attacked")
 	dbSupported := flag.Bool("db_supported", true, "false means not db_supported, true means db_supported")
 	freshDB := flag.Bool("fresh_db", false, "true means the existing disk based db will be removed")
 	profile := flag.Bool("profile", false, "Turn on profiling (CPU, Memory).")
@@ -98,11 +92,6 @@ func main() {
 	versionFlag := flag.Bool("version", false, "Output version info")
 	onlyLogTps := flag.Bool("only_log_tps", false, "Only log TPS if true")
-	//This IP belongs to jenkins.harmony.one
-	bcIP := flag.String("bc", "127.0.0.1", "IP of the beacon chain")
-	bcPort := flag.String("bc_port", "8081", "port of the beacon chain")
-	bcAddr := flag.String("bc_addr", "", "MultiAddr of the beacon chain")

 	//Leader needs to have a minimal number of peers to start consensus
 	minPeers := flag.Int("min_peers", 100, "Minimal number of Peers in shard")
@@ -116,6 +105,9 @@ func main() {
 	// isBeacon indicates this node is a beacon chain node
 	isBeacon := flag.Bool("is_beacon", false, "true means this node is a beacon chain node")
+	// isNewNode indicates this node is a new node
+	isNewNode := flag.Bool("is_newnode", false, "true means this node is a new node")
 	// isLeader indicates this node is a beacon chain leader node during the bootstrap process
 	isLeader := flag.Bool("is_leader", false, "true means this node is a beacon chain leader node")
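
Note: taken together with the two hunks above, the node-identity flag surface after this change is -is_beacon, -is_newnode and -is_leader, while attacked_mode, bc, bc_port and bc_addr are gone. Below is a minimal standalone sketch, not the actual cmd/harmony.go; only the flag names and help strings are taken from the diff, the surrounding program is illustrative scaffolding.

    package main

    import (
        "flag"
        "fmt"
    )

    func main() {
        // Flag names and help strings mirror the patched cmd/harmony.go.
        isBeacon := flag.Bool("is_beacon", false, "true means this node is a beacon chain node")
        isNewNode := flag.Bool("is_newnode", false, "true means this node is a new node")
        isLeader := flag.Bool("is_leader", false, "true means this node is a beacon chain leader node")
        flag.Parse()

        // Print the parsed node-identity flags.
        fmt.Printf("is_beacon=%v is_newnode=%v is_leader=%v\n", *isBeacon, *isNewNode, *isLeader)
    }
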
@@ -150,7 +142,6 @@ func main() {
 	var leader p2p.Peer
 	var selfPeer p2p.Peer
 	var clientPeer *p2p.Peer
-	var BCPeer *p2p.Peer
 	var role string

 	nodePriKey, _, err := utils.LoadKeyFromFile(*keyFile)
@@ -164,48 +155,6 @@ func main() {
 	}
 	selfPeer = p2p.Peer{IP: *ip, Port: *port, ValidatorID: -1, PubKey: peerPubKey}

-	if !*libp2pPD {
-		if *bcAddr != "" {
-			// Turn the destination into a multiaddr.
-			maddr, err := multiaddr.NewMultiaddr(*bcAddr)
-			if err != nil {
-				panic(err)
-			}
-			// Extract the peer ID from the multiaddr.
-			info, err := peerstore.InfoFromP2pAddr(maddr)
-			if err != nil {
-				panic(err)
-			}
-			BCPeer = &p2p.Peer{IP: *bcIP, Port: *bcPort, Addrs: info.Addrs, PeerID: info.ID}
-		} else {
-			BCPeer = &p2p.Peer{IP: *bcIP, Port: *bcPort}
-		}
-		// Use Peer Discovery to get shard/leader/peer/...
-		candidateNode := pkg_newnode.New(*ip, *port, nodePriKey)
-		candidateNode.AddPeer(BCPeer)
-		candidateNode.ContactBeaconChain(*BCPeer)
-		shardID = candidateNode.GetShardID()
-		leader = candidateNode.GetLeader()
-		selfPeer = candidateNode.GetSelfPeer()
-		clientPeer = candidateNode.GetClientPeer()
-		selfPeer.PubKey = candidateNode.PubK
-		if leader.IP == *ip && leader.Port == *port {
-			role = "leader"
-		} else {
-			role = "validator"
-		}
-		if role == "validator" {
-			// Attack determination.
-			attack.GetInstance().SetAttackEnabled(attackDetermination(*attackedMode))
-		}
-		utils.UseLibP2P = false
-	} else {
 		if *isLeader {
 			role = "leader"
 			leader = selfPeer
@@ -213,7 +162,7 @@ func main() {
 			role = "validator"
 		}
 		utils.UseLibP2P = true
-	}

 	// Init logging.
 	loggingInit(*logFolder, role, *ip, *port, *onlyLogTps)
@@ -253,6 +202,7 @@ func main() {
 	currentNode := node.New(host, consensus, ldb)
 	currentNode.Consensus.OfflinePeers = currentNode.OfflinePeers
 	currentNode.Role = node.NewNode
+	currentNode.AccountKey = contract_constants.GenesisBeaconAccountPriKey

 	if *isBeacon {
 		if role == "leader" {
@@ -268,7 +218,9 @@ func main() {
 		}
 		currentNode.AddBeaconChainDatabase(beacondb)

-		if role == "leader" {
+		if *isNewNode {
+			currentNode.Role = node.NewNode
+		} else if role == "leader" {
 			currentNode.Role = node.ShardLeader
 		} else {
 			currentNode.Role = node.ShardValidator
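
Note: the new-node handling is spread over several hunks, so here is a minimal, self-contained sketch of how the role selection reads after this patch. Role, NewNode, ShardLeader and ShardValidator below are local stand-ins for the identifiers in the harmony node package; only the branching logic mirrors the change.

    package main

    import "fmt"

    // Role is a local stand-in for the harmony node.Role type; the constant
    // names mirror the ones referenced in the diff.
    type Role int

    const (
        NewNode Role = iota
        ShardLeader
        ShardValidator
    )

    // pickRole mirrors the role selection introduced by this hunk: a node
    // started with -is_newnode is tagged NewNode even if it would otherwise
    // qualify as the shard leader.
    func pickRole(isNewNode bool, role string) Role {
        if isNewNode {
            return NewNode
        } else if role == "leader" {
            return ShardLeader
        }
        return ShardValidator
    }

    func main() {
        fmt.Println(pickRole(true, "leader"))     // 0 (NewNode)
        fmt.Println(pickRole(false, "leader"))    // 1 (ShardLeader)
        fmt.Println(pickRole(false, "validator")) // 2 (ShardValidator)
    }

In other words, -is_newnode takes precedence: a node started with it is tagged node.NewNode even if it happens to match the leader's IP and port.
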

test/deploy_newnode.sh (new file)

@@ -0,0 +1,121 @@
#!/bin/bash
ROOT=$(dirname $0)/..
USER=$(whoami)
. "${ROOT}/scripts/setup_bls_build_flags.sh"
set -x
set -eo pipefail
function check_result() {
    find $log_folder -name "leader-*.log" > $log_folder/all-leaders.txt
    find $log_folder -name "validator-*.log" > $log_folder/all-validators.txt

    echo "====== RESULTS ======"
    results=$($ROOT/test/cal_tps.sh $log_folder/all-leaders.txt $log_folder/all-validators.txt)
    echo $results | tee -a $LOG_FILE
    echo $results > $log_folder/tps.log
}
function cleanup() {
    for pid in $(/bin/ps -fu $USER | grep "harmony\|txgen\|soldier\|commander\|profiler\|beacon\|bootnode" | grep -v "grep" | grep -v "vi" | awk '{print $2}'); do
        echo "Killed process: $pid"
        $DRYRUN kill -9 $pid 2> /dev/null
    done

    # Remove bc_config.json before starting the experiment.
    rm -f bc_config.json
    rm -rf ./db/harmony_*
}
function killnode() {
    local port=$1

    if [ -n "$port" ]; then
        pid=$(/bin/ps -fu $USER | grep "harmony" | grep "$port" | awk '{print $2}')
        echo "killing node with port: $port"
        $DRYRUN kill -9 $pid 2> /dev/null
        echo "node with port: $port is killed"
    fi
}
trap cleanup SIGINT SIGTERM
function usage {
    local ME=$(basename $0)
    cat<<EOU
USAGE: $ME [OPTIONS] config_file_name

   -h             print this help message
   -d             enable db support (default: $DB)
   -t             toggle txgen (default: $TXGEN)
   -D duration    txgen run duration (default: $DURATION)
   -m min_peers   minimal number of peers to start consensus (default: $MIN)
   -s shards      number of shards (default: $SHARDS)
   -k nodeport    kill the node with specified port number (default: $KILLPORT)
   -n             dryrun mode (default: $DRYRUN)
   -S             enable sync test (default: $SYNC)
   -P             enable libp2p peer discovery test (default: $P2P)

This script will build the harmony binary and start a new node based on the configuration file.

EXAMPLES:

   $ME local_config.txt
   $ME -P local_config.txt
EOU
    exit 0
}
DB=
TXGEN=true
DURATION=90
MIN=5
SHARDS=2
KILLPORT=9004
SYNC=true
DRYRUN=
P2P=false
while getopts "hdtD:m:s:k:nSP" option; do
    case $option in
        h) usage ;;
        d) DB='-db_supported' ;;
        t) TXGEN=false ;;
        D) DURATION=$OPTARG ;;
        m) MIN=$OPTARG ;;
        s) SHARDS=$OPTARG ;;
        k) KILLPORT=$OPTARG ;;
        n) DRYRUN=echo ;;
        S) SYNC=true ;;
        P) P2P=true ;;
    esac
done
shift $((OPTIND-1))
# `go run` generates a temporary exe every time it is invoked. On Windows,
# the system pops up a network security dialog for each such instance and
# there is no way to suppress it. With `go build` producing a single exe,
# the dialog only appears once, the very first time. `go build` is also
# recommended when testing the whole executable.
pushd $ROOT
echo "compiling ..."
go build -o bin/harmony cmd/harmony.go
popd
# Create a tmp folder for logs
t=$(date +"%Y%m%d-%H%M%S")
log_folder="tmp_log/log-$t"
mkdir -p $log_folder
LOG_FILE=$log_folder/r.log
HMY_OPT=
HMY_OPT2=
HMY_OPT3=
($DRYRUN $ROOT/bin/harmony -ip 127.0.0.1 -port 9100 -log_folder $log_folder -is_newnode $DB -min_peers $MIN $HMY_OPT $HMY_OPT2 $HMY_OPT3 -key /tmp/127.0.0.1-9100.key 2>&1 | tee -a $LOG_FILE ) &