Merge branch 'master' of github.com:simple-rules/harmony-benchmark

pull/61/head
Rongjian Lan 6 years ago
commit f398fc21e0
Files changed (12), with changed-line counts:

  1. agent/main.go (73)
  2. aws-experiment-launch/experiment/commander/main.go (27)
  3. aws-experiment-launch/experiment/soldier/main.go (62)
  4. benchmark.go (45)
  5. client/btctxgen/main.go (9)
  6. client/txgen/main.go (9)
  7. configr/main.go (153)
  8. deploy.sh (1)
  9. kill_node.sh (2)
  10. profiler/main.go (55)
  11. run_experiment.sh (1)
  12. utils/utils.go (20)
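
In sum, this merge replaces the standalone monitoring agent (agent/main.go, deleted below) with a new profiler binary (profiler/main.go) that a shard leader launches against its own pid, moves the soldier's runCmd helper into utils.RunCmd, and reworks configr from free functions over a raw [][]string into a Configr type holding typed ConfigEntry rows. benchmark.go, both transaction generators, the commander, and the soldier switch to the new call pattern; a minimal sketch, using only names that appear in the diff below:

    cfg := configr.NewConfigr()
    cfg.ReadConfigFile("config.txt") // space-separated "ip port role shardID" rows
    shardID := cfg.GetShardID("127.0.0.1", "9000")
    leader := cfg.GetLeader(shardID)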

agent/main.go
@@ -1,73 +0,0 @@
-package main
-
-import (
-	"flag"
-	"fmt"
-	"time"
-
-	"github.com/shirou/gopsutil/process"
-	"github.com/simple-rules/harmony-benchmark/configr"
-	"github.com/simple-rules/harmony-benchmark/log"
-)
-
-var (
-	shardID string
-	ip      string
-	port    string
-	pid     int32
-)
-
-func logMemUsage() {
-	p, _ := process.NewProcess(pid)
-	for {
-		info, _ := p.MemoryInfo()
-		memMap, _ := p.MemoryMaps(false)
-		log.Info("Mem Report", "info", info, "map", memMap, "shardID", shardID)
-		time.Sleep(3 * time.Second)
-	}
-}
-
-func logCPUUsage() {
-	p, _ := process.NewProcess(pid)
-	for {
-		percent, _ := p.CPUPercent()
-		times, _ := p.Times()
-		log.Info("CPU Report", "percent", percent, "times", times, "shardID", shardID)
-		time.Sleep(3 * time.Second)
-	}
-}
-
-func main() {
-	_ip := flag.String("ip", "127.0.0.1", "IP of the node")
-	_port := flag.String("port", "9000", "port of the node.")
-	_pid := flag.Int("pid", 0, "process id of the node")
-	configFile := flag.String("config_file", "config.txt", "file containing all ip addresses")
-	logFolder := flag.String("log_folder", "latest", "the folder collecting the logs of this execution")
-	flag.Parse()
-
-	ip = *_ip
-	port = *_port
-	pid = int32(*_pid)
-
-	config, _ := configr.ReadConfigFile(*configFile)
-	shardID := configr.GetShardID(ip, port, &config)
-	leader := configr.GetLeader(shardID, &config)
-
-	var role string
-	if leader.Ip == ip && leader.Port == port {
-		role = "leader"
-	} else {
-		role = "validator"
-	}
-
-	// Setup a logger to stdout and log file.
-	logFileName := fmt.Sprintf("./%v/agent-%s-%v-%v.log", *logFolder, role, ip, port)
-	h := log.MultiHandler(
-		log.StdoutHandler,
-		log.Must.FileHandler(logFileName, log.JSONFormat()), // Log to file
-		// log.Must.NetHandler("tcp", ":3000", log.JSONFormat()) // Log to remote
-	)
-	log.Root().SetHandler(h)
-
-	go logMemUsage()
-	logCPUUsage()
-}
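
The agent deleted above reappears in slimmer form as profiler/main.go later in this diff: the same gopsutil sampling against a -pid target, but with the memory and CPU loops folded into a single logPerf loop and the shard id passed directly as -shard_id rather than derived from the config file.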

aws-experiment-launch/experiment/commander/main.go
@@ -36,7 +36,7 @@ type commanderSetting struct {
 	// Options in s3 mode
 	configURL string
-	configs   [][]string
+	configr   *configr.Configr
 }

 type sessionInfo struct {
@@ -54,14 +54,6 @@ const (
 	DefaultConfigUrl = "https://s3-us-west-2.amazonaws.com/unique-bucket-bin/distribution_config.txt"
 )

-func readConfigFile() [][]string {
-	if result, err := configr.ReadConfigFile(DistributionFileName); err != nil {
-		panic(err)
-	} else {
-		return result
-	}
-}
-
 func handleCommand(command string) {
 	args := strings.Split(command, " ")
 	if len(args) <= 0 {
@@ -77,9 +69,9 @@ func handleCommand(command string) {
 		}
 	}

-	setting.configs = readConfigFile()
-	if setting.configs != nil {
-		log.Printf("The loaded config has %v nodes\n", len(setting.configs))
+	err := setting.configr.ReadConfigFile(DistributionFileName)
+	if err == nil {
+		log.Printf("The loaded config has %v nodes\n", len(setting.configr.GetConfigEntries()))
 	} else {
 		log.Println("Failed to read config file")
 	}
@@ -111,20 +103,21 @@ func config(ip string, port string, mode string, configURL string) {
 	} else {
 		setting.configURL = configURL
 	}
+	setting.configr = configr.NewConfigr()
 }

 func dictateNodes(command string) {
 	resultChan := make(chan int)
-	for _, config := range setting.configs {
-		ip := config[0]
-		port := "1" + config[1] // the port number of soldier is "1" + node port
-		addr := strings.Join([]string{ip, port}, ":")
+	configs := setting.configr.GetConfigEntries()
+	for _, entry := range configs {
+		port := "1" + entry.Port // the port number of soldier is "1" + node port
+		addr := strings.Join([]string{entry.IP, port}, ":")

 		go func(resultChan chan int) {
 			resultChan <- dictateNode(addr, command)
 		}(resultChan)
 	}
-	count := len(setting.configs)
+	count := len(configs)
 	res := 0
 	for ; count > 0; count-- {
 		res += <-resultChan

aws-experiment-launch/experiment/soldier/main.go
@@ -1,3 +1,10 @@
+/*
+Soldier is responsible for receiving commands from commander and doing tasks such as starting nodes, uploading logs.
+
+cd harmony-benchmark/bin
+go build -o soldier ../aws-experiment-launch/experiment/soldier/main.go
+./soldier -ip={node_ip} -port={node_port}
+*/
 package main

 import (
@@ -12,7 +19,6 @@ import (
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
@@ -20,6 +26,7 @@ import (
"github.com/simple-rules/harmony-benchmark/aws-experiment-launch/experiment/soldier/s3"
"github.com/simple-rules/harmony-benchmark/aws-experiment-launch/experiment/utils"
"github.com/simple-rules/harmony-benchmark/configr"
globalUtils "github.com/simple-rules/harmony-benchmark/utils"
)
type soliderSetting struct {
@@ -33,6 +40,8 @@ type sessionInfo struct {
 	commanderPort       string
 	localConfigFileName string
 	logFolder           string
+	configr             *configr.Configr
+	myConfig            configr.ConfigEntry
 }

 const (
@@ -146,6 +155,9 @@ func handleInitCommand(args []string, w *bufio.Writer) {
 	utils.DownloadFile(globalSession.localConfigFileName, configURL)
 	log.Println("Successfully downloaded config")

+	globalSession.configr.ReadConfigFile(globalSession.localConfigFileName)
+	globalSession.myConfig = *globalSession.configr.GetMyConfigEntry(setting.ip, setting.port)
+
 	if err := runInstance(); err == nil {
 		logAndReply(w, "Done init.")
 	} else {
@@ -165,10 +177,10 @@ func handleKillCommand(w *bufio.Writer) {
 func killPort(port string) error {
 	if runtime.GOOS == "windows" {
 		command := fmt.Sprintf("(Get-NetTCPConnection -LocalPort %s).OwningProcess -Force", port)
-		return runCmd("Stop-Process", "-Id", command)
+		return globalUtils.RunCmd("Stop-Process", "-Id", command)
 	} else {
 		command := fmt.Sprintf("lsof -i tcp:%s | grep LISTEN | awk '{print $2}' | xargs kill -9", port)
-		return runCmd("bash", "-c", command)
+		return globalUtils.RunCmd("bash", "-c", command)
 	}
 }
@@ -269,61 +281,25 @@ func handleLog2Command(w *bufio.Writer) {
 	logAndReply(w, "Upload log done!")
 }

-func runCmd(name string, args ...string) error {
-	cmd := exec.Command(name, args...)
-	if err := cmd.Start(); err != nil {
-		log.Fatal(err)
-		return err
-	} else {
-		log.Println("Command running", name, args)
-		go func() {
-			if err = cmd.Wait(); err != nil {
-				log.Printf("Command finished with error: %v", err)
-			} else {
-				log.Printf("Command finished successfully")
-			}
-		}()
-		return nil
-	}
-}
-
 func runInstance() error {
-	config, _ := configr.ReadConfigFile(globalSession.localConfigFileName)
-	myConfig := getMyConfig(setting.ip, setting.port, &config)
 	os.MkdirAll(globalSession.logFolder, os.ModePerm)
-	if myConfig[2] == "client" {
+	if globalSession.myConfig.Role == "client" {
 		return runClient()
-	} else {
-		return runNode()
 	}
+	return runNode()
 }

 func runNode() error {
 	log.Println("running instance")
-	return runCmd("./benchmark", "-ip", setting.ip, "-port", setting.port, "-config_file", globalSession.localConfigFileName, "-log_folder", globalSession.logFolder)
+	return globalUtils.RunCmd("./benchmark", "-ip", setting.ip, "-port", setting.port, "-config_file", globalSession.localConfigFileName, "-log_folder", globalSession.logFolder)
 }

 func runClient() error {
 	log.Println("running client")
-	return runCmd("./txgen", "-config_file", globalSession.localConfigFileName, "-log_folder", globalSession.logFolder)
-}
-
-func getMyConfig(myIP string, myPort string, config *[][]string) []string {
-	for _, node := range *config {
-		ip, port := node[0], node[1]
-		if ip == myIP && port == myPort {
-			return node
-		}
-	}
-	return nil
+	return globalUtils.RunCmd("./txgen", "-config_file", globalSession.localConfigFileName, "-log_folder", globalSession.logFolder)
 }

-// cd harmony-benchmark
-// go build -o soldier ../aws-experiment-launch/experiment/soldier/main.go
-// ./soldier -ip=xx -port=xx
 func main() {
 	ip := flag.String("ip", "127.0.0.1", "IP of the node.")
 	port := flag.String("port", "9000", "port of the node.")
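
One sharp edge in the soldier's new init path: GetMyConfigEntry returns nil when the soldier's ip:port pair is absent from the downloaded config, and handleInitCommand dereferences the result unchecked. A hypothetical guard, not part of this commit, could fail the command cleanly instead:

    // Hypothetical guard (not in this commit): reply with an error instead of
    // panicking when this node is missing from the downloaded config.
    entry := globalSession.configr.GetMyConfigEntry(setting.ip, setting.port)
    if entry == nil {
        logAndReply(w, "This node is not in the config file")
        return
    }
    globalSession.myConfig = *entry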

benchmark.go
@@ -5,6 +5,7 @@ import (
"fmt"
"math/rand"
"os"
"strconv"
"time"
"github.com/simple-rules/harmony-benchmark/attack"
@@ -12,8 +13,7 @@ import (
"github.com/simple-rules/harmony-benchmark/consensus"
"github.com/simple-rules/harmony-benchmark/log"
"github.com/simple-rules/harmony-benchmark/node"
"github.com/shirou/gopsutil/process"
"github.com/simple-rules/harmony-benchmark/utils"
)
const (
@@ -32,24 +32,10 @@ func attackDetermination(attackedMode int) bool {
 	return false
 }

-func logMemUsage(consensus *consensus.Consensus) {
-	p, _ := process.NewProcess(int32(os.Getpid()))
-	for {
-		info, _ := p.MemoryInfo()
-		memMap, _ := p.MemoryMaps(false)
-		log.Info("Mem Report", "info", info, "map", memMap)
-		time.Sleep(10 * time.Second)
-	}
-}
-
-// TODO: @ricl, start another process for reporting.
-func logCPUUsage(consensus *consensus.Consensus) {
-	p, _ := process.NewProcess(int32(os.Getpid()))
-	for {
-		percent, _ := p.CPUPercent()
-		times, _ := p.Times()
-		log.Info("CPU Report", "percent", percent, "times", times, "consensus", consensus)
-		time.Sleep(10 * time.Second)
+func startProfiler(shardID string, logFolder string) {
+	err := utils.RunCmd("./bin/profiler", "-pid", strconv.Itoa(os.Getpid()), "-shard_id", shardID, "-log_folder", logFolder)
+	if err != nil {
+		log.Error("Failed to start profiler")
 	}
 }
@@ -67,10 +53,11 @@ func main() {
 	// Attack determination.
 	attack.GetInstance().SetAttackEnabled(attackDetermination(*attackedMode))

-	config, _ := configr.ReadConfigFile(*configFile)
-	shardID := configr.GetShardID(*ip, *port, &config)
-	peers := configr.GetPeers(*ip, *port, shardID, &config)
-	leader := configr.GetLeader(shardID, &config)
+	configr := configr.NewConfigr()
+	configr.ReadConfigFile(*configFile)
+	shardID := configr.GetShardID(*ip, *port)
+	peers := configr.GetPeers(*ip, *port, shardID)
+	leader := configr.GetLeader(shardID)

 	var role string
 	if leader.Ip == *ip && leader.Port == *port {
@@ -90,16 +77,18 @@ func main() {
 	// Consensus object.
 	consensus := consensus.NewConsensus(*ip, *port, shardID, peers, leader)

-	// Logging for consensus.
-	go logMemUsage(consensus)
-	go logCPUUsage(consensus)
+	// Start Profiler for leader
+	if role == "leader" {
+		startProfiler(shardID, *logFolder)
+	}

 	// Set logger to attack model.
 	attack.GetInstance().SetLogger(consensus.Log)
 	// Current node.
 	currentNode := node.New(consensus)
 	// Create client peer.
-	clientPeer := configr.GetClientPeer(&config)
+	clientPeer := configr.GetClientPeer()
 	// If there is a client configured in the node list.
 	if clientPeer != nil {
 		currentNode.ClientPeer = clientPeer
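
With this change the in-process logMemUsage/logCPUUsage goroutines are gone, resolving the old TODO to move reporting into its own process: only the shard leader starts a profiler, and it does so by forking ./bin/profiler with its own pid, so resource usage is measured from outside the measured process. The binary has to exist at ./bin/profiler relative to the working directory; deploy.sh and run_experiment.sh below gain the matching go build line.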

client/btctxgen/main.go
@@ -176,8 +176,9 @@ func main() {
 	flag.Parse()

 	// Read the configs
-	config, _ := configr.ReadConfigFile(*configFile)
-	leaders, shardIDs := configr.GetLeadersAndShardIds(&config)
+	configr := configr.NewConfigr()
+	configr.ReadConfigFile(*configFile)
+	leaders, shardIDs := configr.GetLeadersAndShardIds()

 	// Do cross shard tx if there are more than one shard
 	setting.crossShard = len(shardIDs) > 1
@@ -203,7 +204,7 @@
 	}

 	// Client/txgenerator server node setup
-	clientPort := configr.GetClientPort(&config)
+	clientPort := configr.GetClientPort()
 	consensusObj := consensus.NewConsensus("0", clientPort, "0", nil, p2p.Peer{})
 	clientNode := node.New(consensusObj)
@@ -253,6 +254,6 @@
 	// Send a stop message to stop the nodes at the end
 	msg := proto_node.ConstructStopMessage()
-	peers := append(configr.GetValidators(*configFile), leaders...)
+	peers := append(configr.GetValidators(), leaders...)
 	p2p.BroadcastMessage(peers, msg)
 }

client/txgen/main.go
@@ -254,8 +254,9 @@ func main() {
 	flag.Parse()

 	// Read the configs
-	config, _ := configr.ReadConfigFile(*configFile)
-	leaders, shardIds := configr.GetLeadersAndShardIds(&config)
+	configr := configr.NewConfigr()
+	configr.ReadConfigFile(*configFile)
+	leaders, shardIds := configr.GetLeadersAndShardIds()
 	setting.numOfAddress = 10000
 	// Do cross shard tx if there are more than one shard
@@ -282,7 +283,7 @@
 	}

 	// Client/txgenerator server node setup
-	clientPort := configr.GetClientPort(&config)
+	clientPort := configr.GetClientPort()
 	consensusObj := consensus.NewConsensus("0", clientPort, "0", nil, p2p.Peer{})
 	clientNode := node.New(consensusObj)
@@ -359,6 +360,6 @@
 	// Send a stop message to stop the nodes at the end
 	msg := proto_node.ConstructStopMessage()
-	peers := append(configr.GetValidators(*configFile), leaders...)
+	peers := append(configr.GetValidators(), leaders...)
 	p2p.BroadcastMessage(peers, msg)
 }

configr/main.go
@@ -13,125 +13,148 @@ import (
"github.com/simple-rules/harmony-benchmark/utils"
)
type ConfigEntry struct {
IP string
Port string
Role string
ShardID string
}
type Configr struct {
config []ConfigEntry
}
func NewConfigr() *Configr {
configr := Configr{}
return &configr
}
// Gets all the validator peers
func GetValidators(config string) []p2p.Peer {
file, _ := os.Open(config)
fscanner := bufio.NewScanner(file)
func (configr *Configr) GetValidators() []p2p.Peer {
var peerList []p2p.Peer
for fscanner.Scan() {
p := strings.Split(fscanner.Text(), " ")
ip, port, status := p[0], p[1], p[2]
if status == "leader" || status == "client" {
for _, entry := range configr.config {
if entry.Role != "validator" {
continue
}
peer := p2p.Peer{Port: port, Ip: ip}
peer := p2p.Peer{Port: entry.Port, Ip: entry.IP}
peerList = append(peerList, peer)
}
return peerList
}
// Gets all the leader peers and corresponding shard Ids
func GetLeadersAndShardIds(config *[][]string) ([]p2p.Peer, []uint32) {
func (configr *Configr) GetLeadersAndShardIds() ([]p2p.Peer, []uint32) {
var peerList []p2p.Peer
var shardIds []uint32
for _, node := range *config {
ip, port, status, shardId := node[0], node[1], node[2], node[3]
if status == "leader" {
peerList = append(peerList, p2p.Peer{Ip: ip, Port: port})
val, err := strconv.Atoi(shardId)
var shardIDs []uint32
for _, entry := range configr.config {
if entry.Role == "leader" {
peerList = append(peerList, p2p.Peer{Ip: entry.IP, Port: entry.Port})
val, err := strconv.Atoi(entry.ShardID)
if err == nil {
shardIds = append(shardIds, uint32(val))
shardIDs = append(shardIDs, uint32(val))
} else {
log.Print("[Generator] Error parsing the shard Id ", shardId)
log.Print("[Generator] Error parsing the shard Id ", entry.ShardID)
}
}
}
return peerList, shardIds
return peerList, shardIDs
}
func GetClientPeer(config *[][]string) *p2p.Peer {
for _, node := range *config {
ip, port, status := node[0], node[1], node[2]
if status != "client" {
func (configr *Configr) GetClientPeer() *p2p.Peer {
for _, entry := range configr.config {
if entry.Role != "client" {
continue
}
peer := p2p.Peer{Port: port, Ip: ip}
peer := p2p.Peer{Port: entry.Port, Ip: entry.IP}
return &peer
}
return nil
}
// Gets the port of the client node in the config
func GetClientPort(config *[][]string) string {
for _, node := range *config {
_, port, status, _ := node[0], node[1], node[2], node[3]
if status == "client" {
return port
func (configr *Configr) GetClientPort() string {
for _, entry := range configr.config {
if entry.Role == "client" {
return entry.Port
}
}
return ""
}
// Parse the config file and return a 2d array containing the file data
func (configr *Configr) ReadConfigFile(filename string) error {
file, err := os.Open(filename)
defer file.Close()
if err != nil {
log.Fatal("Failed to read config file ", filename)
return err
}
fscanner := bufio.NewScanner(file)
result := []ConfigEntry{}
for fscanner.Scan() {
p := strings.Split(fscanner.Text(), " ")
entry := ConfigEntry{p[0], p[1], p[2], p[3]}
result = append(result, entry)
}
log.Println(result)
configr.config = result
return nil
}
// GetShardID Gets the shard id of the node corresponding to this ip and port
func GetShardID(myIp, myPort string, config *[][]string) string {
for _, node := range *config {
ip, port, shardId := node[0], node[1], node[3]
if ip == myIp && port == myPort {
return shardId
func (configr *Configr) GetShardID(ip, port string) string {
for _, entry := range configr.config {
if entry.IP == ip && entry.Port == port {
return entry.ShardID
}
}
return "N/A"
}
// GetPeers Gets the peer list of the node corresponding to this ip and port
func GetPeers(myIp, myPort, myShardId string, config *[][]string) []p2p.Peer {
func (configr *Configr) GetPeers(ip, port, shardID string) []p2p.Peer {
var peerList []p2p.Peer
for _, node := range *config {
ip, port, status, shardId := node[0], node[1], node[2], node[3]
if status != "validator" || ip == myIp && port == myPort || myShardId != shardId {
for _, entry := range configr.config {
if entry.Role != "validator" || entry.IP == ip && entry.Port == port || entry.ShardID != shardID {
continue
}
// Get public key deterministically based on ip and port
peer := p2p.Peer{Port: port, Ip: ip}
priKey := crypto.Ed25519Curve.Scalar().SetInt64(int64(utils.GetUniqueIdFromPeer(peer)))
peer.PubKey = pki.GetPublicKeyFromScalar(priKey)
peer := p2p.Peer{Port: entry.Port, Ip: entry.IP}
setKey(&peer)
peerList = append(peerList, peer)
}
return peerList
}
// GetLeader Gets the leader of this shard id
func GetLeader(myShardId string, config *[][]string) p2p.Peer {
func (configr *Configr) GetLeader(shardID string) p2p.Peer {
var leaderPeer p2p.Peer
for _, node := range *config {
ip, port, status, shardId := node[0], node[1], node[2], node[3]
if status == "leader" && myShardId == shardId {
leaderPeer.Ip = ip
leaderPeer.Port = port
// Get public key deterministically based on ip and port
priKey := crypto.Ed25519Curve.Scalar().SetInt64(int64(utils.GetUniqueIdFromPeer(leaderPeer))) // TODO: figure out why using a random hash value doesn't work for private key (schnorr)
leaderPeer.PubKey = pki.GetPublicKeyFromScalar(priKey)
for _, entry := range configr.config {
if entry.Role == "leader" && entry.ShardID == shardID {
leaderPeer.Ip = entry.IP
leaderPeer.Port = entry.Port
setKey(&leaderPeer)
}
}
return leaderPeer
}
// Parse the config file and return a 2d array containing the file data
func ReadConfigFile(filename string) ([][]string, error) {
file, err := os.Open(filename)
defer file.Close()
if err != nil {
log.Fatal("Failed to read config file ", filename)
return nil, err
}
fscanner := bufio.NewScanner(file)
func (configr *Configr) GetConfigEntries() []ConfigEntry {
return configr.config
}
result := [][]string{}
for fscanner.Scan() {
p := strings.Split(fscanner.Text(), " ")
result = append(result, p)
func (configr *Configr) GetMyConfigEntry(ip string, port string) *ConfigEntry {
for _, entry := range configr.config {
if entry.IP == ip && entry.Port == port {
return &entry
}
}
log.Println(result)
return result, nil
return nil
}
func setKey(peer *p2p.Peer) {
// Get public key deterministically based on ip and port
priKey := crypto.Ed25519Curve.Scalar().SetInt64(int64(utils.GetUniqueIdFromPeer(*peer))) // TODO: figure out why using a random hash value doesn't work for private key (schnorr)
peer.PubKey = pki.GetPublicKeyFromScalar(priKey)
}
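
The refactor keeps configr's on-disk format: ReadConfigFile still splits each line on single spaces into ip, port, role, and shardID columns. A minimal end-to-end sketch of the new API (file contents and addresses are illustrative):

    package main

    import (
        "fmt"

        "github.com/simple-rules/harmony-benchmark/configr"
    )

    func main() {
        // config.txt holds one "ip port role shardID" entry per line, e.g.:
        //   127.0.0.1 9000 leader 0
        //   127.0.0.1 9001 validator 0
        //   127.0.0.1 9010 client 0
        cfg := configr.NewConfigr()
        if err := cfg.ReadConfigFile("config.txt"); err != nil {
            return // note: an unreadable file hits log.Fatal before this branch
        }
        fmt.Println(cfg.GetShardID("127.0.0.1", "9000"))         // "0"
        fmt.Println(cfg.GetLeader("0").Ip)                       // "127.0.0.1"
        fmt.Println(len(cfg.GetPeers("127.0.0.1", "9000", "0"))) // validators in shard 0, excluding self
        fmt.Println(cfg.GetClientPort())                         // "9010"
    }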

deploy.sh
@@ -8,6 +8,7 @@
 # Also it's recommended to use `go build` for testing the whole exe.
 go build -o bin/benchmark
 go build -o bin/txgen client/txgen/main.go
+go build -o bin/profiler profiler/main.go

 # Create a tmp folder for logs
 t=`date +"%Y%m%d-%H%M%S"`

kill_node.sh
@@ -1,4 +1,4 @@
-for pid in `/bin/ps -fu $USER| grep "benchmark\|txgen\|soldier\|commander" | grep -v "grep" | awk '{print $2}'`;
+for pid in `/bin/ps -fu $USER| grep "benchmark\|txgen\|soldier\|commander\|profiler" | grep -v "grep" | awk '{print $2}'`;
 do
     echo 'Killed process: '$pid
     kill -9 $pid

profiler/main.go
@@ -0,0 +1,55 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"time"
+
+	"github.com/shirou/gopsutil/process"
+	"github.com/simple-rules/harmony-benchmark/log"
+)
+
+type profilerSetting struct {
+	pid     int32
+	shardID string
+}
+
+var (
+	setting profilerSetting
+)
+
+func logPerf() {
+	p, _ := process.NewProcess(setting.pid)
+	for {
+		// log mem usage
+		info, _ := p.MemoryInfo()
+		memMap, _ := p.MemoryMaps(false)
+		log.Info("Mem Report", "info", info, "map", memMap, "shardID", setting.shardID)
+
+		// log cpu usage
+		percent, _ := p.CPUPercent()
+		times, _ := p.Times()
+		log.Info("CPU Report", "percent", percent, "times", times, "shardID", setting.shardID)
+
+		time.Sleep(3 * time.Second)
+	}
+}
+
+func main() {
+	pid := flag.Int("pid", 0, "process id of the node")
+	shardID := flag.String("shard_id", "0", "the shard id of this node")
+	logFolder := flag.String("log_folder", "latest", "the folder collecting the logs of this execution")
+	flag.Parse()
+
+	setting.pid = int32(*pid)
+	setting.shardID = *shardID
+
+	logFileName := fmt.Sprintf("./%v/profiler-%v.log", *logFolder, *shardID)
+	h := log.MultiHandler(
+		log.StdoutHandler,
+		log.Must.FileHandler(logFileName, log.JSONFormat()), // Log to file
+		// log.Must.NetHandler("tcp", ":3000", log.JSONFormat()) // Log to remote
+	)
+	log.Root().SetHandler(h)
+
+	logPerf()
+}
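
Compared with the deleted agent, the profiler samples memory and CPU in a single 3-second logPerf loop and tags every report with the shard id. It writes ./<log_folder>/profiler-<shard_id>.log alongside the node's own logs, so each shard's node and profiler output collate in the same folder.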

run_experiment.sh
@@ -5,6 +5,7 @@ go build -o bin/benchmark
 go build -o bin/txgen client/txgen/main.go
 go build -o bin/commander aws-experiment-launch/experiment/commander/main.go
 go build -o bin/soldier aws-experiment-launch/experiment/soldier/main.go
+go build -o bin/profiler profiler/main.go
 cd bin

 # Create a tmp folder for logs

utils/utils.go
@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/binary"
"log"
"os/exec"
"regexp"
"strconv"
"strings"
@@ -43,3 +44,22 @@ func GetUniqueIdFromPeer(peer p2p.Peer) uint16 {
 	value, _ := strconv.Atoi(socketId)
 	return uint16(value)
 }
+
+// RunCmd Runs command `name` with arguments `args`
+func RunCmd(name string, args ...string) error {
+	cmd := exec.Command(name, args...)
+	if err := cmd.Start(); err != nil {
+		log.Fatal(err)
+		return err
+	}
+	log.Println("Command running", name, args)
+	go func() {
+		if err := cmd.Wait(); err != nil {
+			log.Printf("Command finished with error: %v", err)
+		} else {
+			log.Printf("Command finished successfully")
+		}
+	}()
+	return nil
+}
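
RunCmd returns as soon as the child process has started; a goroutine then waits on it, so the exit status gets logged and the child is reaped rather than left as a zombie. Note that log.Fatal exits the process, which makes the return err after it effectively unreachable. A minimal caller sketch (the binary path is illustrative):

    // RunCmd only blocks until Start returns; completion, success or failure,
    // is logged asynchronously by RunCmd's own goroutine.
    if err := utils.RunCmd("./bin/profiler", "-pid", "1234", "-shard_id", "0"); err != nil {
        log.Println("could not start profiler:", err) // defensive: Start failures currently log.Fatal
    }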
