Richard Liu 6 years ago
commit 6500839c1a
  1. aws-experiment-launch/create_deploy_pipeline.sh (2 changes)
  2. aws-experiment-launch/create_instances.py (4 changes)
  3. aws-experiment-launch/get_leader_tps_average.sh (9 changes)
  4. aws-experiment-launch/report_extractor.py (96 changes)
  5. benchmark.go (27 changes)

@@ -10,7 +10,7 @@ CLIENT_NUM=$3
echo "Creating $INSTANCE_NUM instances at 8 regions"
python create_instances.py --regions 1,2,3,4,5,6,7,8 --instances $INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM
sleep 10
sleep 40
echo "Rung collecint raw ips"
python collect_public_ips.py --instance_output instance_output.txt
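The bumped-up sleep gives the freshly launched instances time to come online before their public IPs are collected. A fixed sleep can still race the launch; a minimal sketch of polling instead, assuming boto3 on the launch host (the helper name wait_for_public_ips is illustrative, not part of this commit):

    import time
    import boto3  # assumed available on the launch host

    def wait_for_public_ips(region, name_tag, expected, timeout=300):
        # Poll EC2 until `expected` tagged, running instances expose a public IP.
        ec2 = boto3.client("ec2", region_name=region)
        deadline = time.time() + timeout
        while time.time() < deadline:
            resp = ec2.describe_instances(
                Filters=[{"Name": "tag:Name", "Values": [name_tag]},
                         {"Name": "instance-state-name", "Values": ["running"]}])
            ips = [inst.get("PublicIpAddress")
                   for r in resp["Reservations"] for inst in r["Instances"]]
            if sum(1 for ip in ips if ip) >= expected:
                return ips
            time.sleep(5)
        raise TimeoutError("instances did not report public IPs in time")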

@@ -34,7 +34,7 @@ def create_instances(config, ec2_client, region_number, number_of_instances, tag
    available_zone = utils.get_one_availability_zone(ec2_client)
    LOGGER.info("Looking at zone %s to create instances." % available_zone)
    time.sleep(10)
    time.sleep(2)
    ec2_client.run_instances(
        MinCount=number_of_instances,
        MaxCount=number_of_instances,
@@ -61,7 +61,7 @@ def create_instances(config, ec2_client, region_number, number_of_instances, tag
            },
        ],
    )
    time.sleep(10)
    time.sleep(30)
    instance_ids = utils.get_instance_ids2(ec2_client, node_name_tag)
    LOGGER.info("Waiting for all %d instances in region %s with node_name_tag %s to be in RUNNING" % (
        len(instance_ids), region_number, node_name_tag))
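Here the pre-launch sleep is shortened and the post-launch wait is lengthened before the script polls for RUNNING. As a hedged alternative to fixed sleeps, boto3 ships an EC2 waiter that blocks until the instances reach the running state (sketch only; the repo's own utils helpers are assumed to stay in place):

    def wait_until_running(ec2_client, instance_ids):
        # Blocks until every listed instance reaches RUNNING; raises WaiterError on timeout.
        waiter = ec2_client.get_waiter("instance_running")
        waiter.wait(InstanceIds=instance_ids)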

@@ -1,9 +0,0 @@
if [ $# -eq 0 ]; then
echo "Please the directory of the log"
exit 1
fi
DIR=$1
for file in $(ls $DIR/*leader*)
do
cat $file | egrep -o "TPS=[0-9]+" | cut -f2 -d "=" | awk '{ sum += $1; n++ } END { if (n > 0) print sum / n; }';
done
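The deleted script averaged every TPS=<n> occurrence in the leader logs with grep and awk. A rough Python equivalent of that pipeline, shown only to document what the new report_extractor.py replaces (log file path taken from the command line):

    import re
    import sys

    # Average all TPS=<n> matches found in one leader log (illustrative).
    values = []
    with open(sys.argv[1]) as f:
        for line in f:
            values.extend(int(m) for m in re.findall(r"TPS=(\d+)", line))
    if values:
        print(sum(values) / len(values))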

@@ -0,0 +1,96 @@
import json
import sys
import os
import argparse


def formatFloat(v):
    return "%.2f" % v


def formatPercent(v):
    return formatFloat(v) + "%"


def formatMem(v):
    return formatFloat(float(v) / 10**6) + "MB"


class Profiler:
    def __init__(self):
        self.tps = 0
        self.tps_max = 0
        self.tps_min = sys.maxsize
        self.tps_count = 0
        self.cpu_percent = 0
        self.cpu_usr = 0
        self.cpu_sys = 0
        self.cpu_count = 0
        self.mem_rss = 0
        self.mem_rss_max = 0
        self.mem_count = 0

    def handleTPS(self, obj):
        tps = obj["TPS"]
        self.tps += tps
        self.tps_max = max(self.tps_max, tps)
        self.tps_min = min(self.tps_min, tps)
        self.tps_count += 1

    def handleCPU(self, obj):
        # http://psutil.readthedocs.io/en/latest/#psutil.Process.cpu_times
        # https://stackoverflow.com/questions/556405/what-do-real-user-and-sys-mean-in-the-output-of-time1
        # http://psutil.readthedocs.io/en/latest/#psutil.Process.cpu_percent
        self.cpu_percent += obj["percent"]
        times = json.loads(obj["times"])
        self.cpu_usr = times["user"]
        self.cpu_sys = times["system"]
        self.cpu_count += 1

    def handleMem(self, obj):
        # http://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info
        info = json.loads(obj["info"])
        rss = info["rss"]
        self.mem_rss += rss
        self.mem_rss_max = max(self.mem_rss_max, rss)
        self.mem_count += 1

    def report(self):
        print("TPS",
              "Avg", formatFloat(self.tps / self.tps_count),
              "Min", formatFloat(self.tps_min),
              "Max", formatFloat(self.tps_max))
        print("CPU",
              "Percent (Avg)", formatPercent(self.cpu_percent / self.cpu_count),
              "Time (Usr)", str(self.cpu_usr) + "s",
              "Time (Sys)", str(self.cpu_sys) + "s")
        print("Mem",
              "RSS (Max)", formatMem(self.mem_rss_max),
              "RSS (Avg)", formatMem(self.mem_rss / self.mem_count))


def profileFile(path):
    print(path)
    profiler = Profiler()
    with open(path) as f:
        for line in f:
            obj = json.loads(line)
            if obj["lvl"] != "info":
                continue
            if obj["msg"] == "TPS Report":
                profiler.handleTPS(obj)
            elif obj["msg"] == "CPU Report":
                profiler.handleCPU(obj)
            elif obj["msg"] == "Mem Report":
                profiler.handleMem(obj)
    profiler.report()


# Example: python report_extractor.py --folder ../tmp_log/log-20180713-205431
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="This script extracts reports from log files")
    parser.add_argument("--folder", type=str, dest="folder",
                        default="",
                        help="the path to the log folder")
    args = parser.parse_args()
    for filename in os.listdir(args.folder):
        if "leader" in filename:
            profileFile(os.path.join(args.folder, filename))
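The extractor expects one JSON object per line with log15-style keys (lvl, msg) plus the report fields emitted by benchmark.go; note that "times" and "info" arrive as JSON-encoded strings and are decoded again inside the handlers. A small self-contained check with synthetic lines (all field values are made up) could look like:

    import json
    from report_extractor import Profiler  # hypothetical import of the file above as a module

    lines = [
        json.dumps({"lvl": "info", "msg": "TPS Report", "TPS": 1200}),
        json.dumps({"lvl": "info", "msg": "CPU Report", "percent": 42.0,
                    "times": json.dumps({"user": 1.5, "system": 0.4})}),
        json.dumps({"lvl": "info", "msg": "Mem Report",
                    "info": json.dumps({"rss": 52428800})}),
    ]
    profiler = Profiler()
    for line in lines:
        obj = json.loads(line)
        if obj["msg"] == "TPS Report":
            profiler.handleTPS(obj)
        elif obj["msg"] == "CPU Report":
            profiler.handleCPU(obj)
        elif obj["msg"] == "Mem Report":
            profiler.handleMem(obj)
    profiler.report()  # prints the TPS / CPU / Mem summary lines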

@@ -9,12 +9,12 @@ import (
    "harmony-benchmark/log"
    "harmony-benchmark/node"
    "harmony-benchmark/p2p"
    "harmony-benchmark/utils"
    "math/rand"
    "os"
    "runtime"
    "strings"
    "time"
    "github.com/shirou/gopsutil/process"
)
const (
@@ -93,11 +93,22 @@ func attackDetermination(attackedMode int) bool {
}

func logMemUsage(consensus *consensus.Consensus) {
    p, _ := process.NewProcess(int32(os.Getpid()))
    for {
        var m runtime.MemStats
        runtime.ReadMemStats(&m)
        log.Info("Mem Report", "Alloc", utils.BToMb(m.Alloc), "TotalAlloc", utils.BToMb(m.TotalAlloc),
            "Sys", utils.BToMb(m.Sys), "NumGC", m.NumGC, "consensus", consensus)
        info, _ := p.MemoryInfo()
        memMap, _ := p.MemoryMaps(false)
        log.Info("Mem Report", "info", info, "map", memMap)
        time.Sleep(10 * time.Second)
    }
}

// TODO: @ricl, start another process for reporting.
func logCPUUsage(consensus *consensus.Consensus) {
    p, _ := process.NewProcess(int32(os.Getpid()))
    for {
        percent, _ := p.CPUPercent()
        times, _ := p.Times()
        log.Info("CPU Report", "percent", percent, "times", times, "consensus", consensus)
        time.Sleep(10 * time.Second)
    }
}
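Both reporters sample the current process with gopsutil every 10 seconds and log the raw readings for the extractor to aggregate. The psutil documentation linked from report_extractor.py describes the same counters; a rough Python mirror of the sampling loop, given purely as a cross-reference (not how the repo does it):

    import json
    import os
    import time

    import psutil  # assumed installed; counterpart of gopsutil in the Go code

    proc = psutil.Process(os.getpid())
    while True:
        percent = proc.cpu_percent(interval=None)  # ~ p.CPUPercent()
        times = proc.cpu_times()                   # ~ p.Times()
        mem = proc.memory_info()                   # ~ p.MemoryInfo()
        print(json.dumps({"lvl": "info", "msg": "CPU Report", "percent": percent,
                          "times": json.dumps({"user": times.user, "system": times.system})}))
        print(json.dumps({"lvl": "info", "msg": "Mem Report",
                          "info": json.dumps({"rss": mem.rss})}))
        time.sleep(10)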
@@ -132,7 +143,7 @@ func main() {
    logFileName := fmt.Sprintf("./%v/%s-%v-%v.log", *logFolder, role, *ip, *port)
    h := log.MultiHandler(
        log.StdoutHandler,
        log.Must.FileHandler(logFileName, log.LogfmtFormat()), // Log to file
        log.Must.FileHandler(logFileName, log.JSONFormat()),   // Log to file
        // log.Must.NetHandler("tcp", ":3000", log.JSONFormat()) // Log to remote
    )
    log.Root().SetHandler(h)
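Switching the file handler from LogfmtFormat to JSONFormat makes every log line a single JSON object, which is what lets report_extractor.py call json.loads on it directly instead of scraping key=value pairs. A hedged illustration of the difference (both sample lines are made up):

    import json
    import re

    # Old logfmt-style line: values had to be mined with regexes.
    logfmt_line = 't=2018-07-13T20:54:31 lvl=info msg="TPS Report" TPS=1200'
    tps = int(re.search(r"TPS=(\d+)", logfmt_line).group(1))

    # New JSON line: the whole record parses in one call.
    json_line = '{"t": "2018-07-13T20:54:31", "lvl": "info", "msg": "TPS Report", "TPS": 1200}'
    record = json.loads(json_line)
    assert record["TPS"] == tps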
@@ -141,6 +152,8 @@ func main() {
    consensus := consensus.NewConsensus(*ip, *port, shardID, peers, leader)
    // Logging for consensus.
    go logMemUsage(consensus)
    go logCPUUsage(consensus)
    // Set logger to attack model.
    attack.GetInstance().SetLogger(consensus.Log)
    // Current node.
