pull/69/head
Rongjian Lan 6 years ago
commit d91ca0c27b
  1. 4
      .travis.yml
  2. 1
      aws-experiment-launch/cal_tps.sh
  3. 35
      aws-experiment-launch/collect_public_ips.py
  4. 62
      aws-experiment-launch/commander_prepare.py
  5. 11
      aws-experiment-launch/configs/userdata-commander.sh
  6. 20
      aws-experiment-launch/configs/userdata-soldier.sh
  7. 8
      aws-experiment-launch/configuration-git.txt
  8. 8
      aws-experiment-launch/configuration.txt
  9. 28
      aws-experiment-launch/create_deploy_soldiers.sh
  10. 193
      aws-experiment-launch/create_solider_instances.py
  11. 8
      aws-experiment-launch/download_log_from_commander.sh
  12. 17
      aws-experiment-launch/download_log_from_leaders.sh
  13. 233
      aws-experiment-launch/experiment/commander/main.go
  14. 312
      aws-experiment-launch/experiment/soldier/main.go
  15. 93
      aws-experiment-launch/experiment/soldier/s3/s3.go
  16. 30
      aws-experiment-launch/experiment/utils/utils.go
  17. 37
      aws-experiment-launch/generate_distribution_config.py
  18. 1
      aws-experiment-launch/logs_download.sh
  19. 96
      aws-experiment-launch/report_extractor.py
  20. 18
      aws-experiment-launch/spot-instance/request-spot.sh
  21. 10
      aws-experiment-launch/spot-instance/run-instances.sh
  22. 105
      aws-experiment-launch/terminate_instances.py
  23. 15
      aws-experiment-launch/upload_binaries.py
  24. 10
      aws-experiment-launch/upload_config.py
  25. 9
      aws-experiment-launch/upload_s3.py
  26. 11
      aws-experiment-launch/userdata-commander.sh
  27. 31
      aws-experiment-launch/userdata-soldier.sh
  28. 0
      aws-experiment-launch/utils/__init__.py
  29. 8
      aws-experiment-launch/utils/configuration.txt
  30. 46
      aws-experiment-launch/utils/launch_template.py
  31. 9
      aws-experiment-launch/utils/logger.py
  32. 119
      aws-experiment-launch/utils/spot_fleet.py
  33. 210
      aws-experiment-launch/utils/utils.py
  34. 31
      aws-experiment-launch/utils/utils_test.py
  35. 5
      aws-scripts/kill_node.sh
  36. 34
      aws-scripts/parse_json.py
  37. 26
      aws-scripts/preprocess_peerlist.py
  38. 34
      aws-scripts/run_instance.sh
  39. 1
      aws-scripts/say_bye.sh
  40. 1
      aws-scripts/say_hello.sh
  41. 43
      aws-scripts/setup.sh
  42. 1
      aws-scripts/setup_instances.sh
  43. 177
      azure/launchvm.py
  44. 2
      benchmark.go
  45. 12
      client/btctxgen/main.go
  46. 99
      client/config/config.go
  47. 12
      client/txgen/main.go
  48. 7
      client/wallet/main.go
  49. 159
      configr/main.go
  50. 7
      consensus/consensus_leader.go
  51. 7
      deploy.sh
  52. 7
      go_executable_build.sh
  53. 5
      identitychain/identityblock.go
  54. 81
      identitychain/identitychain.go
  55. 2
      run_experiment.sh
  56. 3
      runwait/run_wait.go

@ -10,5 +10,5 @@ install:
- ./.travis.gofmt.sh
- go build -v ./...
notifications:
slack:
secure: RPB3ThYIGuDUidvaWfOA7Hc9x1bDfd5+Y10r7xwY+NGCN3zW86s/GNLpLutI0MWTV9e2CJupHvz5clp8Ktle/tVjLhs6jHQnNV7U8PTWKkL5By6IFVAHN12unMQn/m0RPwqMfdubajXoV51XhbFA/iow/0fqwsd61VdPIuBrlQjy9z7kyVnRLNoGvYjDqKEkJfYVb3qFNFLzD0F7Y2AgxnezIRjsTLgHzR4owLJYqVMhvTYIV9/vSf1w4UUPzhHyZRESl6bri+a1+g7GxE32OtNwq68xxVeeJcrO/MbjAHHW9V6BW1MjJfYzD5T+7JHIfZOjV2WgzJ7uCkVYztfq+02yOCSWsLNxFVojIDhVFEhhJ6Vd2Zf1otolS7j0svK/qNmShID9q9NAasaI105GsQgtaSPAUGd88J/vyX2ndG1nDOvxmgOo10tZFOnPHW7JnWMybk3PLza8o1ujA7X3JFdvDA8BPP9h6MVP4N7doCQ/n4Crts53HvEWlvcv5sBNu61WYlSTBzf1qNwBKMyN2E0rNubsxKmW8B6jLdWYdlx57nyTRPraNKGE1fnUW5nWRZGax3F1tQRwEfpQMk22qgeUK0RYWsPgHFaPciKCA3dJX7t1k/ib9pyR4nc9SZnYw54KMhkAXPIVQ0iy0EpTAH1DNYV6v8zXCwjl+BdkhlY=
slack: harmonyone:gggCd1QQopsQAW8JYgBWiH7M
# secure: RPB3ThYIGuDUidvaWfOA7Hc9x1bDfd5+Y10r7xwY+NGCN3zW86s/GNLpLutI0MWTV9e2CJupHvz5clp8Ktle/tVjLhs6jHQnNV7U8PTWKkL5By6IFVAHN12unMQn/m0RPwqMfdubajXoV51XhbFA/iow/0fqwsd61VdPIuBrlQjy9z7kyVnRLNoGvYjDqKEkJfYVb3qFNFLzD0F7Y2AgxnezIRjsTLgHzR4owLJYqVMhvTYIV9/vSf1w4UUPzhHyZRESl6bri+a1+g7GxE32OtNwq68xxVeeJcrO/MbjAHHW9V6BW1MjJfYzD5T+7JHIfZOjV2WgzJ7uCkVYztfq+02yOCSWsLNxFVojIDhVFEhhJ6Vd2Zf1otolS7j0svK/qNmShID9q9NAasaI105GsQgtaSPAUGd88J/vyX2ndG1nDOvxmgOo10tZFOnPHW7JnWMybk3PLza8o1ujA7X3JFdvDA8BPP9h6MVP4N7doCQ/n4Crts53HvEWlvcv5sBNu61WYlSTBzf1qNwBKMyN2E0rNubsxKmW8B6jLdWYdlx57nyTRPraNKGE1fnUW5nWRZGax3F1tQRwEfpQMk22qgeUK0RYWsPgHFaPciKCA3dJX7t1k/ib9pyR4nc9SZnYw54KMhkAXPIVQ0iy0EpTAH1DNYV6v8zXCwjl+BdkhlY=

@ -1 +0,0 @@
# Print each leader log's name followed by the average of its first two TPS
# samples (fields taken from lines containing "TPS").
for file in $(ls *leader*); do echo $file; cat $file | grep TPS | head -n 2 | cut -f2 -d ":" | cut -f1 -d "," | awk '{ sum += $1; n++ } END { if (n > 0) print sum / n; }'; done

@ -1,35 +0,0 @@
import argparse
import os
import random
import sys

from utils import utils

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='This script helps you to collect public ips')
    parser.add_argument('--instance_output', type=str, dest='instance_output',
                        default='instance_output.txt',
                        help='the file contains node_name_tag and region number of created instances.')
    parser.add_argument('--region_config', type=str,
                        dest='region_config', default='configuration.txt')
    parser.add_argument('--file_output', type=str,
                        dest='file_output', default='raw_ip.txt')
    args = parser.parse_args()

    if not args.instance_output or not os.path.isfile(args.instance_output):
        # The original message printed file_output first even though only
        # instance_output is actually checked here.
        print("instance output file %s does not exist" % args.instance_output)
        sys.exit(1)

    # Each input line is "<node_name_tag> <region_number>"; gather all public
    # ips per batch, shuffle them, and write "<ip> <node_name_tag>" lines.
    with open(args.instance_output, "r") as fin, open(args.file_output, "w") as fout:
        total_ips = []
        for line in fin:
            items = line.split(" ")
            node_name_tag = items[0].strip()
            region_number = items[1].strip()
            ip_list = utils.collect_public_ips(region_number, node_name_tag, args.region_config)
            total_ips.extend((ip, node_name_tag) for ip in ip_list)
        random.shuffle(total_ips)
        for ip, tag in total_ips:  # avoid shadowing the builtin `tuple`
            fout.write(ip + " " + tag + "\n")
        print("Done collecting public ips %s" % args.file_output)

@ -1,62 +0,0 @@
import argparse
import logging
import os
import stat
import sys
from utils import utils

logging.basicConfig(level=logging.INFO, format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s')
LOGGER = logging.getLogger(__file__)
LOGGER.setLevel(logging.INFO)

# Regional ssh key files, indexed by (region number - 1).
PEMS = [
    "virginia-key-benchmark.pem",
    "ohio-key-benchmark.pem",
    "california-key-benchmark.pem",
    "oregon-key-benchmark.pem",
    "tokyo-key-benchmark.pem",
    "singapore-key-benchmark.pem",
    "frankfurt-key-benchmark.pem",
    "ireland-key-benchmark.pem",
]

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='This script helps you to genereate distribution config')
    parser.add_argument('--distribution_config', type=str,
                        dest='distribution_config', default='distribution_config.txt')
    parser.add_argument('--commander_logging', type=str,
                        dest='commander_logging', default='commander_logging.sh')
    args = parser.parse_args()

    if not os.path.exists(args.distribution_config):
        sys.exit(1)

    with open(args.distribution_config, "r") as fin:
        lines = fin.readlines()

    commander_address = None
    commander_region = None
    commander_output = None
    # Rewrite the config with the commander's own entry removed, remembering
    # its address/region for the ssh helper generated below.
    with open(args.distribution_config, "w") as fout:
        for line in lines:
            if "commander" not in line:
                fout.write(line.strip() + "\n")
                continue
            items = [item.strip() for item in line.split(" ")]
            commander_address = items[0]
            # NOTE(review): assumes the 5th field begins with a one-digit
            # region number -- confirm against the config format.
            commander_region = int(items[4][0])
            commander_output = "\n".join(items)
    LOGGER.info("Generated %s" % args.distribution_config)

    if not commander_address or not commander_region:
        LOGGER.info("Failed to extract commander address and commander region.")
        sys.exit(1)

    # Emit an executable one-liner that ssh-es into the commander with the
    # matching regional key.
    with open(args.commander_logging, "w") as fout:
        fout.write("ssh -i ./keys/%s ec2-user@%s\n" % (PEMS[commander_region - 1], commander_address))
    st = os.stat(args.commander_logging)
    os.chmod(args.commander_logging, st.st_mode | stat.S_IEXEC)
    LOGGER.info("Generated %s" % args.commander_logging)
    LOGGER.info("DONE.")

@ -1,11 +0,0 @@
#!/bin/bash -x
# EC2 user-data for the commander host: installs the AWS CodeDeploy agent
# for whatever region this instance runs in and prepares the project dirs.
# Derive the region from the availability zone (strip the trailing letter).
REGION=$(curl 169.254.169.254/latest/meta-data/placement/availability-zone/ | sed 's/[a-z]$//')
#yum update -y #This breaking codedeploy right now
yum install ruby wget -y
cd /home/ec2-user
# Marker file recording that `yum update` was intentionally skipped.
touch yum-not-updated.txt
# Fetch and run the region-local CodeDeploy agent installer.
wget https://aws-codedeploy-$REGION.s3.amazonaws.com/latest/install
chmod +x ./install
./install auto
mkdir projects
mkdir projects/src

@ -1,20 +0,0 @@
#!/bin/bash
# EC2 user-data for soldier hosts: downloads the soldier/benchmark/txgen
# binaries from the commander's HTTP endpoint and starts the soldier.
cd /home/ec2-user
commanderIP= # <- Put the commander IP here.
curl http://$commanderIP:8080/soldier -o soldier
chmod +x ./soldier
curl http://$commanderIP:8080/benchmark -o benchmark
chmod +x ./benchmark
curl http://$commanderIP:8080/txgen -o txgen
chmod +x ./txgen
# Get My IP
ip=`curl http://169.254.169.254/latest/meta-data/public-ipv4`
node_port=9000
# The soldier listens on "1" + node port (i.e. 19000).
soldier_port=1$node_port
# Kill existing soldier
fuser -k -n tcp $soldier_port
# Run soldier
./soldier -ip $ip -port $node_port > soldier_log 2>&1 &

@ -1,8 +0,0 @@
1,us-east-1,virginia-key-benchmark,virginia-security-group,virginia,ami-97785bed,sg-04d0b62ee08ce8800
2,us-east-2,ohio-key-benchmark,ohio-security-group,ohio,ami-f63b1193,sg-0789078f1c76defbe
3,us-west-1,california-key-benchmark,california-security-group,california,ami-824c4ee2,sg-0a66ccb6ab9161a14
4,us-west-2,oregon-key-benchmark,oregon-security-group,oregon,ami-f2d3638a,sg-020cb5729fa212d43
5,ap-northeast-1,tokyo-key-benchmark,tokyo-security-group,tokyo,ami-ceafcba8,sg-009aeb97f675c1ad5
6,ap-southeast-1,singapore-key-benchmark,singapore-security-group,singapore,ami-68097514,sg-05f9b60044a19dfb2
7,eu-central-1,frankfurt-key-benchmark,frankfurt-security-group,frankfurt,ami-5652ce39,sg-0bb06fcd8b25b5910
8,eu-west-1,ireland-key-benchmark,ireland-security-group,ireland,ami-d834aba1,sg-0aa8954acb79fdb58

@ -1,8 +0,0 @@
1,us-east-1,virginia-key-benchmark,virginia-security-group,virginia,ami-b70554c8,sg-04d0b62ee08ce8800
2,us-east-2,ohio-key-benchmark,ohio-security-group,ohio,ami-8c122be9,sg-0789078f1c76defbe
3,us-west-1,california-key-benchmark,california-security-group,california,ami-e0ba5c83,sg-0a66ccb6ab9161a14
4,us-west-2,oregon-key-benchmark,oregon-security-group,oregon,ami-a9d09ed1,sg-020cb5729fa212d43
5,ap-northeast-1,tokyo-key-benchmark,tokyo-security-group,tokyo,ami-e99f4896,sg-009aeb97f675c1ad5
6,ap-southeast-1,singapore-key-benchmark,singapore-security-group,singapore,ami-05868579,sg-05f9b60044a19dfb2
7,eu-central-1,frankfurt-key-benchmark,frankfurt-security-group,frankfurt,ami-7c4f7097,sg-0bb06fcd8b25b5910
8,eu-west-1,ireland-key-benchmark,ireland-security-group,ireland,ami-466768ac,sg-0aa8954acb79fdb58

@ -1,28 +0,0 @@
#!/bin/bash
# Create soldier instances in all 8 regions, collect their public ips,
# generate the distribution config, and publish it for the commander.
# Usage: create_deploy_soldiers.sh <instances_per_region> <shards> <clients>
if [ $# -lt 3 ]; then
echo "Please provide # of instances, # of shards, # of clients"
exit 1
fi
INSTANCE_NUM=$1
SHARD_NUM=$2
CLIENT_NUM=$3
SLEEP_TIME=10
echo "Creating $INSTANCE_NUM instances at 8 regions"
python create_solider_instances.py --regions 1,2,3,4,5,6,7,8 --instances $INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM
echo "Sleep for $SLEEP_TIME seconds"
sleep $SLEEP_TIME
# Typo fix: was "Rung collecting raw ips".
echo "Run collecting raw ips"
python collect_public_ips.py --instance_output instance_output.txt
# sleep 10
echo "Generate distribution_config"
python generate_distribution_config.py --ip_list_file raw_ip.txt --shard_number $SHARD_NUM --client_number $CLIENT_NUM
echo "Run commander prepare"
python commander_prepare.py
aws s3 cp distribution_config.txt s3://unique-bucket-bin/distribution_config.txt --acl public-read-write

@ -1,193 +0,0 @@
import argparse
import base64
import boto3
import datetime
import json
import sys
import threading
import time
import enum
#TODO REMOVE UTILS
from utils import utils, spot_fleet, logger
LOGGER = logger.getLogger(__file__)
REGION_NAME = 'region_name'
REGION_KEY = 'region_key'
REGION_SECURITY_GROUP = 'region_security_group'
REGION_SECURITY_GROUP_ID = 'region_security_group_id'
REGION_HUMAN_NAME = 'region_human_name'
INSTANCE_TYPE = 't2.micro'
REGION_AMI = 'region_ami'
# InstanceResource enumerates the EC2 purchase models this script supports.
# Note: only ON_DEMAND and SPOT_FLEET are handled by the main dispatch below.
class InstanceResource(enum.Enum):
ON_DEMAND = 'ondemand'
SPOT_INSTANCE = 'spot'
SPOT_FLEET = 'fleet'
def __str__(self):
return self.value
def run_one_region_on_demand_instances(config, region_number, number_of_instances, tag):
    """Create one batch of on-demand instances in a single region.

    Returns the node_name_tag assigned to the batch and the region's
    ec2 client (so the caller can query the created instances).
    """
    client = utils.create_client(config, region_number)
    name_tag = create_instances(config, client, region_number, number_of_instances, tag)
    LOGGER.info("Created %s in region %s" % (name_tag, region_number))
    return name_tag, client
def create_instances(config, ec2_client, region_number, number_of_instances, tag):
    """Launch on-demand instances and wait for them to be RUNNING with ips.

    Returns the batch's node_name_tag (even when fewer instances than
    requested came up; callers only log the tag).
    """
    node_name_tag = utils.get_node_name_tag2(region_number, tag)
    LOGGER.info("Creating node_name_tag: %s" % node_name_tag)
    available_zone = utils.get_one_availability_zone(ec2_client)
    LOGGER.info("Looking at zone %s to create instances." % available_zone)
    time.sleep(2)
    ec2_client.run_instances(
        MinCount=number_of_instances,
        MaxCount=number_of_instances,
        ImageId=config[region_number][utils.REGION_AMI],
        Placement={
            'AvailabilityZone': available_zone,
        },
        SecurityGroups=[config[region_number][utils.REGION_SECURITY_GROUP]],
        IamInstanceProfile={
            'Name': utils.IAM_INSTANCE_PROFILE
        },
        KeyName=config[region_number][utils.REGION_KEY],
        UserData=utils.USER_DATA,
        InstanceType=utils.INSTANCE_TYPE,
        TagSpecifications=[
            {
                'ResourceType': 'instance',
                'Tags': [
                    {
                        'Key': 'Name',
                        'Value': node_name_tag
                    },
                ]
            },
        ],
    )

    # Poll until the new instances' ids are visible.
    retry_count = 10
    while retry_count > 0:
        try:
            time.sleep(20)
            instance_ids = utils.get_instance_ids2(ec2_client, node_name_tag)
            LOGGER.info("Waiting for all %d instances in region %s with node_name_tag %s to be in RUNNING" % (
                len(instance_ids), region_number, node_name_tag))
            break
        except Exception:  # was a bare except: that hid the real error
            retry_count -= 1
            LOGGER.exception("Failed to get instance ids. Retry again.")

    # Wait for every instance to reach the RUNNING state.
    retry_count = 10
    while retry_count > 0:
        try:
            time.sleep(20)
            waiter = ec2_client.get_waiter('instance_running')
            waiter.wait(InstanceIds=instance_ids)
            break
        except Exception:  # was a bare except:
            retry_count -= 1
            LOGGER.exception("Failed to wait.")

    # Wait until every instance has a public ip assigned.
    retry_count = 10
    while retry_count > 0:
        time.sleep(20)
        LOGGER.info("Waiting ...")
        ip_list = utils.collect_public_ips_from_ec2_client(
            ec2_client, node_name_tag)
        if len(ip_list) == number_of_instances:
            LOGGER.info("Created %d instances" % number_of_instances)
            return node_name_tag
        # Bug fix: was `retry_count -= 10`, which allowed only one attempt
        # despite the retry budget of 10.
        retry_count -= 1
    LOGGER.info("Can not get %d instances" % number_of_instances)
    return node_name_tag
# Serializes the shared-output-file writes done by the worker threads.
LOCK_FOR_RUN_ONE_REGION = threading.Lock()

def run_for_one_region_on_demand(config, region_number, number_of_instances, fout, fout2):
    """Create all requested instances for one region, batch by batch.

    Writes "node_name_tag region" lines to fout and one line per instance id
    to fout2; writes are guarded by LOCK_FOR_RUN_ONE_REGION because several
    region threads share the same files.
    """
    tag = 0
    remaining = int(number_of_instances)
    while remaining > 0:
        batch_size = min(utils.MAX_INSTANCES_FOR_DEPLOYMENT, remaining)
        node_name_tag, ec2_client = run_one_region_on_demand_instances(
            config, region_number, batch_size, tag)
        if node_name_tag:
            LOGGER.info("Managed to create instances for region %s with name_name_tag %s" %
                        (region_number, node_name_tag))
            instance_ids = utils.get_instance_ids2(ec2_client, node_name_tag)
            with LOCK_FOR_RUN_ONE_REGION:
                fout.write("%s %s\n" % (node_name_tag, region_number))
                for instance_id in instance_ids:
                    fout2.write(instance_id + " " + node_name_tag + " " + region_number +
                                " " + config[region_number][utils.REGION_NAME] + "\n")
        else:
            LOGGER.info("Failed to create instances for region %s" % region_number)
        remaining -= batch_size
        tag += 1
def read_region_config(region_config='configuration.txt'):
    """Parse the comma-separated region configuration file.

    Each non-empty line is:
        region_num,name,key,security_group,human_name,ami,security_group_id
    Returns {region_num: {field_constant: value}} and caches the result in
    the module-level CONFIG.
    """
    global CONFIG
    config = {}
    with open(region_config, 'r') as f:
        for myline in f:
            myline = myline.strip()
            if not myline:
                continue  # tolerate blank/trailing lines
            mylist = [item.strip() for item in myline.split(',')]
            region_num = mylist[0]
            config[region_num] = {
                REGION_NAME: mylist[1],
                REGION_KEY: mylist[2],
                REGION_SECURITY_GROUP: mylist[3],
                REGION_HUMAN_NAME: mylist[4],
                REGION_AMI: mylist[5],
                REGION_SECURITY_GROUP_ID: mylist[6],
            }
    CONFIG = config
    return config
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='This script helps you start instances across multiple regions')
    parser.add_argument('--regions', type=str, dest='regions',
                        default='3', help="Supply a csv list of all regions")
    parser.add_argument('--instances', type=str, dest='num_instance_list',
                        default='1', help='number of instances in respective of region')
    parser.add_argument('--region_config', type=str,
                        dest='region_config', default='configuration.txt')
    parser.add_argument('--instance_output', type=str, dest='instance_output',
                        default='instance_output.txt', help='the file to append or write')
    parser.add_argument('--instance_ids_output', type=str, dest='instance_ids_output',
                        default='instance_ids_output.txt', help='the file to append or write')
    parser.add_argument('--instance_resource', dest='instance_resource', type=InstanceResource,
                        default=InstanceResource.ON_DEMAND, choices=list(InstanceResource))
    # NOTE(review): argparse `type=bool` treats any non-empty string
    # (including "False") as True -- confirm callers before switching to
    # action='store_true'.
    parser.add_argument('--append', dest='append', type=bool, default=False,
                        help='append to the current instance_output')
    args = parser.parse_args()

    config = read_region_config(args.region_config)
    region_list = args.regions.split(',')
    num_instance_list = args.num_instance_list.split(',')
    instance_resource = args.instance_resource
    assert len(region_list) == len(num_instance_list), "number of regions: %d != number of instances per region: %d" % (
        len(region_list), len(num_instance_list))

    write_mode = "a" if args.append else "w"
    with open(args.instance_output, write_mode) as fout, open(args.instance_ids_output, write_mode) as fout2:
        thread_pool = []
        for i in range(len(region_list)):
            region_number = region_list[i]
            number_of_instances = num_instance_list[i]
            if instance_resource == InstanceResource.ON_DEMAND:
                t = threading.Thread(target=run_for_one_region_on_demand, args=(
                    config, region_number, number_of_instances, fout, fout2))
            elif instance_resource == InstanceResource.SPOT_FLEET:
                t = threading.Thread(target=spot_fleet.run_one_region, args=(
                    region_number, number_of_instances, fout, fout2))
            else:
                # Bug fix: the original left `t` unbound for SPOT_INSTANCE
                # and crashed on t.start(); skip unsupported resources.
                LOGGER.info("Unsupported instance resource %s for region %s" %
                            (instance_resource, region_number))
                continue
            LOGGER.info("creating thread for region %s" % region_number)
            t.start()
            thread_pool.append(t)
        for t in thread_pool:
            t.join()
        LOGGER.info("done.")

@ -1,8 +0,0 @@
# Change the commander address
# Download the commander's collected upload folder into ./tmp/ over scp.
# Usage: download_log_from_commander.sh <commander_ip>
if [ $# -eq 0 ]; then
echo "Please provide ip address of the commander"
exit 1
fi
ADDRESS=$1
mkdir -p ./tmp
scp -r -i "california-key-benchmark.pem" ec2-user@$ADDRESS:~/projects/src/harmony-benchmark/bin/upload ./tmp/

@ -1,17 +0,0 @@
# Make sure to have all keys with mode 600 at harmony-benchmark directory.
IFS=$'\n'
rm -rf ./tmp
mkdir tmp
# Try every regional key against every leader address; only the matching
# key succeeds for a given host, the other scp attempts fail harmlessly
# (same effect as the original eight hand-written scp lines).
REGIONS="california frankfurt ireland ohio oregon singapore tokyo virginia"
for address in $(cat ./leader_addresses.txt)
do
echo "trying to download from address $address"
mkdir -p tmp/$address
for region in $REGIONS
do
scp -r -o "StrictHostKeyChecking no" -i ../keys/$region-key-benchmark.pem ec2-user@$address:/home/tmp_log/* ./tmp/$address/
done
done

@ -1,233 +0,0 @@
/*
Commander has two modes to setup configuration: Local and S3.
Local Config Mode
The Default Mode.
Add `-mode local` or omit `-mode` to enter local config mode. In this mode, the `commander` will host the config file `distribution_config.txt` on the commander machine and `soldier`s will download the config file from `http://{commander_ip}:{commander_port}/distribution_config.txt`.
Remote Config Mode
Add `-mode remote` to enter remote config mode. In this mode, the `soldier`s will download the config file from a remote URL (use `-config_url {url}` to set the URL).
*/
package main
import (
"bufio"
"flag"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"strings"
"time"
"github.com/simple-rules/harmony-benchmark/aws-experiment-launch/experiment/utils"
"github.com/simple-rules/harmony-benchmark/configr"
)
// commanderSetting holds the commander's runtime configuration,
// populated once by config() at startup.
type commanderSetting struct {
ip string
port string
mode string
// Options in s3 mode
configURL string
configr *configr.Configr
}
// sessionInfo tracks the current "init" session: its id and the folder
// soldier log uploads are written into.
type sessionInfo struct {
id string
uploadFolder string
}
var (
setting commanderSetting
session sessionInfo
)
const (
// DistributionFileName is the local file name of the distribution config.
DistributionFileName = "distribution_config.txt"
// DefaultConfigUrl is the fallback remote config location used in s3 mode.
DefaultConfigUrl = "https://s3-us-west-2.amazonaws.com/unique-bucket-bin/distribution_config.txt"
)
// handleCommand parses one console command and dispatches it.
//
// Supported commands:
//   config              – (re)load the distribution config; in s3 mode the
//                         file is downloaded from setting.configURL first
//   init                – start a new session and tell all nodes to init
//   ping/kill/log/log2  – forwarded verbatim to every node
func handleCommand(command string) {
	fields := strings.Split(command, " ")
	if len(fields) == 0 {
		return
	}

	switch fields[0] {
	case "config":
		if setting.mode == "s3" {
			// In s3 mode, download the config file from configURL first.
			if err := utils.DownloadFile(DistributionFileName, setting.configURL); err != nil {
				panic(err)
			}
		}
		if err := setting.configr.ReadConfigFile(DistributionFileName); err != nil {
			log.Println("Failed to read config file")
		} else {
			log.Printf("The loaded config has %v nodes\n", len(setting.configr.GetConfigEntries()))
		}
	case "init":
		session.id = time.Now().Format("150405-20060102")
		// create upload folder
		session.uploadFolder = fmt.Sprintf("upload/%s", session.id)
		if err := os.MkdirAll(session.uploadFolder, os.ModePerm); err != nil {
			log.Println("Failed to create upload folder", session.uploadFolder)
			return
		}
		log.Println("New session", session.id)
		dictateNodes(fmt.Sprintf("init %v %v %v %v", setting.ip, setting.port, setting.configURL, session.id))
	case "ping", "kill", "log", "log2":
		dictateNodes(command)
	default:
		log.Println("Unknown command")
	}
}
// config initializes the global commander settings.
//
// In "local" mode the commander serves the config file itself, so the URL is
// derived from its own ip/port; otherwise the caller-supplied URL is used.
func config(ip string, port string, mode string, configURL string) {
	setting = commanderSetting{
		ip:      ip,
		port:    port,
		mode:    mode,
		configr: configr.NewConfigr(),
	}
	if mode == "local" {
		setting.configURL = fmt.Sprintf("http://%s:%s/%s", ip, port, DistributionFileName)
	} else {
		setting.configURL = configURL
	}
}
// dictateNodes broadcasts command to every soldier in the loaded config,
// concurrently, then logs how many reported success.
func dictateNodes(command string) {
	entries := setting.configr.GetConfigEntries()
	results := make(chan int)
	for _, entry := range entries {
		soldierPort := "1" + entry.Port // the port number of solider is "1" + node port
		addr := entry.IP + ":" + soldierPort
		go func(addr string) {
			results <- dictateNode(addr, command)
		}(addr)
	}
	succeeded := 0
	for range entries {
		succeeded += <-results
	}
	log.Printf("Finished %s with %v nodes\n", command, succeeded)
}
// dictateNode sends command to the soldier at addr and waits for one short
// reply. It returns 1 on success and 0 on any failure (dial error, write
// error, read error, or a reply containing "Failed").
func dictateNode(addr string, command string) int {
	// creates client
	conn, err := net.DialTimeout("tcp", addr, 5*time.Second)
	if err != nil {
		log.Println(err)
		return 0
	}
	defer conn.Close()

	// send command
	if _, err = conn.Write([]byte(command)); err != nil {
		log.Printf("Failed to send command to %s", addr)
		return 0
	}

	// read response; a reply containing "Failed" counts as failure
	// (flattened the original nested if/else-after-return shape)
	buff := make([]byte, 1024)
	n, err := conn.Read(buff)
	if err != nil {
		return 0
	}
	if strings.Contains(string(buff[:n]), "Failed") {
		return 0
	}
	return 1
}
// handleUploadRequest accepts multipart POST uploads from soldiers and
// stores each part as a file in the current session's upload folder.
func handleUploadRequest(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		// reject non-post requests
		jsonResponse(w, http.StatusBadRequest, "Only post request is accepted.")
		return
	}
	reader, err := r.MultipartReader()
	if err != nil {
		jsonResponse(w, http.StatusBadRequest, err.Error())
		return
	}
	for {
		part, err := reader.NextPart()
		if err == io.EOF {
			break
		}
		if err != nil {
			// Bug fix: the original handled only io.EOF and would then
			// dereference a nil part on any other NextPart error.
			jsonResponse(w, http.StatusBadRequest, err.Error())
			return
		}
		log.Println(part.FileName())
		dst, err := os.Create(fmt.Sprintf("%s/%s", session.uploadFolder, part.FileName()))
		if err != nil {
			jsonResponse(w, http.StatusInternalServerError, err.Error())
			return
		}
		// Close each destination file as soon as its part is written; the
		// original deferred every close to the end of the request.
		_, copyErr := io.Copy(dst, part)
		dst.Close()
		if copyErr != nil {
			jsonResponse(w, http.StatusInternalServerError, copyErr.Error())
			return
		}
	}
}
// jsonResponse writes message to w with the given HTTP status code and
// mirrors it to the commander's log.
func jsonResponse(w http.ResponseWriter, code int, message string) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
fmt.Fprint(w, message)
log.Println(message)
}
// serve starts the commander's HTTP endpoint and blocks forever.
// In local mode it also hosts the working directory so soldiers can fetch
// the distribution config; /upload receives log uploads from soldiers.
func serve() {
	if setting.mode == "local" {
		// Only host config file if in local mode
		http.Handle("/", http.FileServer(http.Dir("./")))
	}
	http.HandleFunc("/upload", handleUploadRequest)
	// Bug fix: log before ListenAndServe — the call blocks, so the original
	// post-call log line was unreachable.
	log.Printf("Start to host upload endpoint at http://%s:%s/upload\n", setting.ip, setting.port)
	if err := http.ListenAndServe(":"+setting.port, nil); err != nil {
		log.Fatalf("Failed to setup server! Error: %s", err.Error())
	}
}
// main wires up the flags, starts the HTTP side of the commander in the
// background, then reads console commands from stdin until EOF.
func main() {
	var (
		ip        = flag.String("ip", "127.0.0.1", "The ip of commander, i.e. this machine")
		port      = flag.String("port", "8080", "The port which the commander uses to communicate with soldiers")
		mode      = flag.String("mode", "local", "The config mode, local or s3")
		configURL = flag.String("config_url", DefaultConfigUrl, "The config URL")
	)
	flag.Parse()

	config(*ip, *port, *mode, *configURL)
	go serve()

	scanner := bufio.NewScanner(os.Stdin)
	for {
		log.Printf("Listening to Your Command:")
		if !scanner.Scan() {
			break
		}
		handleCommand(scanner.Text())
	}
}

@ -1,312 +0,0 @@
/*
Soldier is responsible for receiving commands from commander and doing tasks such as starting nodes, uploading logs.
cd harmony-benchmark/bin
go build -o soldier ../aws-experiment-launch/experiment/soldier/main.go
./soldier -ip={node_ip} -port={node_port}
*/
package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"mime/multipart"
"net"
"net/http"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/simple-rules/harmony-benchmark/aws-experiment-launch/experiment/soldier/s3"
"github.com/simple-rules/harmony-benchmark/aws-experiment-launch/experiment/utils"
"github.com/simple-rules/harmony-benchmark/configr"
globalUtils "github.com/simple-rules/harmony-benchmark/utils"
)
// soliderSetting holds this soldier's own ip/port, set from flags in main.
// (The "solider" spelling is kept: the name is used throughout the file.)
type soliderSetting struct {
ip string
port string
}
// sessionInfo tracks one "init" session: where the commander lives, the
// downloaded config, and where this node writes its logs.
type sessionInfo struct {
id string
commanderIP string
commanderPort string
localConfigFileName string
logFolder string
configr *configr.Configr
myConfig configr.ConfigEntry
}
const (
// bucketName is the S3 bucket used by the "log2" upload path.
bucketName = "richard-bucket-test"
// logFolderPrefix is the parent directory for per-session log folders.
logFolderPrefix = "../tmp_log/"
)
var (
setting soliderSetting
globalSession sessionInfo
)
// socketServer listens on "1"+node_port for commander connections and
// serves each one on its own goroutine. It blocks forever.
func socketServer() {
	soldierPort := "1" + setting.port // the soldier port is "1" + node port
	listen, err := net.Listen("tcp4", ":"+soldierPort)
	if err != nil {
		// log.Fatalf exits; the original's following os.Exit(1) was unreachable.
		log.Fatalf("Socket listen port %s failed,%s", soldierPort, err)
	}
	defer listen.Close()
	log.Printf("Begin listen for command on port: %s", soldierPort)
	for {
		conn, err := listen.Accept()
		if err != nil {
			// Bug fix: was log.Fatalln, which killed the whole soldier on a
			// single failed accept and made the continue unreachable.
			log.Println(err)
			continue
		}
		go handler(conn)
	}
}
// handler reads commands from one commander connection until EOF and
// executes each. A read error other than EOF closes only this connection.
func handler(conn net.Conn) {
	defer conn.Close()
	buf := make([]byte, 1024)
	r := bufio.NewReader(conn)
	w := bufio.NewWriter(conn)
	for {
		n, err := r.Read(buf)
		switch err {
		case nil:
			data := string(buf[:n])
			log.Println("Received command", data)
			handleCommand(data, w)
			log.Println("Waiting for new command...")
		case io.EOF:
			return
		default:
			// Bug fix: was log.Fatalf, which terminated the entire soldier
			// process on one bad connection and made the return unreachable.
			log.Printf("Receive data failed:%s", err)
			return
		}
	}
}
// handleCommand dispatches one commander command (keyed by its first word)
// to the matching handler. Unknown commands are silently ignored.
func handleCommand(command string, w *bufio.Writer) {
	fields := strings.Split(command, " ")
	if len(fields) == 0 {
		return
	}
	switch fields[0] {
	case "ping":
		handlePingCommand(w)
	case "init":
		handleInitCommand(fields[1:], w)
	case "kill":
		handleKillCommand(w)
	case "log":
		handleLogCommand(w)
	case "log2":
		handleLog2Command(w)
	}
}
func handleInitCommand(args []string, w *bufio.Writer) {
// init ip port config_file sessionID
log.Println("Init command", args)
// read arguments
ip := args[0]
globalSession.commanderIP = ip
port := args[1]
globalSession.commanderPort = port
configURL := args[2]
sessionID := args[3]
globalSession.id = sessionID
globalSession.logFolder = fmt.Sprintf("%slog-%v", logFolderPrefix, sessionID)
// create local config file
globalSession.localConfigFileName = fmt.Sprintf("node_config_%v_%v.txt", setting.port, globalSession.id)
utils.DownloadFile(globalSession.localConfigFileName, configURL)
log.Println("Successfully downloaded config")
globalSession.configr.ReadConfigFile(globalSession.localConfigFileName)
globalSession.myConfig = *globalSession.configr.GetMyConfigEntry(setting.ip, setting.port)
if err := runInstance(); err == nil {
logAndReply(w, "Done init.")
} else {
logAndReply(w, "Failed.")
}
}
// handleKillCommand stops whatever process is bound to this node's port and
// reports the outcome back to the commander.
func handleKillCommand(w *bufio.Writer) {
	log.Println("Kill command")
	if err := killPort(setting.port); err != nil {
		logAndReply(w, "Failed.")
		return
	}
	logAndReply(w, "Done kill.")
}
// killPort force-kills the process listening on the given TCP port.
//
// NOTE(review): the windows branch passes the whole
// "(Get-NetTCPConnection ...).OwningProcess -Force" expression as one
// argument to Stop-Process, which does not look like a valid invocation —
// confirm on a windows host before relying on it.
func killPort(port string) error {
	if runtime.GOOS == "windows" {
		command := fmt.Sprintf("(Get-NetTCPConnection -LocalPort %s).OwningProcess -Force", port)
		return globalUtils.RunCmd("Stop-Process", "-Id", command)
	}
	// Unix: find the LISTENing pid on the port and kill -9 it.
	// (Removed the else-after-return; behavior unchanged.)
	command := fmt.Sprintf("lsof -i tcp:%s | grep LISTEN | awk '{print $2}' | xargs kill -9", port)
	return globalUtils.RunCmd("bash", "-c", command)
}
// handlePingCommand answers a commander liveness probe.
func handlePingCommand(w *bufio.Writer) {
log.Println("Ping command")
logAndReply(w, "I'm alive")
}
// handleLogCommand uploads every file in the current session's log folder
// to the commander's /upload endpoint as one multipart POST.
func handleLogCommand(w *bufio.Writer) {
	log.Println("Log command")
	files, err := ioutil.ReadDir(globalSession.logFolder)
	if err != nil {
		logAndReply(w, fmt.Sprintf("Failed to read log folder. Error: %s", err.Error()))
		return
	}
	filePaths := make([]string, len(files))
	for i, f := range files {
		filePaths[i] = fmt.Sprintf("%s/%s", globalSession.logFolder, f.Name())
	}
	req, err := newUploadFileRequest(
		fmt.Sprintf("http://%s:%s/upload", globalSession.commanderIP, globalSession.commanderPort),
		"file",
		filePaths,
		nil)
	if err != nil {
		logAndReply(w, fmt.Sprintf("Failed to create upload request. Error: %s", err.Error()))
		return
	}
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		logAndReply(w, fmt.Sprintf("Failed to upload log. Error: %s", err.Error()))
		return
	}
	// Bug fix: drain and close the response body (the original leaked it),
	// which also lets the transport reuse the connection.
	io.Copy(ioutil.Discard, resp.Body)
	resp.Body.Close()
	logAndReply(w, "Upload log done!")
}
// newUploadFileRequest creates a multipart POST request that uploads every
// file in paths under form field paramName, plus optional extra params.
func newUploadFileRequest(uri string, paramName string, paths []string, params map[string]string) (*http.Request, error) {
	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	for _, path := range paths {
		// Each file is added via a helper so it is closed as soon as it is
		// copied (the original deferred every close to function exit and
		// ignored CreateFormFile/Copy errors).
		if err := addFilePart(writer, paramName, path); err != nil {
			return nil, err
		}
	}
	for key, val := range params {
		_ = writer.WriteField(key, val)
	}
	if err := writer.Close(); err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", uri, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
	return req, nil
}

// addFilePart copies one file into the multipart writer and closes it.
func addFilePart(writer *multipart.Writer, paramName string, path string) error {
	file, err := os.Open(path)
	if err != nil {
		return err
	}
	defer file.Close()
	part, err := writer.CreateFormFile(paramName, filepath.Base(path))
	if err != nil {
		return err
	}
	_, err = io.Copy(part, file)
	log.Println(path) // bug fix: was log.Printf(path) — path is data, not a format string
	return err
}
func logAndReply(w *bufio.Writer, message string) {
log.Println(message)
w.Write([]byte(message))
w.Flush()
}
// handleLog2Command uploads the session's first log file to S3.
// TODO: currently only upload the first file.
func handleLog2Command(w *bufio.Writer) {
	log.Println("Log command")
	files, err := ioutil.ReadDir(globalSession.logFolder)
	if err != nil {
		logAndReply(w, fmt.Sprintf("Failed to create log folder. Error: %s", err.Error()))
		return
	}
	// Bug fix: the original indexed filePaths[0] unconditionally and
	// panicked on an empty log folder.
	if len(files) == 0 {
		logAndReply(w, "Failed to create upload request. Error: no log files")
		return
	}
	filePaths := make([]string, len(files))
	for i, f := range files {
		filePaths[i] = fmt.Sprintf("%s/%s", globalSession.logFolder, f.Name())
	}
	_, err = s3.UploadFile(bucketName, filePaths[0], strings.Replace(filePaths[0], logFolderPrefix, "", 1))
	if err != nil {
		logAndReply(w, fmt.Sprintf("Failed to create upload request. Error: %s", err.Error()))
		return
	}
	logAndReply(w, "Upload log done!")
}
// runInstance creates the session log folder and launches either the txgen
// client or the benchmark node, depending on this node's configured role.
func runInstance() error {
	os.MkdirAll(globalSession.logFolder, os.ModePerm)
	switch globalSession.myConfig.Role {
	case "client":
		return runClient()
	default:
		return runNode()
	}
}
// runNode launches the benchmark node binary with this soldier's ip/port,
// the downloaded config file, and the session log folder.
func runNode() error {
log.Println("running instance")
return globalUtils.RunCmd("./benchmark", "-ip", setting.ip, "-port", setting.port, "-config_file", globalSession.localConfigFileName, "-log_folder", globalSession.logFolder)
}
// runClient launches the txgen client binary against the downloaded config,
// writing its logs into the session log folder.
func runClient() error {
log.Println("running client")
return globalUtils.RunCmd("./txgen", "-config_file", globalSession.localConfigFileName, "-log_folder", globalSession.logFolder)
}
// main parses the node's ip/port flags and runs the soldier's blocking
// command server.
func main() {
	var (
		ip   = flag.String("ip", "127.0.0.1", "IP of the node.")
		port = flag.String("port", "9000", "port of the node.")
	)
	flag.Parse()
	setting = soliderSetting{ip: *ip, port: *port}
	socketServer()
}

@ -1,93 +0,0 @@
package s3
import (
"fmt"
"log"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
// createSession builds an AWS session pinned to us-west-2 with up to three
// retries per request; it panics (via Must) if session construction fails.
func createSession() *session.Session {
	cfg := aws.Config{
		Region:     aws.String(endpoints.UsWest2RegionID),
		MaxRetries: aws.Int(3),
	}
	return session.Must(session.NewSession(&cfg))
}
func CreateBucket(bucketName string, region string) {
sess := createSession()
svc := s3.New(sess)
input := &s3.CreateBucketInput{
Bucket: aws.String(bucketName),
CreateBucketConfiguration: &s3.CreateBucketConfiguration{
LocationConstraint: aws.String(region),
},
}
result, err := svc.CreateBucket(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case s3.ErrCodeBucketAlreadyExists:
fmt.Println(s3.ErrCodeBucketAlreadyExists, aerr.Error())
case s3.ErrCodeBucketAlreadyOwnedByYou:
fmt.Println(s3.ErrCodeBucketAlreadyOwnedByYou, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// UploadFile uploads the local file fileName to bucketName under key using
// the s3manager uploader, returning the upload result.
func UploadFile(bucketName string, fileName string, key string) (result *s3manager.UploadOutput, err error) {
	sess := createSession()
	uploader := s3manager.NewUploader(sess)
	f, err := os.Open(fileName)
	if err != nil {
		log.Println("Failed to open file", err)
		return nil, err
	}
	// Bug fix: the file handle used to leak; close it when the upload is done.
	defer f.Close()
	// Upload the file to S3.
	result, err = uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String(key),
		Body:   f,
	})
	if err != nil {
		log.Println("failed to upload file", err)
		return nil, err
	}
	fmt.Printf("file uploaded to, %s\n", result.Location)
	return result, nil
}
// DownloadFile downloads the S3 object bucketName/key into the local file
// fileName, returning the number of bytes written.
func DownloadFile(bucketName string, fileName string, key string) (n int64, err error) {
	sess := createSession()
	downloader := s3manager.NewDownloader(sess)
	f, err := os.Create(fileName)
	if err != nil {
		return
	}
	// Bug fix: the created file used to leak its handle; close it on return.
	defer f.Close()
	n, err = downloader.Download(f, &s3.GetObjectInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String(key),
	})
	return
}

@ -1,30 +0,0 @@
package utils
import (
	"fmt"
	"io"
	"net/http"
	"os"
)
// DownloadFile fetches url via HTTP GET and writes the response body to
// filepath, creating (or truncating) that file.
func DownloadFile(filepath string, url string) error {
	// Fetch first, so a failed request no longer leaves a truncated file behind.
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Robustness fix: a 404/500 error page used to be saved as the file content.
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("downloading %s: unexpected status %s", url, resp.Status)
	}
	// Create the file
	out, err := os.Create(filepath)
	if err != nil {
		return err
	}
	defer out.Close()
	// Write the body to file
	_, err = io.Copy(out, resp.Body)
	return err
}

@ -1,37 +0,0 @@
import argparse
import logging
import sys
from utils import utils
logging.basicConfig(level=logging.INFO, format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s')
LOGGER = logging.getLogger(__file__)
LOGGER.setLevel(logging.INFO)
if __name__ == "__main__":
    # CLI entry point: write a distribution config either from a raw IP file
    # or (when no file is given) by querying AWS for tagged instances.
    parser = argparse.ArgumentParser(description='This script helps you to genereate distribution config')
    parser.add_argument('--ip_list_file', type=str, dest='ip_list_file',
                        default='raw_ip.txt', help="the file containing available raw ips")
    # If the ip_list_file is None we need to use the region, node_name_tag and region_config to collect raw_ip
    parser.add_argument('--region', type=str, dest='region_number',
                        default="4", help="region number")
    parser.add_argument('--node_name_tag', type=str,
                        dest='node_name_tag', default='4-NODE-23-36-01-2018-07-05')
    parser.add_argument('--region_config', type=str,
                        dest='region_config', default='configuration.txt')
    parser.add_argument('--shard_number', type=int, dest='shard_number', default=1)
    parser.add_argument('--client_number', type=int, dest='client_number', default=1)
    parser.add_argument('--distribution_config', type=str,
                        dest='distribution_config', default='distribution_config.txt')
    args = parser.parse_args()

    # Idiom fix: compare to None with `is`. NOTE(review): given the default of
    # 'raw_ip.txt', this branch is unreachable from the command line — confirm
    # whether the AWS-query path is ever exercised.
    if args.ip_list_file is None:
        utils.generate_distribution_config2(
            args.region_number, args.node_name_tag, args.region_config,
            args.shard_number, args.client_number, args.distribution_config)
    else:
        utils.generate_distribution_config3(args.shard_number, args.client_number,
                                            args.ip_list_file, args.distribution_config)
    LOGGER.info("Done writing %s" % args.distribution_config)

@ -1 +0,0 @@
# Copy the commander's `upload` binary from the fixed Ohio host into tmp/.
scp -i ../keys/ohio-key-benchmark.pem ec2-user@18.219.217.193:~/projects/src/harmony-benchmark/bin/upload tmp/

@ -1,96 +0,0 @@
import json
import sys
import os
import argparse
def formatFloat(v):
    """Render v with exactly two decimal places."""
    return "{:.2f}".format(v)
def formatPercent(v):
    """Two-decimal percentage string, e.g. '12.34%'."""
    return "%s%%" % formatFloat(v)
def formatMem(v):
    """Byte count rendered as megabytes (10^6 bytes), e.g. '12.34MB'."""
    megabytes = float(v) / 10 ** 6
    return formatFloat(megabytes) + "MB"
class Profiler:
    """Accumulates TPS/CPU/memory samples parsed from structured log records
    and prints a summary via report()."""

    def __init__(self):
        # TPS (transactions per second) aggregates
        self.tps = 0
        self.tps_max = 0
        self.tps_min = sys.maxsize
        self.tps_count = 0
        # CPU aggregates
        self.cpu_percent = 0
        self.cpu_usr = 0
        self.cpu_sys = 0
        self.cpu_count = 0
        # Memory aggregates
        self.mem_rss = 0
        self.mem_rss_max = 0
        self.mem_count = 0

    def handleTPS(self, obj):
        """Fold one 'TPS Report' record into the running aggregates."""
        tps = obj["TPS"]
        self.tps += tps
        self.tps_max = max(self.tps_max, tps)
        self.tps_min = min(self.tps_min, tps)
        self.tps_count += 1

    def handleCPU(self, obj):
        """Fold one 'CPU Report' record; obj['times'] is a JSON-encoded string.

        http://psutil.readthedocs.io/en/latest/#psutil.Process.cpu_times
        http://psutil.readthedocs.io/en/latest/#psutil.Process.cpu_percent
        """
        self.cpu_percent += obj["percent"]
        times = json.loads(obj["times"])
        # Note: usr/sys keep only the latest sample (cumulative process times).
        self.cpu_usr = times["user"]
        self.cpu_sys = times["system"]
        self.cpu_count += 1

    def handleMem(self, obj):
        """Fold one 'Mem Report' record; obj['info'] is a JSON-encoded string.

        http://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info
        """
        info = json.loads(obj["info"])
        rss = info["rss"]
        self.mem_rss += rss
        self.mem_rss_max = max(self.mem_rss_max, rss)
        self.mem_count += 1

    def report(self):
        """Print aggregated stats. Sections without samples are skipped
        (bug fix: this used to raise ZeroDivisionError on empty logs)."""
        if self.tps_count:
            print("TPS",
                  "Avg", formatFloat(self.tps / self.tps_count),
                  "Min", formatFloat(self.tps_min),
                  "Max", formatFloat(self.tps_max))
        if self.cpu_count:
            print("CPU",
                  "Percent (Avg)", formatPercent(self.cpu_percent / self.cpu_count),
                  "Time (Usr)", str(self.cpu_usr) + "s",
                  "Time (Sys)", str(self.cpu_sys) + "s")
        if self.mem_count:
            print("Mem",
                  "RSS (Max)", formatMem(self.mem_rss_max),
                  "RSS (Avg)", formatMem(self.mem_rss / self.mem_count))
def profileFile(path):
    """Parse one structured-log file and print its aggregated report."""
    print(path)
    profiler = Profiler()
    # Dispatch table replaces the original if/elif chain.
    handlers = {
        "TPS Report": profiler.handleTPS,
        "CPU Report": profiler.handleCPU,
        "Mem Report": profiler.handleMem,
    }
    with open(path) as f:
        for line in f:
            record = json.loads(line)
            if record["lvl"] != "info":
                continue
            handler = handlers.get(record["msg"])
            if handler is not None:
                handler(record)
    profiler.report()
# Example: python report_extractor.py --folder ../tmp_log/log-20180713-205431
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="This script extracts reports from log files")
    parser.add_argument("--folder", type=str, dest="folder",
                        default="",
                        help="the path to the log folder")
    args = parser.parse_args()
    # Only leader logs carry the consensus-level reports we aggregate.
    leader_logs = [name for name in os.listdir(args.folder) if "leader" in name]
    for name in leader_logs:
        profileFile(os.path.join(args.folder, name))

@ -1,18 +0,0 @@
# Request a single one-hour spot instance that boots the commander userdata.
# AMI, key pair, security group and instance profile are hard-coded for the
# "richard-spot-instance" test account. (No comments may be inserted below:
# the backslashes make this one continued command.)
aws ec2 request-spot-instances \
--instance-count 1 \
--block-duration-minutes 60 \
--launch-specification "{ \
\"ImageId\": \"ami-f2d3638a\", \
\"InstanceType\": \"m3.medium\", \
\"SecurityGroups\": [ \
\"richard-spot-instance SSH\" \
], \
\"KeyName\": \"richard-spot-instance\", \
\"IamInstanceProfile\": { \
\"Name\": \"BenchMarkCodeDeployInstanceProfile\" \
}, \
\"UserData\": \"`base64 ../configs/userdata-commander.sh`\" \
}" \
#--dry-run # uncomment this line to send a real request.
# Note: on windows, you need to add `-w 0` to the base64 command"

@ -1,10 +0,0 @@
# Launch one on-demand t2.micro with the commander userdata; counterpart of
# request-spot.sh for when spot capacity is unavailable.
aws ec2 run-instances \
--count 1 \
--image-id "ami-f2d3638a" \
--instance-type "t2.micro" \
--key-name "richard-spot-instance" \
--security-groups "richard-spot-instance SSH" \
--iam-instance-profile "{ \
\"Name\": \"BenchMarkCodeDeployInstanceProfile\" \
}" \
--user-data "`base64 ../configs/userdata-commander.sh`"

@ -1,105 +0,0 @@
import argparse
import logging
import os
import random
import sys
import threading
import boto3
logging.basicConfig(level=logging.INFO, format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s')
LOGGER = logging.getLogger(__file__)
LOGGER.setLevel(logging.INFO)
# Column keys for the per-region configuration dictionary.
REGION_NAME = 'region_name'
REGION_KEY = 'region_key'
REGION_SECURITY_GROUP = 'region_security_group'
REGION_SECURITY_GROUP_ID = 'region_security_group_id'
REGION_HUMAN_NAME = 'region_human_name'
INSTANCE_TYPE = 't2.micro'
REGION_AMI = 'region_ami'

def read_configuration_file(filename):
    """Parse the region CSV (one region per line, 7 columns) into
    {region_number: {field: value}}."""
    config = {}
    with open(filename, 'r') as f:
        for raw_line in f:
            cols = raw_line.strip().split(',')
            config[cols[0]] = {
                REGION_NAME: cols[1],
                REGION_KEY: cols[2],
                REGION_SECURITY_GROUP: cols[3],
                REGION_HUMAN_NAME: cols[4],
                REGION_AMI: cols[5],
                REGION_SECURITY_GROUP_ID: cols[6],
            }
    return config
def get_instance_ids(describe_instances_response):
    """Collect every non-empty InstanceId from a describe_instances response."""
    reservations = describe_instances_response["Reservations"]
    if not reservations:
        return []
    return [inst["InstanceId"]
            for reservation in reservations
            for inst in reservation["Instances"]
            if inst.get("InstanceId")]
def get_instance_ids2(ec2_client, node_name_tag):
    """Return ids of instances tagged Name=node_name_tag, pausing first so
    freshly created instances become visible to describe_instances."""
    # Bug fix: this module never imported `time` at top level, so the call
    # below raised NameError; import locally to keep the fix self-contained.
    import time
    time.sleep(5)
    filters = [{'Name': 'tag:Name','Values': [node_name_tag]}]
    return get_instance_ids(ec2_client.describe_instances(Filters=filters))
def create_ec2_client(region_number, region_config):
    """Build (ec2_client, session) for the region mapped to region_number."""
    cfg = read_configuration_file(region_config)
    session = boto3.Session(region_name=cfg[region_number][REGION_NAME])
    return session.client('ec2'), session
def terminate_instances_by_region(region_number, region_config, node_name_tag):
    """Terminate every instance tagged Name=node_name_tag in the given region
    and block until AWS reports them terminated."""
    ec2_client, _ = create_ec2_client(region_number, region_config)
    filters = [{'Name': 'tag:Name','Values': [node_name_tag]}]
    instance_ids = get_instance_ids(ec2_client.describe_instances(Filters=filters))
    if not instance_ids:
        # Bug fix: the warning used to sit after an `else: pass`, firing on
        # every call; it now fires only when nothing matched. logger.warn is
        # deprecated in favor of warning().
        LOGGER.warning("there is no instances to terminate")
        return
    ec2_client.terminate_instances(InstanceIds=instance_ids)
    LOGGER.info("waiting until instances with tag %s died." % node_name_tag)
    waiter = ec2_client.get_waiter('instance_terminated')
    waiter.wait(InstanceIds=instance_ids)
    LOGGER.info("instances with node name tag %s terminated." % node_name_tag)
if __name__ == "__main__":
    # CLI entry point: terminate tagged instances, either from an explicit
    # comma-separated --node_name_tag list or from an instance_output file.
    parser = argparse.ArgumentParser(description='This script helps you to collect public ips')
    parser.add_argument('--instance_output', type=str, dest='instance_output',
                        default='instance_output.txt',
                        help='the file contains node_name_tag and region number of created instances.')
    parser.add_argument('--node_name_tag', type=str, dest='node_name_tag')
    parser.add_argument('--region_config', type=str,
                        dest='region_config', default='configuration.txt')
    args = parser.parse_args()

    def terminate_all(pairs):
        # One worker thread per (region_number, node_name_tag) pair.
        # (This helper removes the duplicated thread-pool loop that used to
        # appear in both branches below.)
        threads = []
        for region_number, node_name_tag in pairs:
            t = threading.Thread(target=terminate_instances_by_region,
                                 args=(region_number, args.region_config, node_name_tag))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
        LOGGER.info("done.")

    if args.node_name_tag:
        # The leading character of each tag encodes its region number.
        tags = args.node_name_tag.split(",")
        terminate_all([(tag[:1], tag) for tag in tags])
    elif args.instance_output:
        with open(args.instance_output, "r") as fin:
            pairs = []
            for line in fin.readlines():
                items = line.split(" ")
                pairs.append((items[1].strip(), items[0].strip()))
        terminate_all(pairs)

@ -1,15 +0,0 @@
import boto3
import os

# TODO(review): hard-coded developer workspace path; parameterize before reuse.
GOHOME ='/Users/alok/Documents/goworkspace/'
s3 = boto3.client('s3')
bucket_name = 'unique-bucket-bin'
#response = s3.create_bucket(Bucket=bucket_name,ACL='public-read-write', CreateBucketConfiguration={'LocationConstraint': 'us-west-2'})
response = s3.list_buckets()
buckets = [bucket['Name'] for bucket in response['Buckets']]
print("Bucket List: %s" % buckets)
dirname = GOHOME + 'src/harmony-benchmark/bin/'
# Upload every binary under bin/ with a public-read-write ACL.
for myfile in os.listdir(dirname):
    # Bug fix: the old loop opened distribution_config.txt in a with-block and
    # then rebound f to an unclosed text-mode handle on the binary. Open each
    # binary directly, in binary mode, and close it deterministically.
    with open(os.path.join(dirname, myfile), 'rb') as f:
        response = s3.put_object(ACL='public-read-write', Body=f.read(),
                                 Bucket=bucket_name, Key=myfile)
    print(response)

@ -1,10 +0,0 @@
# Upload the generated distribution_config.txt to the shared binaries bucket
# so soldier instances can fetch it at startup.
import boto3
import os
#from boto3.session import Session
s3 = boto3.client('s3')
bucket_name = 'unique-bucket-bin'
#response = s3.create_bucket(Bucket=bucket_name,ACL='public-read-write', CreateBucketConfiguration={'LocationConstraint': 'us-west-2'})
myfile = 'distribution_config.txt'
with open('distribution_config.txt','r') as f:
response = s3.put_object(ACL='public-read-write',Body=f.read(),Bucket=bucket_name,Key=myfile)
print(response)

@ -1,9 +0,0 @@
# Smoke-test script: create a bucket, list buckets, upload one local file.
# NOTE(review): assumes myfirst.txt exists in the working directory.
import boto3
s3 = boto3.client('s3')
bucket_name = 'first-try'
s3.create_bucket(Bucket=bucket_name,ACL='public-read-write')
response = s3.list_buckets()
buckets = [bucket['Name'] for bucket in response['Buckets']]
print("Bucket List: %s" % buckets)
filename='myfirst.txt'
s3.upload_file(filename, bucket_name, filename)

@ -1,11 +0,0 @@
#!/bin/bash -x
# EC2 userdata for the commander host: installs the CodeDeploy agent for this
# instance's region and prepares the Go workspace directories.
REGION=$(curl 169.254.169.254/latest/meta-data/placement/availability-zone/ | sed 's/[a-z]$//')
#yum update -y #This breaking codedeploy right now
yum install ruby wget -y
cd /home/ec2-user
touch yum-not-updated.txt
# Each region hosts its own CodeDeploy agent installer bucket.
wget https://aws-codedeploy-$REGION.s3.amazonaws.com/latest/install
chmod +x ./install
./install auto
mkdir projects
mkdir projects/src

@ -1,31 +0,0 @@
#!/bin/bash
# EC2 userdata for soldier hosts: fetch prebuilt binaries, raise fd/proc
# limits, then start the soldier agent on port 1<node_port>.
yum install ruby -y
cd /home/ec2-user/
curl http://unique-bucket-bin.s3.amazonaws.com/txgen -o txgen
curl http://unique-bucket-bin.s3.amazonaws.com/soldier -o soldier
curl http://unique-bucket-bin.s3.amazonaws.com/benchmark -o benchmark
chmod +x ./soldier
chmod +x ./txgen
# Bug fix: benchmark is downloaded above but was never made executable, so the
# soldier's "./benchmark" launch would fail with EACCES.
chmod +x ./benchmark
# NOTE(review): commander and kill_node.sh are not downloaded by this script;
# the two chmods below only matter if the AMI already ships them — confirm.
chmod +x ./commander
chmod +x ./kill_node.sh
# Raise process/file-descriptor limits for the benchmark workload.
echo "* soft nproc 65535" | sudo tee -a /etc/security/limits.conf
echo "* hard nproc 65535" | sudo tee -a /etc/security/limits.conf
echo "* soft nofile 65535" | sudo tee -a /etc/security/limits.conf
echo "* hard nofile 65535" | sudo tee -a /etc/security/limits.conf
echo "root soft nproc 65535" | sudo tee -a /etc/security/limits.conf
echo "root hard nproc 65535" | sudo tee -a /etc/security/limits.conf
echo "root soft nofile 65535" | sudo tee -a /etc/security/limits.conf
echo "root hard nofile 65535" | sudo tee -a /etc/security/limits.conf
echo "session required pam_limits.so" | sudo tee -a /etc/pam.d/common-session
# Get My IP
ip=`curl http://169.254.169.254/latest/meta-data/public-ipv4`
NODE_PORT=9000
SOLDIER_PORT=1$NODE_PORT
# Kill existing soldier/node
fuser -k -n tcp $SOLDIER_PORT
fuser -k -n tcp $NODE_PORT
# Run soldier
./soldier -ip $ip -port $NODE_PORT > soldier_log 2>&1 &

@ -1,8 +0,0 @@
1,us-east-1,virginia-key-benchmark,virginia-security-group,virginia,ami-b70554c8,sg-04d0b62ee08ce8800
2,us-east-2,ohio-key-benchmark,ohio-security-group,ohio,ami-8c122be9,sg-0789078f1c76defbe
3,us-west-1,california-key-benchmark,california-security-group,california,ami-e0ba5c83,sg-0a66ccb6ab9161a14
4,us-west-2,oregon-key-benchmark,oregon-security-group,oregon,ami-a9d09ed1,sg-020cb5729fa212d43
5,ap-northeast-1,tokyo-key-benchmark,tokyo-security-group,tokyo,ami-e99f4896,sg-009aeb97f675c1ad5
6,ap-southeast-1,singapore-key-benchmark,singapore-security-group,singapore,ami-05868579,sg-05f9b60044a19dfb2
7,eu-central-1,frankfurt-key-benchmark,frankfurt-security-group,frankfurt,ami-7c4f7097,sg-0bb06fcd8b25b5910
8,eu-west-1,ireland-key-benchmark,ireland-security-group,ireland,ami-466768ac,sg-0aa8954acb79fdb58

@ -1,46 +0,0 @@
import utils
def get_launch_template_name(region_number):
    """Launch-template name: 'benchmark-<region name>' for this region number."""
    region_name = utils.CONFIG[region_number][utils.REGION_NAME]
    return 'benchmark-' + region_name
def create(ec2_client, region_number):
    """Create an EC2 launch template for region_number using the region's AMI,
    key pair and security group from utils.CONFIG plus the shared userdata.
    Returns the raw create_launch_template response."""
    return ec2_client.create_launch_template(
# DryRun=True,
LaunchTemplateName=get_launch_template_name(region_number),
LaunchTemplateData={
'IamInstanceProfile': {
'Name': utils.IAM_INSTANCE_PROFILE
},
'ImageId': utils.CONFIG[region_number][utils.REGION_AMI],
# 'InstanceType': instance_type,
'KeyName': utils.CONFIG[region_number][utils.REGION_KEY],
'UserData': utils.USER_DATA_BASE64,
'SecurityGroupIds': [
utils.CONFIG[region_number][utils.REGION_SECURITY_GROUP_ID]
],
# 'InstanceInitiatedShutdownBehavior': 'stop',
# Tag launched instances so fleets/queries can find them later.
'TagSpecifications': [
{
'ResourceType': 'instance',
'Tags': [
{
'Key': 'LaunchTemplate',
'Value': 'Yes'
}
]
}
],
# 'InstanceMarketOptions': {
# 'MarketType': 'spot',
# 'SpotOptions': {
# 'MaxPrice': 'string',
# 'SpotInstanceType': 'one-time'|'persistent',
# 'BlockDurationMinutes': 123,
# 'InstanceInterruptionBehavior': 'hibernate'|'stop'|'terminate'
# }
# },
}
)

@ -1,9 +0,0 @@
import logging
logging.basicConfig(level=logging.INFO,
format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s')
def getLogger(file):
    """Return a named logger pinned to INFO level (basicConfig sets the shared
    thread-aware format at import time)."""
    named_logger = logging.getLogger(file)
    named_logger.setLevel(logging.INFO)
    return named_logger

@ -1,119 +0,0 @@
import utils
import logger
import launch_template
LOGGER = logger.getLogger(__file__)
def create_launch_specification(region_number, instanceType):
    """Build one spot-fleet LaunchSpecification dict for region_number using
    the region's security-group id, AMI and key pair from utils.CONFIG."""
    return {
# Region irrelevant fields
'IamInstanceProfile': {
'Name': utils.IAM_INSTANCE_PROFILE
},
'InstanceType': instanceType,
'UserData': utils.USER_DATA_BASE64,
# Region relevant fields
'SecurityGroups': [
{
# In certain scenarios, we have to use group id instead of group name
# https://github.com/boto/boto/issues/350#issuecomment-27359492
'GroupId': utils.CONFIG[region_number][utils.REGION_SECURITY_GROUP_ID]
}
],
'ImageId': utils.CONFIG[region_number][utils.REGION_AMI],
'KeyName': utils.CONFIG[region_number][utils.REGION_KEY],
# Name-tag instances so collect_public_ips / terminate scripts can find them.
'TagSpecifications': [
{
'ResourceType': 'instance',
'Tags': [
{
'Key': 'Name',
'Value': utils.get_node_name_tag(region_number)
}
]
}
],
# 'WeightedCapacity': 123.0,
# 'Placement': {
# # 'AvailabilityZone': get_one_availability_zone(ec2_client)
# }
}
def create_launch_specification_list(region_number, instance_type_list):
    """One launch specification per requested instance type."""
    return [create_launch_specification(region_number, instance_type)
            for instance_type in instance_type_list]
def get_launch_template(region_number, instance_type):
    """Build one LaunchTemplateConfig entry (template version 1 for the
    region, overridden to the given instance type) for request_spot_fleet."""
    return {
'LaunchTemplateSpecification': {
'LaunchTemplateName': launch_template.get_launch_template_name(region_number),
'Version': '1'
},
'Overrides': [
{
'InstanceType': instance_type
}
]
}
def get_launch_template_list(region_number, instance_type_list):
    """One launch-template config per requested instance type."""
    return [get_launch_template(region_number, instance_type)
            for instance_type in instance_type_list]
def request_spot_fleet(ec2_client, region_number, number_of_instances, instance_type_list):
    """Request a diversified spot fleet of number_of_instances spread over
    instance_type_list. Returns the SpotFleetRequestId string."""
    LOGGER.info("Requesting spot fleet")
    LOGGER.info("Creating node_name_tag: %s" %
utils.get_node_name_tag(region_number))
# https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.request_spot_fleet
    response = ec2_client.request_spot_fleet(
# DryRun=True,
SpotFleetRequestConfig={
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet.html#spot-fleet-allocation-strategy
'AllocationStrategy': 'diversified',
'IamFleetRole': 'arn:aws:iam::656503231766:role/RichardFleetRole',
'LaunchSpecifications': create_launch_specification_list(region_number, instance_type_list),
# 'SpotPrice': 'string', # The maximum price per unit hour that you are willing to pay for a Spot Instance. The default is the On-Demand price.
'TargetCapacity': number_of_instances,
'Type': 'maintain'
}
)
    return response["SpotFleetRequestId"]
def request_spot_fleet_with_on_demand(ec2_client, region_number, number_of_instances, number_of_on_demand, instance_type_list):
    """Like request_spot_fleet but via launch templates, with
    number_of_on_demand of the capacity as on-demand instances.
    NOTE(review): returns the whole response, unlike request_spot_fleet which
    returns only the request id — confirm callers before unifying."""
    LOGGER.info("Requesting spot fleet")
    LOGGER.info("Creating node_name_tag: %s" %
utils.get_node_name_tag(region_number))
# https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.request_spot_fleet
    response = ec2_client.request_spot_fleet(
# DryRun=True,
SpotFleetRequestConfig={
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet.html#spot-fleet-allocation-strategy
'AllocationStrategy': 'diversified',
'IamFleetRole': 'arn:aws:iam::656503231766:role/RichardFleetRole',
'LaunchTemplateConfigs': get_launch_template_list(region_number, instance_type_list),
# 'SpotPrice': 'string', # The maximum price per unit hour that you are willing to pay for a Spot Instance. The default is the On-Demand price.
'TargetCapacity': number_of_instances,
'OnDemandTargetCapacity': number_of_on_demand,
'Type': 'maintain'
}
)
    return response
def get_instance_ids(client, request_id):
    """List instance ids currently active under a spot-fleet request."""
    response = client.describe_spot_fleet_instances(
        SpotFleetRequestId=request_id
    )
    return [instance["InstanceId"] for instance in response["ActiveInstances"]]
def run_one_region(region_number, number_of_instances, fout, fout2):
    """Request a diversified spot fleet in one region and print the (so far)
    active instance ids.

    NOTE(review): fout/fout2 are accepted but never used here, and the id
    query races the fleet fulfilment (see inline TODO) — confirm intent.
    """
    client = utils.create_client(utils.CONFIG, region_number)
    instance_type_list = ['t2.micro', 't2.small', 'm3.medium']
# node_name_tag = request_spot_fleet_with_on_demand(
# client, region_number, int(number_of_instances), 1, instance_type_list)
    request_id = request_spot_fleet(
client, region_number, int(number_of_instances), instance_type_list)
    instance_ids = get_instance_ids(client, request_id)
    print(instance_ids) # TODO@ricl, no data here since the request is not fulfilled.

@ -1,210 +0,0 @@
import boto3
import datetime
import json
import sys
import time
import base64
import threading
# Waiters and deployments reject very large id lists; batch sizes below.
# NOTE(review): the 'INTANCES' typo is load-bearing (referenced elsewhere).
MAX_INTANCES_FOR_WAITER = 100
MAX_INSTANCES_FOR_DEPLOYMENT = 500
# Keys of the per-region configuration dictionary built by read_region_config.
REGION_NAME = 'region_name'
REGION_KEY = 'region_key'
REGION_SECURITY_GROUP = 'region_security_group'
REGION_SECURITY_GROUP_ID = 'region_security_group_id'
REGION_HUMAN_NAME = 'region_human_name'
INSTANCE_TYPE = 't2.micro'
REGION_AMI = 'region_ami'
IAM_INSTANCE_PROFILE = 'BenchMarkCodeDeployInstanceProfile'
# Session timestamp baked into every Name tag created by this run.
time_stamp = time.time()
CURRENT_SESSION = datetime.datetime.fromtimestamp(
time_stamp).strftime('%H-%M-%S-%Y-%m-%d')
NODE_NAME_SUFFIX = "NODE-" + CURRENT_SESSION
def get_node_name_tag(region_number):
    """Compose the Name tag for instances launched in this session."""
    return "%s-%s" % (region_number, NODE_NAME_SUFFIX)
def get_node_name_tag2(region_number, tag):
    """Name-tag variant carrying an extra per-group suffix."""
    return "%s-%s-%s" % (region_number, NODE_NAME_SUFFIX, str(tag))
# Read at import time: every consumer of this module needs the soldier
# userdata to launch instances.
with open("userdata-soldier.sh", "r") as userdata_file:
USER_DATA = userdata_file.read()
# UserData must be base64 encoded for spot instances.
# NOTE(review): base64.b64encode(str) only works on Python 2; Python 3
# requires USER_DATA.encode() — confirm the target interpreter before porting.
USER_DATA_BASE64 = base64.b64encode(USER_DATA)
def create_client(config, region_number):
    """Build an EC2 client for the region mapped to region_number in config."""
    region_name = config[region_number][REGION_NAME]
    return boto3.Session(region_name=region_name).client('ec2')
def read_region_config(region_config='configuration.txt'):
    """Parse the region CSV into {region_number: {field: value}}, caching the
    result in the module-level CONFIG as a side effect."""
    global CONFIG
    config = {}
    with open(region_config, 'r') as f:
        for raw_line in f:
            cols = [col.strip() for col in raw_line.strip().split(',')]
            config[cols[0]] = {
                REGION_NAME: cols[1],
                REGION_KEY: cols[2],
                REGION_SECURITY_GROUP: cols[3],
                REGION_HUMAN_NAME: cols[4],
                REGION_AMI: cols[5],
                REGION_SECURITY_GROUP_ID: cols[6],
            }
    CONFIG = config
    return config
def get_ip_list(response):
    """Return PublicIpAddress (or None) per instance; [] when absent/empty."""
    instances = response.get('Instances', None)
    if not instances:
        return []
    return [inst.get('PublicIpAddress', None) for inst in instances]
def create_ec2_client(region_number, region_config):
    """Build (ec2_client, session) for region_number, refreshing CONFIG from
    region_config as a side effect of read_region_config."""
    cfg = read_region_config(region_config)
    session = boto3.Session(region_name=cfg[region_number][REGION_NAME])
    return session.client('ec2'), session
def collect_public_ips_from_ec2_client(ec2_client, node_name_tag):
    """Return the public IPs of every instance tagged Name=node_name_tag
    (instances without a public IP are skipped)."""
    response = ec2_client.describe_instances(
        Filters=[{'Name': 'tag:Name', 'Values': [node_name_tag]}])
    ips = []
    for reservation in response.get('Reservations') or []:
        for instance in reservation['Instances']:
            ip = instance.get('PublicIpAddress')
            if ip:
                ips.append(ip)
    return ips
def collect_public_ips(region_number, node_name_tag, region_config):
    """Public IPs of instances tagged Name=node_name_tag in one region."""
    client, _ = create_ec2_client(region_number, region_config)
    return collect_public_ips_from_ec2_client(client, node_name_tag)
def get_application(codedeploy, application_name):
    """Return the list_applications response when the app already exists,
    otherwise create it (Server platform) and return the create response."""
    listing = codedeploy.list_applications()
    if application_name in listing['applications']:
        return listing
    return codedeploy.create_application(
        applicationName=application_name,
        computePlatform='Server'
    )
def create_deployment_group(codedeploy, region_number,application_name, deployment_group_name, node_name_tag):
    """Create an in-place, all-at-once deployment group targeting instances
    tagged Name=node_name_tag. Returns the new group id, or None when the
    group already exists. (region_number is accepted but unused.)"""
    existing = codedeploy.list_deployment_groups(applicationName=application_name)
    groups = existing.get('deploymentGroups')
    if groups and (deployment_group_name in groups):
        return None
    created = codedeploy.create_deployment_group(
        applicationName=application_name,
        deploymentGroupName=deployment_group_name,
        deploymentConfigName='CodeDeployDefault.AllAtOnce',
        serviceRoleArn='arn:aws:iam::656503231766:role/BenchMarkCodeDeployServiceRole',
        deploymentStyle={
            'deploymentType': 'IN_PLACE',
            'deploymentOption': 'WITHOUT_TRAFFIC_CONTROL'
        },
        ec2TagFilters = [
            {
                'Key': 'Name',
                'Value': node_name_tag,
                'Type': 'KEY_AND_VALUE'
            }
        ]
    )
    return created['deploymentGroupId']
def generate_distribution_config2(region_number, node_name_tag, region_config,
                                  shard_number, client_number, distribution_config):
    """Collect public IPs for the tagged instances, then write the
    distribution config file."""
    ips = collect_public_ips(region_number, node_name_tag, region_config)
    generate_distribution_config(shard_number, client_number, ips, distribution_config)
def generate_distribution_config3(shard_number, client_number, ip_list_file, distribution_config):
    """Read one IP entry per line from ip_list_file, then write the
    distribution config file."""
    with open(ip_list_file, "r") as fin:
        entries = [line.strip() for line in fin.readlines()]
    generate_distribution_config(shard_number, client_number, entries, distribution_config)
def generate_distribution_config(shard_number, client_number, ip_list, distribution_config):
    """Write role assignments ("<ip> 9000 <role> <shard/id> <tag>") to
    distribution_config: 1 commander, shard_number leaders, then validators,
    then client_number clients. Entries in ip_list are either "ip" or
    "ip node_name_tag". Returns False when there are too few nodes.

    Robustness fix: plain-IP entries (no tag) used to raise ValueError on the
    two-target unpack; they now get an empty tag.
    """
    if len(ip_list) < shard_number * 2 + client_number + 1:
        print("Not enough nodes to generate a config file")
        return False
    # Create ip for clients.
    commander_id, client_id, leader_id, validator_id = 0, 0, 0, 0
    validator_number = len(ip_list) - client_number - shard_number - 1
    with open(distribution_config, "w") as fout:
        for i in range(len(ip_list)):
            parts = ip_list[i].split(" ")
            ip = parts[0]
            node_name_tag = parts[1] if len(parts) > 1 else ""
            if commander_id < 1:
                fout.write("%s 9000 commander %d %s\n" % (ip, commander_id, node_name_tag))
                commander_id = commander_id + 1
            elif leader_id < shard_number:
                fout.write("%s 9000 leader %d %s\n" % (ip, leader_id, node_name_tag))
                leader_id = leader_id + 1
            elif validator_id < validator_number:
                # Validators are spread round-robin across shards.
                fout.write("%s 9000 validator %d %s\n" % (ip, validator_id % shard_number, node_name_tag))
                validator_id = validator_id + 1
            else:
                fout.write("%s 9000 client %d %s\n" % (ip, client_id % shard_number, node_name_tag))
                client_id = client_id + 1
def get_availability_zones(ec2_client):
    """Names of this region's AZs currently in the 'available' state."""
    response = ec2_client.describe_availability_zones()
    zones = response.get('AvailabilityZones', None)
    if not zones:
        return []
    return [zone['ZoneName'] for zone in zones if zone['State'] == 'available']
def get_one_availability_zone(ec2_client):
    """First available AZ name, or None when none are available."""
    time.sleep(1)
    zones = get_availability_zones(ec2_client)
    return zones[0] if zones else None
def get_instance_ids2(ec2_client, node_name_tag):
    """Ids of instances tagged Name=node_name_tag; sleeps first so freshly
    created instances become visible to describe_instances."""
    time.sleep(5)
    name_filter = [{'Name': 'tag:Name', 'Values': [node_name_tag]}]
    return get_instance_ids(ec2_client.describe_instances(Filters=name_filter))
def get_instance_ids(describe_instances_response):
    """Collect every non-empty InstanceId from a describe_instances response."""
    ids = []
    for reservation in describe_instances_response["Reservations"] or []:
        for instance in reservation["Instances"]:
            if instance.get("InstanceId"):
                ids.append(instance["InstanceId"])
    return ids
# Serializes get_waiter construction across the batch threads below.
WAITER_LOCK = threading.Lock()
def run_waiter_100_instances_for_status(ec2_client, status, instance_ids):
    """Block until the given instances are running.

    NOTE(review): the `status` parameter is ignored — the waiter name
    'instance_running' is hard-coded. Confirm whether callers ever pass a
    different status before generalizing.
    """
    time.sleep(10)
    WAITER_LOCK.acquire()
    waiter = ec2_client.get_waiter('instance_running')
    WAITER_LOCK.release()
    waiter.wait(InstanceIds=instance_ids)
def run_waiter_for_status(ec2_client, status, instance_ids):
    """Wait on all instance_ids, batching at most MAX_INTANCES_FOR_WAITER ids
    per waiter thread.

    Cleanup: the batch end used to be `i + min(len, i + MAX...)`, which
    overshoots (harmless only because Python slicing clamps); it is now the
    intended `min(len, i + MAX...)`.
    """
    threads = []
    i = 0
    while i < len(instance_ids):
        j = min(len(instance_ids), i + MAX_INTANCES_FOR_WAITER)
        t = threading.Thread(target=run_waiter_100_instances_for_status,
                             args=(ec2_client, status, instance_ids[i:j]))
        t.start()
        threads.append(t)
        i = i + MAX_INTANCES_FOR_WAITER
    for t in threads:
        t.join()
# used for testing only.
# if __name__ == "__main__":
# ip_list = collect_public_ips('4', "4-NODE-23-36-01-2018-07-05", "configuration.txt")
# print ip_list
# generate_distribution_config(2, 1, ip_list, "config_test.txt")

@ -1,31 +0,0 @@
import unittest
from utils import generate_distribution_config
class TestCreateAndDeploy(unittest.TestCase):
    # Exercises generate_distribution_config end-to-end through a temp file.
    #
    # NOTE(review): with 6 IPs, shard=2, client=2 the generator's arithmetic
    # yields 1 commander / 2 leaders / 1 validator / 2 clients, so the
    # validator_count == 2 assertion below looks wrong. Also the generator
    # unpacks each entry as "ip tag", while plain IPs are passed here —
    # confirm against the utils implementation.
    def test_generate_config_file(self):
        # Generate a config for 6 plain IPs and re-read it for verification.
        ips = ["102.000.000.1", "102.000.000.2", "102.000.000.3", "102.000.000.4", "102.000.000.5", "102.000.000.6"]
        generate_distribution_config(2, 2, ips, "config_test.txt")
        with open("config_test.txt", "r") as fin:
            lines = fin.readlines()
            collection = {}
            collection['ip'] = []
            collection['client'] = {}
            leader_count, validator_count, client_count = 0, 0, 0
            for line in lines:
                strs = line.split(" ")
                # Every IP must appear exactly once.
                assert(not strs[0] in collection['ip'])
                collection['ip'].append(strs[0])
                if strs[2] == "client":
                    client_count = client_count + 1
                elif strs[2] == "leader":
                    leader_count = leader_count + 1
                elif strs[2] == "validator":
                    validator_count = validator_count + 1
            assert(validator_count == 2)
            assert(leader_count == 2)
            assert(client_count == 2)
# Standard unittest entry point.
if __name__ == '__main__':
unittest.main()

@ -1,5 +0,0 @@
# Kill any lingering benchmark-related processes owned by the current user
# (node, leader, slave helpers) before starting a fresh run.
for pid in `/bin/ps -fu $USER| grep "slave.go\|slave -port\|leader\|benchmark_node" | grep -v "grep" | awk '{print $2}'`;
do
echo 'Killed process: '$pid
kill -9 $pid
done

@ -1,34 +0,0 @@
import json
def get_public_ip(all_reservations):
    """Public IPs of all *running* instances across every reservation.

    Generalization: the original inspected only Instances[0] of each
    reservation, silently dropping any additional instances in it.
    """
    all_public_ip_addresses = []
    for individual_instances in all_reservations:
        for instance_information in individual_instances['Instances']:
            if "running" != instance_information["State"]["Name"]:
                continue
            all_public_ip_addresses.append(instance_information['PublicIpAddress'])
    return all_public_ip_addresses
def make_peers_list(all_reservations,port="9001",filename="config.txt"):
    """Write a peers file: the first running instance becomes the leader,
    every other one a validator."""
    addresses = get_public_ip(all_reservations)
    out = open(filename, "w")
    for index, address in enumerate(addresses):
        role = "leader" if index == 0 else "validator"
        out.write(address + " " + port + " " + role + "\n")
    out.close()
def is_it_running(f):
    # Placeholder — never implemented; not called anywhere in this file.
    pass
# Entry point: read cached `aws describe-instances` output from aws.json and
# write the peers file from its reservations.
if __name__ == "__main__":
json_data=open("aws.json").read()
f = json.loads(json_data)
all_reservations = f['Reservations']
make_peers_list(all_reservations)

@ -1,26 +0,0 @@
import requests
amazon_ipv4_url = "http://169.254.169.254/latest/meta-data/public-ipv4"
def get_my_ip():
    """Return this EC2 instance's public IPv4 from the metadata service.

    Bug fix: `return current_ip = requests.get(...)` was a SyntaxError
    (assignment inside return); return the response text directly.
    """
    return requests.get(amazon_ipv4_url).text
# Entry point: filter global_nodes.txt into global_peerlist.txt, excluding
# this host and transaction-generator entries; if this host *is* the
# transaction node, drop a marker file instead.
if __name__ == "__main__":
current_ip = requests.get(amazon_ipv4_url).text
f = open("global_nodes.txt","r")
peerList = []
for myline in f:
# Line format: "<ip> <port> <role> ..." — only fields 0 and 2 are used.
mylist = myline.split(" ")
ip = mylist[0]
node = mylist[2]
if str(ip) != str(current_ip):
if node != "transaction":
peerList.append(myline)
else:
if node == "transaction":
# This host is the transaction generator; mark it and write no peers.
h = open("isTransaction.txt","w")
h.write("I am just a transaction generator node")
h.close()
f.close()
g = open("global_peerlist.txt","w")
for myline in peerList:
g.write(myline)
g.close()

@ -1,34 +0,0 @@
#!/bin/bash -x
# Boot-time script for soldier hosts: tune TCP buffers, set up Go paths, then
# start the soldier agent from the prebuilt workspace.
echo "Run Instances starts" >> tmplog
echo "Update systcl" >> tmplog
# Larger backlogs/buffers for the high-connection benchmark workload.
sudo sysctl net.core.somaxconn=1024
sudo sysctl net.core.netdev_max_backlog=65536;
sudo sysctl net.ipv4.tcp_tw_reuse=1;
sudo sysctl -w net.ipv4.tcp_rmem='65536 873800 1534217728';
sudo sysctl -w net.ipv4.tcp_wmem='65536 873800 1534217728';
sudo sysctl -w net.ipv4.tcp_mem='65536 873800 1534217728';
echo "Setup path" >> tmplog
# Stop any processes left over from a previous run.
./kill_node.sh
MyHOME=/home/ec2-user
source ~/.bash_profile
export GOROOT=/usr/lib/golang
export GOPATH=$MyHOME/projects
export PATH=$PATH:$GOROOT/bin
echo "Get ip" >> tmplog
# Get my IP
wget http://169.254.169.254/latest/meta-data/public-ipv4
ip=$(head -n 1 public-ipv4)
echo "Current IP is >>>"
echo $ip
echo ">>>>"
echo "Run soldier" >> tmplog
# Run soldier
cd $GOPATH/src/harmony-benchmark/bin/
node_port=9000
./soldier -ip $ip -port $node_port > soldier_log 2>&1 &
echo "Run Instances done" >> tmplog

@ -1 +0,0 @@
# Shutdown hook marker: append to tmplog so runs can be traced.
echo "Bye" >> tmplog

@ -1 +0,0 @@
# Startup hook marker: append to tmplog so runs can be traced.
echo "Hello" >> tmplog

@ -1,43 +0,0 @@
#!/bin/bash -x
# One-time host setup: install Go + git, configure GOROOT/GOPATH, build the
# benchmark binaries from source, and raise process/fd limits.
echo "Setup Golang" >> tmplog
#sudo yum update -y
sudo yum install -y golang
sudo yum install -y git
MyHOME=/home/ec2-user
echo "now setting up go-lang paths"
# GOROOT is the location where Go package is installed on your system
echo "export GOROOT=/usr/lib/golang" >> $MyHOME/.bash_profile
# GOPATH is the location of your work directory
echo "export GOPATH=$MyHOME/projects" >> $MyHOME/.bash_profile
# PATH in order to access go binary system wide
echo "export PATH=$PATH:$GOROOT/bin" >> $MyHOME/.bash_profile
export GOROOT=/usr/lib/golang
export GOPATH=$MyHOME/projects
export PATH=$PATH:$GOROOT/bin
source $MyHOME/.bash_profile
cd $GOPATH/src/harmony-benchmark
touch 'yum_not_updated.txt'
# go get dependencies
go get ./...
# Record this instance's public IP alongside the binaries.
curl --silent http://169.254.169.254/latest/meta-data/public-ipv4 >> bin/myip.txt
# build executables
go build -o bin/soldier aws-experiment-launch/experiment/soldier/main.go
go build -o bin/commander aws-experiment-launch/experiment/commander/main.go
go build -o bin/benchmark benchmark.go
go build -o bin/txgen client/txgen/main.go
# Setup ulimit
echo "* soft nproc 65535" | sudo tee -a /etc/security/limits.conf
echo "* hard nproc 65535" | sudo tee -a /etc/security/limits.conf
echo "* soft nofile 65535" | sudo tee -a /etc/security/limits.conf
echo "* hard nofile 65535" | sudo tee -a /etc/security/limits.conf
echo "root soft nproc 65535" | sudo tee -a /etc/security/limits.conf
echo "root hard nproc 65535" | sudo tee -a /etc/security/limits.conf
echo "root soft nofile 65535" | sudo tee -a /etc/security/limits.conf
echo "root hard nofile 65535" | sudo tee -a /etc/security/limits.conf
echo "session required pam_limits.so" | sudo tee -a /etc/pam.d/common-session

@ -1 +0,0 @@
aws ec2 run-instances --image-id ami-e251209a --count 1 --instance-type t2.nano --key-name main --security-group-ids sg-066a8b0ec187c7247

@ -1,177 +0,0 @@
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.compute.models import DiskCreateOption
SUBSCRIPTION_ID = '8f969b5c-f8cb-4483-8252-354a929962e0'
GROUP_NAME = 'myResourceGroup'
LOCATION = 'westus'
VM_NAME = 'myomyVM'
def get_credentials():
credentials = ServicePrincipalCredentials(
client_id = '3b75dccc-f500-4195-99df-8da994541d03',
secret = 'Nj44R21IECrg8Vp/+3MBsXcmQrHcl0SEIpLjPIeOYc4=',
tenant = '6d22d644-2eec-4dac-9715-7147563a9fe5'
)
return credentials
def create_resource_group(resource_group_client):
resource_group_params = { 'location':LOCATION }
resource_group_result = resource_group_client.resource_groups.create_or_update(
GROUP_NAME,
resource_group_params
)
def create_availability_set(compute_client):
avset_params = {
'location': LOCATION,
'sku': { 'name': 'Aligned' },
'platform_fault_domain_count': 3
}
availability_set_result = compute_client.availability_sets.create_or_update(
GROUP_NAME,
'myAVSet',
avset_params
)
def create_public_ip_address(network_client):
public_ip_addess_params = {
'location': LOCATION,
'public_ip_allocation_method': 'Dynamic'
}
creation_result = network_client.public_ip_addresses.create_or_update(
GROUP_NAME,
'myIPAddress',
public_ip_addess_params
)
return creation_result.result()
def create_vnet(network_client):
vnet_params = {
'location': LOCATION,
'address_space': {
'address_prefixes': ['10.0.0.0/16']
}
}
creation_result = network_client.virtual_networks.create_or_update(
GROUP_NAME,
'myVNet',
vnet_params
)
return creation_result.result()
def create_subnet(network_client):
subnet_params = {
'address_prefix': '10.0.0.0/24'
}
creation_result = network_client.subnets.create_or_update(
GROUP_NAME,
'myVNet',
'mySubnet',
subnet_params
)
return creation_result.result()
def create_nic(network_client):
subnet_info = network_client.subnets.get(
GROUP_NAME,
'myVNet',
'mySubnet'
)
publicIPAddress = network_client.public_ip_addresses.get(
GROUP_NAME,
'myIPAddress'
)
nic_params = {
'location': LOCATION,
'ip_configurations': [{
'name': 'myIPConfig',
'public_ip_address': publicIPAddress,
'subnet': {
'id': subnet_info.id
}
}]
}
creation_result = network_client.network_interfaces.create_or_update(
GROUP_NAME,
'myNic',
nic_params
)
return creation_result.result()
def create_vm(network_client, compute_client):
nic = network_client.network_interfaces.get(
GROUP_NAME,
'myNic'
)
avset = compute_client.availability_sets.get(
GROUP_NAME,
'myAVSet'
)
vm_parameters = {
'location': LOCATION,
'os_profile': {
'computer_name': VM_NAME,
'admin_username': 'azureuser',
'admin_password': 'Azure12345678'
},
'hardware_profile': {
'vm_size': 'Standard_DS1'
},
'storage_profile': {
'image_reference': {
'publisher': 'MicrosoftWindowsServer',
'offer': 'WindowsServer',
'sku': '2012-R2-Datacenter',
'version': 'latest'
}
},
'network_profile': {
'network_interfaces': [{
'id': nic.id
}]
},
'availability_set': {
'id': avset.id
}
}
creation_result = compute_client.virtual_machines.create_or_update(
GROUP_NAME,
VM_NAME,
vm_parameters
)
return creation_result.result()
if __name__ == '__main__':
credentials = get_credentials()
resource_group_client = ResourceManagementClient(
credentials,
SUBSCRIPTION_ID
)
network_client = NetworkManagementClient(
credentials,
SUBSCRIPTION_ID
)
compute_client = ComputeManagementClient(
credentials,
SUBSCRIPTION_ID
)
create_resource_group(resource_group_client)
print('Resource group created....')
create_availability_set(compute_client)
print('Availability set created')
creation_result = create_public_ip_address(network_client)
print('Public IP created')
creation_result = create_vnet(network_client)
print('Virtual Net Created')
creation_result = create_subnet(network_client)
print('Subnet created')
creation_result = create_nic(network_client)
print('NIC Created')
creation_result = create_vm(network_client, compute_client)
print("------------------------------------------------------")
print("VM Created")
print(creation_result)

@ -33,7 +33,7 @@ func attackDetermination(attackedMode int) bool {
}
func startProfiler(shardID string, logFolder string) {
err := utils.RunCmd("./bin/profiler", "-pid", strconv.Itoa(os.Getpid()), "-shard_id", shardID, "-log_folder", logFolder)
err := utils.RunCmd("./profiler", "-pid", strconv.Itoa(os.Getpid()), "-shard_id", shardID, "-log_folder", logFolder)
if err != nil {
log.Error("Failed to start profiler")
}

@ -28,7 +28,7 @@ import (
"github.com/simple-rules/harmony-benchmark/blockchain"
"github.com/simple-rules/harmony-benchmark/client"
"github.com/simple-rules/harmony-benchmark/client/btctxiter"
"github.com/simple-rules/harmony-benchmark/configr"
client_config "github.com/simple-rules/harmony-benchmark/client/config"
"github.com/simple-rules/harmony-benchmark/consensus"
"github.com/simple-rules/harmony-benchmark/log"
"github.com/simple-rules/harmony-benchmark/node"
@ -176,9 +176,9 @@ func main() {
flag.Parse()
// Read the configs
configr := configr.NewConfigr()
configr.ReadConfigFile(*configFile)
leaders, shardIDs := configr.GetLeadersAndShardIds()
config := client_config.NewConfig()
config.ReadConfigFile(*configFile)
leaders, shardIDs := config.GetLeadersAndShardIds()
// Do cross shard tx if there are more than one shard
setting.crossShard = len(shardIDs) > 1
@ -204,7 +204,7 @@ func main() {
}
// Client/txgenerator server node setup
clientPort := configr.GetClientPort()
clientPort := config.GetClientPort()
consensusObj := consensus.NewConsensus("0", clientPort, "0", nil, p2p.Peer{})
clientNode := node.New(consensusObj, nil)
@ -254,6 +254,6 @@ func main() {
// Send a stop message to stop the nodes at the end
msg := proto_node.ConstructStopMessage()
peers := append(configr.GetValidators(), leaders...)
peers := append(config.GetValidators(), leaders...)
p2p.BroadcastMessage(peers, msg)
}

@ -0,0 +1,99 @@
package config
import (
"bufio"
"log"
"os"
"strconv"
"strings"
"github.com/simple-rules/harmony-benchmark/p2p"
)
// ConfigEntry is one line of the distribution config file:
// the network address, role and shard of a single node.
type ConfigEntry struct {
	IP string
	Port string
	Role string // one of "leader", "validator" or "client" (see the Get* helpers below)
	ShardID string
}

// Config holds the node roster parsed from a config file by ReadConfigFile.
// The zero value is usable; populate it with ReadConfigFile before querying.
type Config struct {
	config []ConfigEntry
}
// NewConfig returns an empty Config, ready to be populated via ReadConfigFile.
func NewConfig() *Config {
	return &Config{}
}
// GetValidators returns every peer whose role is "validator",
// in the order they appear in the config file.
func (config *Config) GetValidators() []p2p.Peer {
	var validators []p2p.Peer
	for _, e := range config.config {
		if e.Role == "validator" {
			validators = append(validators, p2p.Peer{Ip: e.IP, Port: e.Port})
		}
	}
	return validators
}
// Gets all the leader peers and corresponding shard Ids.
// The two returned slices are parallel: shardIDs[i] belongs to peerList[i] —
// except when a shard id fails to parse, in which case the peer is still
// appended but its shard id is only logged, leaving the slices misaligned.
// NOTE(review): that skew looks unintended — confirm whether a bad shard id
// should drop the peer as well.
func (config *Config) GetLeadersAndShardIds() ([]p2p.Peer, []uint32) {
	var peerList []p2p.Peer
	var shardIDs []uint32
	for _, entry := range config.config {
		if entry.Role == "leader" {
			peerList = append(peerList, p2p.Peer{Ip: entry.IP, Port: entry.Port})
			val, err := strconv.Atoi(entry.ShardID)
			if err == nil {
				shardIDs = append(shardIDs, uint32(val))
			} else {
				log.Print("[Generator] Error parsing the shard Id ", entry.ShardID)
			}
		}
	}
	return peerList, shardIDs
}
// GetClientPeer returns the first peer with the "client" role,
// or nil when the config contains no client entry.
func (config *Config) GetClientPeer() *p2p.Peer {
	for _, entry := range config.config {
		if entry.Role == "client" {
			return &p2p.Peer{Port: entry.Port, Ip: entry.IP}
		}
	}
	return nil
}
// GetClientPort returns the port of the first "client" entry in the config,
// or the empty string when no client node is configured.
func (config *Config) GetClientPort() string {
	for _, node := range config.config {
		if node.Role != "client" {
			continue
		}
		return node.Port
	}
	return ""
}
// ReadConfigFile parses the given config file (one "IP Port Role ShardID"
// record per space-separated line) and replaces config's roster with its
// contents. It returns the error from os.Open when the file cannot be read.
//
// Fixes over the previous version:
//   - the error was checked only AFTER `defer file.Close()`, deferring a
//     Close on a nil *os.File;
//   - log.Fatal exited the whole process, making the `return err` unreachable
//     and denying callers the chance to handle the failure themselves;
//   - a blank or short line panicked with an index-out-of-range; such lines
//     are now skipped.
func (config *Config) ReadConfigFile(filename string) error {
	file, err := os.Open(filename)
	if err != nil {
		log.Print("Failed to read config file ", filename)
		return err
	}
	defer file.Close()
	fscanner := bufio.NewScanner(file)
	result := []ConfigEntry{}
	for fscanner.Scan() {
		p := strings.Split(fscanner.Text(), " ")
		if len(p) < 4 {
			// Tolerate blank/malformed lines instead of panicking.
			continue
		}
		entry := ConfigEntry{p[0], p[1], p[2], p[3]}
		result = append(result, entry)
	}
	config.config = result
	return nil
}

@ -10,7 +10,7 @@ import (
"github.com/simple-rules/harmony-benchmark/blockchain"
"github.com/simple-rules/harmony-benchmark/client"
"github.com/simple-rules/harmony-benchmark/configr"
client_config "github.com/simple-rules/harmony-benchmark/client/config"
"github.com/simple-rules/harmony-benchmark/consensus"
"github.com/simple-rules/harmony-benchmark/crypto/pki"
"github.com/simple-rules/harmony-benchmark/log"
@ -244,9 +244,9 @@ func main() {
flag.Parse()
// Read the configs
configr := configr.NewConfigr()
configr.ReadConfigFile(*configFile)
leaders, shardIds := configr.GetLeadersAndShardIds()
config := client_config.NewConfig()
config.ReadConfigFile(*configFile)
leaders, shardIds := config.GetLeadersAndShardIds()
setting.numOfAddress = 10000
// Do cross shard tx if there are more than one shard
@ -272,7 +272,7 @@ func main() {
}
// Client/txgenerator server node setup
clientPort := configr.GetClientPort()
clientPort := config.GetClientPort()
consensusObj := consensus.NewConsensus("0", clientPort, "0", nil, p2p.Peer{})
clientNode := node.New(consensusObj, nil)
@ -349,6 +349,6 @@ func main() {
// Send a stop message to stop the nodes at the end
msg := proto_node.ConstructStopMessage()
peers := append(configr.GetValidators(), leaders...)
peers := append(config.GetValidators(), leaders...)
p2p.BroadcastMessage(peers, msg)
}

@ -6,10 +6,11 @@ import (
"errors"
"flag"
"fmt"
"github.com/dedis/kyber"
"github.com/simple-rules/harmony-benchmark/blockchain"
"github.com/simple-rules/harmony-benchmark/client"
"github.com/simple-rules/harmony-benchmark/configr"
client_config "github.com/simple-rules/harmony-benchmark/client/config"
"github.com/simple-rules/harmony-benchmark/crypto"
"github.com/simple-rules/harmony-benchmark/crypto/pki"
"github.com/simple-rules/harmony-benchmark/node"
@ -99,7 +100,7 @@ func main() {
}
StorePrivateKey(priKeyBytes)
case "showBalance":
configr := configr.NewConfigr()
configr := client_config.NewConfig()
configr.ReadConfigFile("local_config_shards.txt")
leaders, _ := configr.GetLeadersAndShardIds()
clientPeer := configr.GetClientPeer()
@ -174,7 +175,7 @@ func main() {
senderAddressBytes := pki.GetAddressFromPrivateKey(senderPriKey)
// Start client server
configr := configr.NewConfigr()
configr := client_config.NewConfig()
configr.ReadConfigFile("local_config_shards.txt")
leaders, _ := configr.GetLeadersAndShardIds()
clientPeer := configr.GetClientPeer()

@ -1,159 +0,0 @@
package configr
import (
"bufio"
"log"
"os"
"strconv"
"strings"
"github.com/simple-rules/harmony-benchmark/crypto"
"github.com/simple-rules/harmony-benchmark/crypto/pki"
"github.com/simple-rules/harmony-benchmark/p2p"
"github.com/simple-rules/harmony-benchmark/utils"
)
type ConfigEntry struct {
IP string
Port string
Role string
ShardID string
}
type Configr struct {
config []ConfigEntry
}
func NewConfigr() *Configr {
configr := Configr{}
return &configr
}
// Gets all the validator peers
func (configr *Configr) GetValidators() []p2p.Peer {
var peerList []p2p.Peer
for _, entry := range configr.config {
if entry.Role != "validator" {
continue
}
peer := p2p.Peer{Port: entry.Port, Ip: entry.IP}
peerList = append(peerList, peer)
}
return peerList
}
// Gets all the leader peers and corresponding shard Ids
func (configr *Configr) GetLeadersAndShardIds() ([]p2p.Peer, []uint32) {
var peerList []p2p.Peer
var shardIDs []uint32
for _, entry := range configr.config {
if entry.Role == "leader" {
peerList = append(peerList, p2p.Peer{Ip: entry.IP, Port: entry.Port})
val, err := strconv.Atoi(entry.ShardID)
if err == nil {
shardIDs = append(shardIDs, uint32(val))
} else {
log.Print("[Generator] Error parsing the shard Id ", entry.ShardID)
}
}
}
return peerList, shardIDs
}
func (configr *Configr) GetClientPeer() *p2p.Peer {
for _, entry := range configr.config {
if entry.Role != "client" {
continue
}
peer := p2p.Peer{Port: entry.Port, Ip: entry.IP}
return &peer
}
return nil
}
// Gets the port of the client node in the config
func (configr *Configr) GetClientPort() string {
for _, entry := range configr.config {
if entry.Role == "client" {
return entry.Port
}
}
return ""
}
// Parse the config file and return a 2d array containing the file data
func (configr *Configr) ReadConfigFile(filename string) error {
file, err := os.Open(filename)
defer file.Close()
if err != nil {
log.Fatal("Failed to read config file ", filename)
return err
}
fscanner := bufio.NewScanner(file)
result := []ConfigEntry{}
for fscanner.Scan() {
p := strings.Split(fscanner.Text(), " ")
entry := ConfigEntry{p[0], p[1], p[2], p[3]}
result = append(result, entry)
}
configr.config = result
return nil
}
// GetShardID Gets the shard id of the node corresponding to this ip and port
func (configr *Configr) GetShardID(ip, port string) string {
for _, entry := range configr.config {
if entry.IP == ip && entry.Port == port {
return entry.ShardID
}
}
return "N/A"
}
// GetPeers Gets the validator list
func (configr *Configr) GetPeers(ip, port, shardID string) []p2p.Peer {
var peerList []p2p.Peer
for _, entry := range configr.config {
if entry.Role != "validator" || entry.ShardID != shardID {
continue
}
// Get public key deterministically based on ip and port
peer := p2p.Peer{Port: entry.Port, Ip: entry.IP}
setKey(&peer)
peerList = append(peerList, peer)
}
return peerList
}
// GetLeader Gets the leader of this shard id
func (configr *Configr) GetLeader(shardID string) p2p.Peer {
var leaderPeer p2p.Peer
for _, entry := range configr.config {
if entry.Role == "leader" && entry.ShardID == shardID {
leaderPeer.Ip = entry.IP
leaderPeer.Port = entry.Port
setKey(&leaderPeer)
}
}
return leaderPeer
}
func (configr *Configr) GetConfigEntries() []ConfigEntry {
return configr.config
}
func (configr *Configr) GetMyConfigEntry(ip string, port string) *ConfigEntry {
for _, entry := range configr.config {
if entry.IP == ip && entry.Port == port {
return &entry
}
}
return nil
}
func setKey(peer *p2p.Peer) {
// Get public key deterministically based on ip and port
priKey := crypto.Ed25519Curve.Scalar().SetInt64(int64(utils.GetUniqueIdFromPeer(*peer))) // TODO: figure out why using a random hash value doesn't work for private key (schnorr)
peer.PubKey = pki.GetPublicKeyFromScalar(priKey)
}

@ -424,7 +424,12 @@ func (consensus *Consensus) reportMetrics(block blockchain.Block) {
"txCount": {strconv.Itoa(int(numOfTxs))},
"nodeCount": {strconv.Itoa(len(consensus.validators) + 1)},
"latestBlockHash": {hex.EncodeToString(consensus.blockHash[:])},
"latestTxHash": {hex.EncodeToString(block.TransactionIds[len(block.TransactionIds)-1][:])},
"latestTxHashes": {
hex.EncodeToString(block.TransactionIds[len(block.TransactionIds)-1][:]),
hex.EncodeToString(block.TransactionIds[len(block.TransactionIds)-2][:]),
hex.EncodeToString(block.TransactionIds[len(block.TransactionIds)-3][:]),
},
"blockLatency": {strconv.Itoa(int(timeElapsed / time.Millisecond))},
}
body := bytes.NewBufferString(form.Encode())

@ -38,6 +38,7 @@ db_supported=$2
go build -o bin/benchmark
go build -o bin/txgen client/txgen/main.go
go build -o bin/profiler profiler/main.go
cd bin
# Create a tmp folder for logs
t=`date +"%Y%m%d-%H%M%S"`
@ -51,9 +52,9 @@ while IFS='' read -r line || [[ -n "$line" ]]; do
#echo $ip $port $mode
if [ "$mode" != "client" ]; then
if [ -z "$db_supported" ]; then
./bin/benchmark -ip $ip -port $port -config_file $config -log_folder $log_folder&
./benchmark -ip $ip -port $port -config_file $config -log_folder $log_folder&
else
./bin/benchmark -ip $ip -port $port -config_file $config -log_folder $log_folder -db_supported&
./benchmark -ip $ip -port $port -config_file $config -log_folder $log_folder -db_supported&
fi
fi
done < $config
@ -61,5 +62,5 @@ done < $config
txgen_disabled=$3
# Generate transactions
if [ -z "$txgen_disabled" ]; then
./bin/txgen -config_file $config -log_folder $log_folder
./txgen -config_file $config -log_folder $log_folder
fi

@ -3,9 +3,8 @@
GOOS=linux
GOARCH=amd64
env GOOS=$GOOS GOARCH=$GOARCH go build -o bin/benchmark benchmark.go
env GOOS=$GOOS GOARCH=$GOARCH go build -o bin/soldier aws-experiment-launch/experiment/soldier/main.go
env GOOS=$GOOS GOARCH=$GOARCH go build -o bin/commander aws-experiment-launch/experiment/commander/main.go
env GOOS=$GOOS GOARCH=$GOARCH go build -o bin/txgen client/txgen/main.go
env GOOS=$GOOS GOARCH=$GOARCH go build -o bin/profiler profiler/main.go
AWSCLI=aws
if [ "$1" != "" ]; then
@ -13,7 +12,5 @@ if [ "$1" != "" ]; then
fi
$AWSCLI s3 cp bin/benchmark s3://unique-bucket-bin/benchmark --acl public-read-write
$AWSCLI s3 cp bin/soldier s3://unique-bucket-bin/soldier --acl public-read-write
$AWSCLI s3 cp bin/commander s3://unique-bucket-bin/commander --acl public-read-write
$AWSCLI s3 cp bin/txgen s3://unique-bucket-bin/txgen --acl public-read-write
$AWSCLI s3 cp kill_node.sh s3://unique-bucket-bin/kill_node.sh --acl public-read-write
$AWSCLI s3 cp bin/profiler s3://unique-bucket-bin/profiler --acl public-read-write

@ -30,6 +30,11 @@ func (b *IdentityBlock) Serialize() []byte {
return result.Bytes()
}
// GetIdentities returns the wait-node identities stored in this block.
// Note: it returns the internal slice directly, not a copy.
func (b *IdentityBlock) GetIdentities() []*waitnode.WaitNode {
	return b.Identities
}
// DeserializeBlock deserializes a block
func DeserializeBlock(d []byte) *IdentityBlock {
var block IdentityBlock

@ -2,6 +2,8 @@ package identitychain
import (
"fmt"
"math"
"math/rand"
"net"
"os"
"sync"
@ -16,20 +18,83 @@ var identityPerBlock = 100000
// IdentityChain (Blockchain) keeps Identities per epoch, currently centralized!
type IdentityChain struct {
Identities []*IdentityBlock
PendingIdentities []*waitnode.WaitNode
log log.Logger
Peer p2p.Peer
Identities []*IdentityBlock
PendingIdentities []*waitnode.WaitNode
log log.Logger
Peer p2p.Peer
SelectedIdentitites []*waitnode.WaitNode
EpochNum int
PeerToShardMap map[*waitnode.WaitNode]int
ShardLeaderMap map[int]*waitnode.WaitNode
PubKey string
CurrentEpochStartTime int64
NumberOfShards int
NumberOfNodesInShard int
}
// seekRandomNumber is a placeholder for distributed randomness generation.
// The intended protocol (per the comments below): broadcast to all nodes,
// collect their numbers, reach consensus on a leader, and let the leader
// produce the random number. For now it is fully mocked and simply returns
// an unseeded rand.Intn(1000); both parameters are ignored.
func seekRandomNumber(EpochNum int, SelectedIdentitites []*waitnode.WaitNode) int {
	// Broadcast message to all nodes and collect back their numbers, do consensus and get a leader.
	// Use leader to generate a random number.
	//all here mocked
	// interact with "node" and "wait_node"
	return rand.Intn(1000)
}
//GlobalBlockchainConfig stores global level blockchain configurations.
type GlobalBlockchainConfig struct {
NumberOfShards int
EpochTimeSecs int16
NumberOfShards int
EpochTimeSecs int16
MaxNodesInShard int
}
// Shard runs one sharding round: obtain a (currently mocked) random number,
// assign the selected identities to shards, then elect leaders (currently a
// no-op stub).
func (IDC *IdentityChain) Shard() {
	num := seekRandomNumber(IDC.EpochNum, IDC.SelectedIdentitites)
	IDC.CreateShardAssignment(num)
	IDC.ElectLeaders()
}
// ElectLeaders is a stub: leader election per shard is not implemented yet.
func (IDC *IdentityChain) ElectLeaders() {
}
// CreateShardAssignment shuffles the selected identities with a PRNG seeded
// by num and maps each identity to a shard index in PeerToShardMap.
//
// Fixes over the previous version:
//   - the loop ran peerNum = 1..len (inclusive), which indexed one past the
//     end of SelectedIdentitites (runtime panic) and never assigned index 0;
//   - a zero shard count or empty identity set caused a division by zero.
func (IDC *IdentityChain) CreateShardAssignment(num int) {
	IDC.NumberOfShards = IDC.NumberOfShards + needNewShards()
	IDC.SelectedIdentitites = generateRandomPermutations(num, IDC.SelectedIdentitites)
	IDC.PeerToShardMap = make(map[*waitnode.WaitNode]int)
	if IDC.NumberOfShards <= 0 || len(IDC.SelectedIdentitites) == 0 {
		return // nothing to assign; also guards the division below
	}
	numberInOneShard := len(IDC.SelectedIdentitites) / IDC.NumberOfShards
	if numberInOneShard == 0 {
		numberInOneShard = 1 // more shards than nodes: one node per shard
	}
	for peerNum := 0; peerNum < len(IDC.SelectedIdentitites); peerNum++ {
		IDC.PeerToShardMap[IDC.SelectedIdentitites[peerNum]] = peerNum / numberInOneShard
	}
}
// generateRandomPermutations returns a new slice holding the given identities
// shuffled by a PRNG deterministically seeded with num; the input slice is
// left untouched.
func generateRandomPermutations(num int, ids []*waitnode.WaitNode) []*waitnode.WaitNode {
	rnd := rand.New(rand.NewSource(int64(num)))
	shuffled := make([]*waitnode.WaitNode, len(ids))
	for dst, src := range rnd.Perm(len(ids)) {
		shuffled[dst] = ids[src]
	}
	return shuffled
}
// SelectIds promotes up to selectNumber pending identities: the new selection
// is the latest block's identities plus a prefix of PendingIdentities, and the
// pending list is then cleared.
// NOTE(review): selectNumber subtracts len(IDC.Identities) — the number of
// identity *blocks*, not nodes; confirm whether len(currentIDS) was intended.
// NOTE(review): if selectNumber goes negative (shard already over capacity),
// the slice expression PendingIdentities[:selectNumber] will panic — verify.
func (IDC *IdentityChain) SelectIds() {
	selectNumber := IDC.NumberOfNodesInShard - len(IDC.Identities)
	IB := IDC.GetLatestBlock()
	currentIDS := IB.GetIdentities()
	// Cap at however many identities are actually pending.
	selectNumber = int(math.Min(float64(len(IDC.PendingIdentities)), float64(selectNumber)))
	pending := IDC.PendingIdentities[:selectNumber]
	IDC.SelectedIdentitites = append(currentIDS, pending...)
	IDC.PendingIdentities = []*waitnode.WaitNode{}
}
func (IDC *IdentityChain) shard() {
return
// needNewShards reports how many additional shards should be created this
// epoch. Currently a fixed policy: never add shards (always 0).
func needNewShards() int {
	return 0
}
// GetLatestBlock gests the latest block at the end of the chain

@ -3,8 +3,6 @@
go build -o bin/benchmark
go build -o bin/txgen client/txgen/main.go
go build -o bin/commander aws-experiment-launch/experiment/commander/main.go
go build -o bin/soldier aws-experiment-launch/experiment/soldier/main.go
go build -o bin/profiler profiler/main.go
cd bin

@ -1,5 +1,7 @@
package main
import "fmt"
// import (
// "flag"
@ -8,6 +10,7 @@ package main
// )
func main() {
fmt.Println("hello")
// ip := flag.String("ip", "127.0.0.0", "IP of the node")
// port := flag.String("port", "8080", "port of the node")
// flag.Parse()

Loading…
Cancel
Save