From 20989c506916d4e878a10f012171e1b526e1539e Mon Sep 17 00:00:00 2001
From: ak
Date: Thu, 30 Aug 2018 11:26:20 -0700
Subject: [PATCH 01/20] WIP

---
 identitychain/identityblock.go | 5 ++++
 identitychain/identitychain.go | 46 ++++++++++++++++++++++++++++------
 2 files changed, 43 insertions(+), 8 deletions(-)

diff --git a/identitychain/identityblock.go b/identitychain/identityblock.go
index b1f72df4f..46be703fe 100644
--- a/identitychain/identityblock.go
+++ b/identitychain/identityblock.go
@@ -30,6 +30,11 @@ func (b *IdentityBlock) Serialize() []byte {
 	return result.Bytes()
 }
 
+//Get Identities
+func (b *IdentityBlock) GetIdentities() []*waitnode.WaitNode {
+	return b.Identities
+}
+
 // DeserializeBlock deserializes a block
 func DeserializeBlock(d []byte) *IdentityBlock {
 	var block IdentityBlock
diff --git a/identitychain/identitychain.go b/identitychain/identitychain.go
index c92193d61..359c079a8 100644
--- a/identitychain/identitychain.go
+++ b/identitychain/identitychain.go
@@ -16,20 +16,50 @@ var identityPerBlock = 100000
 
 // IdentityChain (Blockchain) keeps Identities per epoch, currently centralized!
 type IdentityChain struct {
-	Identities []*IdentityBlock
-	PendingIdentities []*waitnode.WaitNode
-	log log.Logger
-	Peer p2p.Peer
+	Identities []*IdentityBlock
+	PendingIdentities []*waitnode.WaitNode
+	log log.Logger
+	Peer p2p.Peer
+	SelectedIdentitites []*waitnode.WaitNode
+	EpochNum int
+	PeerToShardMap map[p2p.Peer]int
+	ShardLeaderMap map[int]p2p.Peer
+	PubKey string
+	CurrentEpochStartTime int64
+	NumberOfShards int
+	NumberOfNodesInShard int
+}
+
+func seekRandomNumber(EpochNum int, SelectedIdentitites []*waitnode.WaitNode) int {
+	return 10
 }
 
 //GlobalBlockchainConfig stores global level blockchain configurations.
 type GlobalBlockchainConfig struct {
-	NumberOfShards int
-	EpochTimeSecs int16
+	NumberOfShards int
+	EpochTimeSecs int16
+	MaxNodesInShard int
+}
+
+//Shard
+func (IDC *IdentityChain) Shard() {
+	num := seekRandomNumber(IDC.EpochNum, IDC.SelectedIdentitites)
+	fmt.Println(num)
+}
+
+// SelectIds
+func (IDC *IdentityChain) SelectIds() {
+	selectNumber := IDC.NumberOfNodesInShard - len(IDC.Identities)
+	IB := IDC.GetLatestBlock()
+	currentIDS := IB.GetIdentities()
+	pending := IDC.PendingIdentities[:selectNumber]
+	IDC.SelectedIdentitites = append(currentIDS, pending...)
+	IDC.PendingIdentities = []*waitnode.WaitNode{}
 }
 
-func (IDC *IdentityChain) shard() {
-	return
+//Checks how many new shards we need. Currently we say 0.
+func needNewShards() int {
+	return 0
 }
 
 // GetLatestBlock gests the latest block at the end of the chain

From 4ac29baacf245dcd7aa3a7cf69c073a4fd219522 Mon Sep 17 00:00:00 2001
From: ak
Date: Thu, 30 Aug 2018 15:50:48 -0700
Subject: [PATCH 02/20] unbreaking travis

---
 runwait/run_wait.go | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/runwait/run_wait.go b/runwait/run_wait.go
index 32bc38ede..b2a953adb 100644
--- a/runwait/run_wait.go
+++ b/runwait/run_wait.go
@@ -1,5 +1,7 @@
 package main
 
+import "fmt"
+
 // import (
 
 // "flag"
 // "github.com/simple-rules/harmony-benchmark/p2p"
 // "github.com/simple-rules/harmony-benchmark/waitnode"
 // )
 
-// func main() {
-// ip := flag.String("ip", "127.0.0.0", "IP of the node")
-// port := flag.String("port", "8080", "port of the node")
-// flag.Parse()
-// peer := p2p.Peer{Ip: *ip, Port: *port}
-// idcpeer := p2p.Peer{Ip: "localhost", Port: "9000"} //Hardcoded here.
-// node := waitnode.New(peer)
-// node.ConnectIdentityChain(idcpeer)
-// }
+func main() {
+	fmt.Println("hello")
+	// ip := flag.String("ip", "127.0.0.0", "IP of the node")
+	// port := flag.String("port", "8080", "port of the node")
+	// flag.Parse()
+	// peer := p2p.Peer{Ip: *ip, Port: *port}
+	// idcpeer := p2p.Peer{Ip: "localhost", Port: "9000"} //Hardcoded here.
+	// node := waitnode.New(peer)
+	// node.ConnectIdentityChain(idcpeer)
+}

From 889198782ca76e2166d38a8a28e5cef7b0092d8e Mon Sep 17 00:00:00 2001
From: ak
Date: Fri, 31 Aug 2018 10:09:29 -0700
Subject: [PATCH 03/20] identitychain select identities

---
 identitychain/identitychain.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/identitychain/identitychain.go b/identitychain/identitychain.go
index 359c079a8..ac87f00e1 100644
--- a/identitychain/identitychain.go
+++ b/identitychain/identitychain.go
@@ -2,6 +2,7 @@ package identitychain
 
 import (
 	"fmt"
+	"math"
 	"net"
 	"os"
 	"sync"
@@ -52,6 +53,7 @@ func (IDC *IdentityChain) SelectIds() {
 	selectNumber := IDC.NumberOfNodesInShard - len(IDC.Identities)
 	IB := IDC.GetLatestBlock()
 	currentIDS := IB.GetIdentities()
+	selectNumber = int(math.Min(float64(len(IDC.PendingIdentities)), float64(selectNumber)))
 	pending := IDC.PendingIdentities[:selectNumber]
 	IDC.SelectedIdentitites = append(currentIDS, pending...)
 	IDC.PendingIdentities = []*waitnode.WaitNode{}

From 59122b95e1cc68c27a66f06ad26e9fb1a4e2a852 Mon Sep 17 00:00:00 2001
From: ak
Date: Fri, 31 Aug 2018 14:13:19 -0700
Subject: [PATCH 04/20] peer to shard map

---
 identitychain/identitychain.go | 41 ++++++++++++++++++++++++++++----
 1 file changed, 37 insertions(+), 4 deletions(-)

diff --git a/identitychain/identitychain.go b/identitychain/identitychain.go
index ac87f00e1..ffd9e1372 100644
--- a/identitychain/identitychain.go
+++ b/identitychain/identitychain.go
@@ -3,6 +3,7 @@ package identitychain
 import (
 	"fmt"
 	"math"
+	"math/rand"
 	"net"
 	"os"
 	"sync"
@@ -23,8 +24,8 @@ type IdentityChain struct {
 	Peer p2p.Peer
 	SelectedIdentitites []*waitnode.WaitNode
 	EpochNum int
-	PeerToShardMap map[p2p.Peer]int
-	ShardLeaderMap map[int]p2p.Peer
+	PeerToShardMap map[*waitnode.WaitNode]int
+	ShardLeaderMap map[int]*waitnode.WaitNode
 	PubKey string
 	CurrentEpochStartTime int64
 	NumberOfShards int
@@ -32,7 +33,12 @@ type IdentityChain struct {
 }
 
 func seekRandomNumber(EpochNum int, SelectedIdentitites []*waitnode.WaitNode) int {
-	return 10
+	// Broadcast message to all nodes and collect back their numbers, do consensus and get a leader.
+	// Use leader to generate a random number.
+	//all here mocked
+	// interact with "node" and "wait_node"
+	return rand.Intn(1000)
+
 }
 
 //GlobalBlockchainConfig stores global level blockchain configurations.
 type GlobalBlockchainConfig struct {
@@ -45,7 +51,34 @@ type GlobalBlockchainConfig struct {
 //Shard
 func (IDC *IdentityChain) Shard() {
 	num := seekRandomNumber(IDC.EpochNum, IDC.SelectedIdentitites)
-	fmt.Println(num)
+	IDC.CreateShardAssignment(num)
+	IDC.ElectLeaders()
+}
+
+//
+func (IDC *IdentityChain) ElectLeaders() {
+}
+
+//CreateShardAssignment
+func (IDC *IdentityChain) CreateShardAssignment(num int) {
+	IDC.NumberOfShards = IDC.NumberOfShards + needNewShards()
+	IDC.SelectedIdentitites = generateRandomPermutations(num, IDC.SelectedIdentitites)
+	IDC.PeerToShardMap = make(map[*waitnode.WaitNode]int)
+	numberInOneShard := len(IDC.SelectedIdentitites) / IDC.NumberOfShards
+	for peerNum := 1; peerNum <= len(IDC.SelectedIdentitites); peerNum++ {
+		IDC.PeerToShardMap[IDC.SelectedIdentitites[peerNum]] = peerNum / numberInOneShard
+	}
+}
+
+func generateRandomPermutations(num int, SelectedIdentitites []*waitnode.WaitNode) []*waitnode.WaitNode {
+	src := rand.NewSource(int64(num))
+	rnd := rand.New(src)
+	perm := rnd.Perm(len(SelectedIdentitites))
+	SelectedIdentititesCopy := make([]*waitnode.WaitNode, len(SelectedIdentitites))
+	for j, i := range perm {
+		SelectedIdentititesCopy[j] = SelectedIdentitites[i]
+	}
+	return SelectedIdentititesCopy
 }
 
 // SelectIds

From 27cd6ce4d8a9641a27acbaaf7eadf89610a66f0f Mon Sep 17 00:00:00 2001
From: Leo Chen
Date: Sat, 1 Sep 2018 00:30:57 +0000
Subject: [PATCH 05/20] remove the aws experiment scripts from benchmark repo

the latest code is in https://github.com/simple-rules/experiment-deploy

Signed-off-by: Leo Chen
---
 aws-experiment-launch/cal_tps.sh | 1 -
 aws-experiment-launch/collect_public_ips.py | 35 --
 aws-experiment-launch/commander_prepare.py | 62 ----
 .../configs/userdata-commander.sh | 11 -
 .../configs/userdata-soldier.sh | 20 --
 aws-experiment-launch/configuration-git.txt | 8 -
 aws-experiment-launch/configuration.txt | 8 -
 .../create_deploy_soldiers.sh | 28 --
 .../create_solider_instances.py | 193 -----------
 .../download_log_from_commander.sh | 8 -
 .../download_log_from_leaders.sh | 17 -
 .../experiment/commander/main.go | 233 -------------
 .../experiment/soldier/main.go | 312 ------------------
 .../experiment/soldier/s3/s3.go | 93 ------
 .../experiment/utils/utils.go | 30 --
 .../generate_distribution_config.py | 37 ---
 aws-experiment-launch/logs_download.sh | 1 -
 aws-experiment-launch/report_extractor.py | 96 ------
 .../spot-instance/request-spot.sh | 18 -
 .../spot-instance/run-instances.sh | 10 -
 aws-experiment-launch/terminate_instances.py | 105 ------
 aws-experiment-launch/upload_binaries.py | 15 -
 aws-experiment-launch/upload_config.py | 10 -
 aws-experiment-launch/upload_s3.py | 9 -
 aws-experiment-launch/userdata-commander.sh | 11 -
 aws-experiment-launch/userdata-soldier.sh | 31 --
 aws-experiment-launch/utils/__init__.py | 0
 aws-experiment-launch/utils/configuration.txt | 8 -
 .../utils/launch_template.py | 46 ---
 aws-experiment-launch/utils/logger.py | 9 -
 aws-experiment-launch/utils/spot_fleet.py | 119 -------
 aws-experiment-launch/utils/utils.py | 210 ------------
 aws-experiment-launch/utils/utils_test.py | 31 --
 aws-scripts/kill_node.sh | 5 -
 aws-scripts/parse_json.py | 34 --
 aws-scripts/preprocess_peerlist.py | 26 --
 aws-scripts/run_instance.sh | 34 --
 aws-scripts/say_bye.sh | 1 -
 aws-scripts/say_hello.sh | 1 -
 aws-scripts/setup.sh | 43 ---
 aws-scripts/setup_instances.sh | 1 -
 41 files changed, 1970 deletions(-)
 delete mode 100644 aws-experiment-launch/cal_tps.sh
 delete mode 100644 aws-experiment-launch/collect_public_ips.py
 delete mode 100644
aws-experiment-launch/commander_prepare.py delete mode 100755 aws-experiment-launch/configs/userdata-commander.sh delete mode 100644 aws-experiment-launch/configs/userdata-soldier.sh delete mode 100644 aws-experiment-launch/configuration-git.txt delete mode 100644 aws-experiment-launch/configuration.txt delete mode 100755 aws-experiment-launch/create_deploy_soldiers.sh delete mode 100644 aws-experiment-launch/create_solider_instances.py delete mode 100644 aws-experiment-launch/download_log_from_commander.sh delete mode 100755 aws-experiment-launch/download_log_from_leaders.sh delete mode 100644 aws-experiment-launch/experiment/commander/main.go delete mode 100644 aws-experiment-launch/experiment/soldier/main.go delete mode 100644 aws-experiment-launch/experiment/soldier/s3/s3.go delete mode 100644 aws-experiment-launch/experiment/utils/utils.go delete mode 100644 aws-experiment-launch/generate_distribution_config.py delete mode 100755 aws-experiment-launch/logs_download.sh delete mode 100644 aws-experiment-launch/report_extractor.py delete mode 100755 aws-experiment-launch/spot-instance/request-spot.sh delete mode 100755 aws-experiment-launch/spot-instance/run-instances.sh delete mode 100644 aws-experiment-launch/terminate_instances.py delete mode 100644 aws-experiment-launch/upload_binaries.py delete mode 100644 aws-experiment-launch/upload_config.py delete mode 100644 aws-experiment-launch/upload_s3.py delete mode 100755 aws-experiment-launch/userdata-commander.sh delete mode 100644 aws-experiment-launch/userdata-soldier.sh delete mode 100644 aws-experiment-launch/utils/__init__.py delete mode 100644 aws-experiment-launch/utils/configuration.txt delete mode 100644 aws-experiment-launch/utils/launch_template.py delete mode 100644 aws-experiment-launch/utils/logger.py delete mode 100644 aws-experiment-launch/utils/spot_fleet.py delete mode 100644 aws-experiment-launch/utils/utils.py delete mode 100644 aws-experiment-launch/utils/utils_test.py delete mode 100755 aws-scripts/kill_node.sh delete mode 100644 aws-scripts/parse_json.py delete mode 100644 aws-scripts/preprocess_peerlist.py delete mode 100644 aws-scripts/run_instance.sh delete mode 100644 aws-scripts/say_bye.sh delete mode 100755 aws-scripts/say_hello.sh delete mode 100644 aws-scripts/setup.sh delete mode 100755 aws-scripts/setup_instances.sh diff --git a/aws-experiment-launch/cal_tps.sh b/aws-experiment-launch/cal_tps.sh deleted file mode 100644 index 2df68dc90..000000000 --- a/aws-experiment-launch/cal_tps.sh +++ /dev/null @@ -1 +0,0 @@ -for file in $(ls *leader*); do echo $file; cat $file | grep TPS | head -n 2 | cut -f2 -d ":" | cut -f1 -d "," | awk '{ sum += $1; n++ } END { if (n > 0) print sum / n; }'; done diff --git a/aws-experiment-launch/collect_public_ips.py b/aws-experiment-launch/collect_public_ips.py deleted file mode 100644 index 2e8c1e9dd..000000000 --- a/aws-experiment-launch/collect_public_ips.py +++ /dev/null @@ -1,35 +0,0 @@ -import argparse -import os -import random -import sys - -from utils import utils - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='This script helps you to collect public ips') - parser.add_argument('--instance_output', type=str, dest='instance_output', - default='instance_output.txt', - help='the file contains node_name_tag and region number of created instances.') - parser.add_argument('--region_config', type=str, - dest='region_config', default='configuration.txt') - parser.add_argument('--file_output', type=str, - dest='file_output', default='raw_ip.txt') - 
args = parser.parse_args() - - if not args.instance_output or not os.path.isfile(args.instance_output): - print "%s or %s are not existed" % (args.file_output, args.instance_output) - sys.exit(1) - if args.instance_output: - with open(args.instance_output, "r") as fin, open(args.file_output, "w") as fout: - total_ips = [] - node_name_tag_list = [] - for line in fin.readlines(): - items = line.split(" ") - region_number = items[1].strip() - node_name_tag = items[0].strip() - ip_list = utils.collect_public_ips(region_number, node_name_tag, args.region_config) - total_ips.extend([(ip, node_name_tag) for ip in ip_list]) - random.shuffle(total_ips) - for tuple in total_ips: - fout.write(tuple[0] + " " + tuple[1] + "\n") - print "Done collecting public ips %s" % args.file_output diff --git a/aws-experiment-launch/commander_prepare.py b/aws-experiment-launch/commander_prepare.py deleted file mode 100644 index dfbeb9eb0..000000000 --- a/aws-experiment-launch/commander_prepare.py +++ /dev/null @@ -1,62 +0,0 @@ -import argparse -import logging -import os -import stat -import sys - -from utils import utils - -logging.basicConfig(level=logging.INFO, format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s') -LOGGER = logging.getLogger(__file__) -LOGGER.setLevel(logging.INFO) - -PEMS = [ - "virginia-key-benchmark.pem", - "ohio-key-benchmark.pem", - "california-key-benchmark.pem", - "oregon-key-benchmark.pem", - "tokyo-key-benchmark.pem", - "singapore-key-benchmark.pem", - "frankfurt-key-benchmark.pem", - "ireland-key-benchmark.pem", -] - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='This script helps you to genereate distribution config') - parser.add_argument('--distribution_config', type=str, - dest='distribution_config', default='distribution_config.txt') - parser.add_argument('--commander_logging', type=str, - dest='commander_logging', default='commander_logging.sh') - - args = parser.parse_args() - - if not os.path.exists(args.distribution_config): - sys.exit(1) - with open(args.distribution_config, "r") as fin: - lines = fin.readlines() - - commander_address = None - commander_region = None - commander_output = None - with open(args.distribution_config, "w") as fout: - for line in lines: - if "commander" in line: - items = [item.strip() for item in line.split(" ")] - commander_address = items[0] - commander_region = int(items[4][0]) - commander_output = "\n".join(items) - else: - fout.write(line.strip() + "\n") - LOGGER.info("Generated %s" % args.distribution_config) - - if not commander_address or not commander_region: - LOGGER.info("Failed to extract commander address and commander region.") - sys.exit(1) - - with open(args.commander_logging, "w") as fout: - fout.write("ssh -i ./keys/%s ec2-user@%s\n" % (PEMS[commander_region - 1], commander_address)) - st = os.stat(args.commander_logging) - os.chmod(args.commander_logging, st.st_mode | stat.S_IEXEC) - LOGGER.info("Generated %s" % args.commander_logging) - LOGGER.info("DONE.") - diff --git a/aws-experiment-launch/configs/userdata-commander.sh b/aws-experiment-launch/configs/userdata-commander.sh deleted file mode 100755 index 165aab479..000000000 --- a/aws-experiment-launch/configs/userdata-commander.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -x -REGION=$(curl 169.254.169.254/latest/meta-data/placement/availability-zone/ | sed 's/[a-z]$//') -#yum update -y #This breaking codedeploy right now -yum install ruby wget -y -cd /home/ec2-user -touch yum-not-updated.txt -wget 
https://aws-codedeploy-$REGION.s3.amazonaws.com/latest/install -chmod +x ./install -./install auto -mkdir projects -mkdir projects/src \ No newline at end of file diff --git a/aws-experiment-launch/configs/userdata-soldier.sh b/aws-experiment-launch/configs/userdata-soldier.sh deleted file mode 100644 index 108681881..000000000 --- a/aws-experiment-launch/configs/userdata-soldier.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -cd /home/ec2-user -commanderIP= # <- Put the commander IP here. -curl http://$commanderIP:8080/soldier -o soldier -chmod +x ./soldier -curl http://$commanderIP:8080/benchmark -o benchmark -chmod +x ./benchmark -curl http://$commanderIP:8080/txgen -o txgen -chmod +x ./txgen - -# Get My IP -ip=`curl http://169.254.169.254/latest/meta-data/public-ipv4` - -node_port=9000 -soldier_port=1$node_port -# Kill existing soldier -fuser -k -n tcp $soldier_port - -# Run soldier -./soldier -ip $ip -port $node_port > soldier_log 2>&1 & \ No newline at end of file diff --git a/aws-experiment-launch/configuration-git.txt b/aws-experiment-launch/configuration-git.txt deleted file mode 100644 index f279c3cfe..000000000 --- a/aws-experiment-launch/configuration-git.txt +++ /dev/null @@ -1,8 +0,0 @@ -1,us-east-1,virginia-key-benchmark,virginia-security-group,virginia,ami-97785bed,sg-04d0b62ee08ce8800 -2,us-east-2,ohio-key-benchmark,ohio-security-group,ohio,ami-f63b1193,sg-0789078f1c76defbe -3,us-west-1,california-key-benchmark,california-security-group,california,ami-824c4ee2,sg-0a66ccb6ab9161a14 -4,us-west-2,oregon-key-benchmark,oregon-security-group,oregon,ami-f2d3638a,sg-020cb5729fa212d43 -5,ap-northeast-1,tokyo-key-benchmark,tokyo-security-group,tokyo,ami-ceafcba8,sg-009aeb97f675c1ad5 -6,ap-southeast-1,singapore-key-benchmark,singapore-security-group,singapore,ami-68097514,sg-05f9b60044a19dfb2 -7,eu-central-1,frankfurt-key-benchmark,frankfurt-security-group,frankfurt,ami-5652ce39,sg-0bb06fcd8b25b5910 -8,eu-west-1,ireland-key-benchmark,ireland-security-group,ireland,ami-d834aba1,sg-0aa8954acb79fdb58 \ No newline at end of file diff --git a/aws-experiment-launch/configuration.txt b/aws-experiment-launch/configuration.txt deleted file mode 100644 index d713ce4b5..000000000 --- a/aws-experiment-launch/configuration.txt +++ /dev/null @@ -1,8 +0,0 @@ -1,us-east-1,virginia-key-benchmark,virginia-security-group,virginia,ami-b70554c8,sg-04d0b62ee08ce8800 -2,us-east-2,ohio-key-benchmark,ohio-security-group,ohio,ami-8c122be9,sg-0789078f1c76defbe -3,us-west-1,california-key-benchmark,california-security-group,california,ami-e0ba5c83,sg-0a66ccb6ab9161a14 -4,us-west-2,oregon-key-benchmark,oregon-security-group,oregon,ami-a9d09ed1,sg-020cb5729fa212d43 -5,ap-northeast-1,tokyo-key-benchmark,tokyo-security-group,tokyo,ami-e99f4896,sg-009aeb97f675c1ad5 -6,ap-southeast-1,singapore-key-benchmark,singapore-security-group,singapore,ami-05868579,sg-05f9b60044a19dfb2 -7,eu-central-1,frankfurt-key-benchmark,frankfurt-security-group,frankfurt,ami-7c4f7097,sg-0bb06fcd8b25b5910 -8,eu-west-1,ireland-key-benchmark,ireland-security-group,ireland,ami-466768ac,sg-0aa8954acb79fdb58 \ No newline at end of file diff --git a/aws-experiment-launch/create_deploy_soldiers.sh b/aws-experiment-launch/create_deploy_soldiers.sh deleted file mode 100755 index 1deb030ef..000000000 --- a/aws-experiment-launch/create_deploy_soldiers.sh +++ /dev/null @@ -1,28 +0,0 @@ -if [ $# -lt 3 ]; then - echo "Please provide # of instances, # of shards, # of clients" - exit 1 -fi - -INSTANCE_NUM=$1 -SHARD_NUM=$2 -CLIENT_NUM=$3 -SLEEP_TIME=10 - 
-echo "Creating $INSTANCE_NUM instances at 8 regions" -python create_solider_instances.py --regions 1,2,3,4,5,6,7,8 --instances $INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM - - -echo "Sleep for $SLEEP_TIME seconds" -sleep $SLEEP_TIME - -echo "Rung collecting raw ips" -python collect_public_ips.py --instance_output instance_output.txt - -# sleep 10 -echo "Generate distribution_config" -python generate_distribution_config.py --ip_list_file raw_ip.txt --shard_number $SHARD_NUM --client_number $CLIENT_NUM - -echo "Run commander prepare" -python commander_prepare.py - -aws s3 cp distribution_config.txt s3://unique-bucket-bin/distribution_config.txt --acl public-read-write diff --git a/aws-experiment-launch/create_solider_instances.py b/aws-experiment-launch/create_solider_instances.py deleted file mode 100644 index 7b4e2cdc4..000000000 --- a/aws-experiment-launch/create_solider_instances.py +++ /dev/null @@ -1,193 +0,0 @@ -import argparse -import base64 -import boto3 -import datetime -import json -import sys -import threading -import time -import enum - -#TODO REMOVE UTILS -from utils import utils, spot_fleet, logger - -LOGGER = logger.getLogger(__file__) -REGION_NAME = 'region_name' -REGION_KEY = 'region_key' -REGION_SECURITY_GROUP = 'region_security_group' -REGION_SECURITY_GROUP_ID = 'region_security_group_id' -REGION_HUMAN_NAME = 'region_human_name' -INSTANCE_TYPE = 't2.micro' -REGION_AMI = 'region_ami' - -class InstanceResource(enum.Enum): - ON_DEMAND = 'ondemand' - SPOT_INSTANCE = 'spot' - SPOT_FLEET = 'fleet' - - def __str__(self): - return self.value - -def run_one_region_on_demand_instances(config, region_number, number_of_instances, tag): - ec2_client = utils.create_client(config, region_number) - node_name_tag = create_instances( - config, ec2_client, region_number, number_of_instances, tag) - LOGGER.info("Created %s in region %s" % (node_name_tag, region_number)) - return node_name_tag, ec2_client - -def create_instances(config, ec2_client, region_number, number_of_instances, tag): - node_name_tag = utils.get_node_name_tag2(region_number, tag) - LOGGER.info("Creating node_name_tag: %s" % node_name_tag) - available_zone = utils.get_one_availability_zone(ec2_client) - LOGGER.info("Looking at zone %s to create instances." % available_zone) - - time.sleep(2) - ec2_client.run_instances( - MinCount=number_of_instances, - MaxCount=number_of_instances, - ImageId=config[region_number][utils.REGION_AMI], - Placement={ - 'AvailabilityZone': available_zone, - }, - SecurityGroups=[config[region_number][utils.REGION_SECURITY_GROUP]], - IamInstanceProfile={ - 'Name': utils.IAM_INSTANCE_PROFILE - }, - KeyName=config[region_number][utils.REGION_KEY], - UserData=utils.USER_DATA, - InstanceType=utils.INSTANCE_TYPE, - TagSpecifications=[ - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'Name', - 'Value': node_name_tag - }, - ] - }, - ], - ) - - retry_count = 10 - while retry_count > 0: - try: - time.sleep(20) - instance_ids = utils.get_instance_ids2(ec2_client, node_name_tag) - LOGGER.info("Waiting for all %d instances in region %s with node_name_tag %s to be in RUNNING" % ( - len(instance_ids), region_number, node_name_tag)) - break - except: - retry_count -= 1 - LOGGER.info("Failed to get instance ids. 
Retry again.") - retry_count = 10 - while retry_count > 0: - try: - time.sleep(20) - waiter = ec2_client.get_waiter('instance_running') - waiter.wait(InstanceIds=instance_ids) - break - except: - retry_count -= 1 - LOGGER.info("Failed to wait.") - - retry_count = 10 - while retry_count > 0: - time.sleep(20) - LOGGER.info("Waiting ...") - ip_list = utils.collect_public_ips_from_ec2_client( - ec2_client, node_name_tag) - if len(ip_list) == number_of_instances: - LOGGER.info("Created %d instances" % number_of_instances) - return node_name_tag - retry_count -= 10 - LOGGER.info("Can not get %d instances" % number_of_instances) - return node_name_tag - - -LOCK_FOR_RUN_ONE_REGION = threading.Lock() - -def run_for_one_region_on_demand(config, region_number, number_of_instances, fout, fout2): - tag = 0 - number_of_instances = int(number_of_instances) - while number_of_instances > 0: - number_of_creation = min(utils.MAX_INSTANCES_FOR_DEPLOYMENT, number_of_instances) - node_name_tag, ec2_client = run_one_region_on_demand_instances( - config, region_number, number_of_creation, tag) - if node_name_tag: - LOGGER.info("Managed to create instances for region %s with name_name_tag %s" % - (region_number, node_name_tag)) - instance_ids = utils.get_instance_ids2(ec2_client, node_name_tag) - LOCK_FOR_RUN_ONE_REGION.acquire() - try: - fout.write("%s %s\n" % (node_name_tag, region_number)) - for instance_id in instance_ids: - fout2.write(instance_id + " " + node_name_tag + " " + region_number + - " " + config[region_number][utils.REGION_NAME] + "\n") - finally: - LOCK_FOR_RUN_ONE_REGION.release() - else: - LOGGER.info("Failed to create instances for region %s" % region_number) - number_of_instances -= number_of_creation - tag += 1 - -def read_region_config(region_config='configuration.txt'): - global CONFIG - config = {} - with open(region_config, 'r') as f: - for myline in f: - mylist = [item.strip() for item in myline.strip().split(',')] - region_num = mylist[0] - config[region_num] = {} - config[region_num][REGION_NAME] = mylist[1] - config[region_num][REGION_KEY] = mylist[2] - config[region_num][REGION_SECURITY_GROUP] = mylist[3] - config[region_num][REGION_HUMAN_NAME] = mylist[4] - config[region_num][REGION_AMI] = mylist[5] - config[region_num][REGION_SECURITY_GROUP_ID] = mylist[6] - CONFIG = config - return config - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description='This script helps you start instances across multiple regions') - parser.add_argument('--regions', type=str, dest='regions', - default='3', help="Supply a csv list of all regions") - parser.add_argument('--instances', type=str, dest='num_instance_list', - default='1', help='number of instances in respective of region') - parser.add_argument('--region_config', type=str, - dest='region_config', default='configuration.txt') - parser.add_argument('--instance_output', type=str, dest='instance_output', - default='instance_output.txt', help='the file to append or write') - parser.add_argument('--instance_ids_output', type=str, dest='instance_ids_output', - default='instance_ids_output.txt', help='the file to append or write') - parser.add_argument('--instance_resource', dest='instance_resource', type=InstanceResource, - default=InstanceResource.ON_DEMAND, choices=list(InstanceResource)) - parser.add_argument('--append', dest='append', type=bool, default=False, - help='append to the current instance_output') - args = parser.parse_args() - config = read_region_config(args.region_config) - region_list = 
args.regions.split(',') - num_instance_list = args.num_instance_list.split(',') - instance_resource = args.instance_resource - assert len(region_list) == len(num_instance_list), "number of regions: %d != number of instances per region: %d" % ( - len(region_list), len(num_instance_list)) - - write_mode = "a" if args.append else "w" - with open(args.instance_output, write_mode) as fout, open(args.instance_ids_output, write_mode) as fout2: - thread_pool = [] - for i in range(len(region_list)): - region_number = region_list[i] - number_of_instances = num_instance_list[i] - if instance_resource == InstanceResource.ON_DEMAND: - t = threading.Thread(target=run_for_one_region_on_demand, args=( - config, region_number, number_of_instances, fout, fout2)) - elif instance_resource == InstanceResource.SPOT_FLEET: - t = threading.Thread(target=spot_fleet.run_one_region, args=( - region_number, number_of_instances, fout, fout2)) - LOGGER.info("creating thread for region %s" % region_number) - t.start() - thread_pool.append(t) - for t in thread_pool: - t.join() - LOGGER.info("done.") diff --git a/aws-experiment-launch/download_log_from_commander.sh b/aws-experiment-launch/download_log_from_commander.sh deleted file mode 100644 index f2d5815a8..000000000 --- a/aws-experiment-launch/download_log_from_commander.sh +++ /dev/null @@ -1,8 +0,0 @@ -# Change the commander address -if [ $# -eq 0 ]; then - echo "Please provide ip address of the commander" - exit 1 -fi -ADDRESS=$1 -mkdir -p ./tmp -scp -r -i "california-key-benchmark.pem" ec2-user@$ADDRESS:~/projects/src/harmony-benchmark/bin/upload ./tmp/ diff --git a/aws-experiment-launch/download_log_from_leaders.sh b/aws-experiment-launch/download_log_from_leaders.sh deleted file mode 100755 index 977194aba..000000000 --- a/aws-experiment-launch/download_log_from_leaders.sh +++ /dev/null @@ -1,17 +0,0 @@ -# Make sure to have all keys with mode 600 at harmony-benchmark directory. -IFS=$'\n' -rm -rf ./tmp -mkdir tmp -for address in $(cat ./leader_addresses.txt) -do - echo "trying to download from address $address" - mkdir -p tmp/$address - scp -r -o "StrictHostKeyChecking no" -i ../keys/california-key-benchmark.pem ec2-user@$address:/home/tmp_log/* ./tmp/$address/ - scp -r -o "StrictHostKeyChecking no" -i ../keys/frankfurt-key-benchmark.pem ec2-user@$address:/home/tmp_log/* ./tmp/$address/ - scp -r -o "StrictHostKeyChecking no" -i ../keys/ireland-key-benchmark.pem ec2-user@$address:/home/tmp_log/* ./tmp/$address/ - scp -r -o "StrictHostKeyChecking no" -i ../keys/ohio-key-benchmark.pem ec2-user@$address:/home/tmp_log/* ./tmp/$address/ - scp -r -o "StrictHostKeyChecking no" -i ../keys/oregon-key-benchmark.pem ec2-user@$address:/home/tmp_log/* ./tmp/$address/ - scp -r -o "StrictHostKeyChecking no" -i ../keys/singapore-key-benchmark.pem ec2-user@$address:/home/tmp_log/* ./tmp/$address/ - scp -r -o "StrictHostKeyChecking no" -i ../keys/tokyo-key-benchmark.pem ec2-user@$address:/home/tmp_log/* ./tmp/$address/ - scp -r -o "StrictHostKeyChecking no" -i ../keys/virginia-key-benchmark.pem ec2-user@$address:/home/tmp_log/* ./tmp/$address/ -done \ No newline at end of file diff --git a/aws-experiment-launch/experiment/commander/main.go b/aws-experiment-launch/experiment/commander/main.go deleted file mode 100644 index b917d573c..000000000 --- a/aws-experiment-launch/experiment/commander/main.go +++ /dev/null @@ -1,233 +0,0 @@ -/* -Commander has two modes to setup configuration: Local and S3. - -Local Config Mode - -The Default Mode. 
- -Add `-mode local` or omit `-mode` to enter local config mode. In this mode, the `commander` will host the config file `config.txt` on the commander machine and `solider`s will download the config file from `http://{commander_ip}:{commander_port}/distribution_config.txt`. - -Remote Config Mode - -Add `-mode remote` to enter remote config mode. In this mode, the `soldier`s will download the config file from a remote URL (use `-config_url {url}` to set the URL). -*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "io" - "log" - "net" - "net/http" - "os" - "strings" - "time" - - "github.com/simple-rules/harmony-benchmark/aws-experiment-launch/experiment/utils" - "github.com/simple-rules/harmony-benchmark/configr" -) - -type commanderSetting struct { - ip string - port string - mode string - // Options in s3 mode - configURL string - - configr *configr.Configr -} - -type sessionInfo struct { - id string - uploadFolder string -} - -var ( - setting commanderSetting - session sessionInfo -) - -const ( - DistributionFileName = "distribution_config.txt" - DefaultConfigUrl = "https://s3-us-west-2.amazonaws.com/unique-bucket-bin/distribution_config.txt" -) - -func handleCommand(command string) { - args := strings.Split(command, " ") - if len(args) <= 0 { - return - } - - switch cmd := args[0]; cmd { - case "config": - if setting.mode == "s3" { - // In s3 mode, download the config file from configURL first. - if err := utils.DownloadFile(DistributionFileName, setting.configURL); err != nil { - panic(err) - } - } - - err := setting.configr.ReadConfigFile(DistributionFileName) - if err == nil { - log.Printf("The loaded config has %v nodes\n", len(setting.configr.GetConfigEntries())) - } else { - log.Println("Failed to read config file") - } - case "init": - session.id = time.Now().Format("150405-20060102") - // create upload folder - session.uploadFolder = fmt.Sprintf("upload/%s", session.id) - err := os.MkdirAll(session.uploadFolder, os.ModePerm) - if err != nil { - log.Println("Failed to create upload folder", session.uploadFolder) - return - } - log.Println("New session", session.id) - - dictateNodes(fmt.Sprintf("init %v %v %v %v", setting.ip, setting.port, setting.configURL, session.id)) - case "ping", "kill", "log", "log2": - dictateNodes(command) - default: - log.Println("Unknown command") - } -} - -func config(ip string, port string, mode string, configURL string) { - setting.ip = ip - setting.port = port - setting.mode = mode - if mode == "local" { - setting.configURL = fmt.Sprintf("http://%s:%s/%s", ip, port, DistributionFileName) - } else { - setting.configURL = configURL - } - setting.configr = configr.NewConfigr() -} - -func dictateNodes(command string) { - resultChan := make(chan int) - configs := setting.configr.GetConfigEntries() - for _, entry := range configs { - port := "1" + entry.Port // the port number of solider is "1" + node port - addr := strings.Join([]string{entry.IP, port}, ":") - - go func(resultChan chan int) { - resultChan <- dictateNode(addr, command) - }(resultChan) - } - count := len(configs) - res := 0 - for ; count > 0; count-- { - res += <-resultChan - } - - log.Printf("Finished %s with %v nodes\n", command, res) -} - -func dictateNode(addr string, command string) int { - // creates client - conn, err := net.DialTimeout("tcp", addr, 5*time.Second) - if err != nil { - log.Println(err) - return 0 - } - defer conn.Close() - - // send command - _, err = conn.Write([]byte(command)) - if err != nil { - log.Printf("Failed to send command to %s", addr) - return 0 - } 
- // log.Printf("Send \"%s\" to %s", command, addr) - - // read response - buff := make([]byte, 1024) - if n, err := conn.Read(buff); err == nil { - received := string(buff[:n]) - // log.Printf("Receive from %s: %s", addr, buff[:n]) - if strings.Contains(received, "Failed") { - return 0 - } else { - return 1 - } - } - return 0 -} - -func handleUploadRequest(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - // reject non-post requests - jsonResponse(w, http.StatusBadRequest, "Only post request is accepted.") - return - } - - reader, err := r.MultipartReader() - if err != nil { - jsonResponse(w, http.StatusBadRequest, err.Error()) - return - } - - for { - part, err := reader.NextPart() - if err == io.EOF { - break - } - - dst, err := os.Create(fmt.Sprintf("%s/%s", session.uploadFolder, part.FileName())) - log.Println(part.FileName()) - if err != nil { - jsonResponse(w, http.StatusInternalServerError, err.Error()) - return - } - defer dst.Close() - - if _, err := io.Copy(dst, part); err != nil { - jsonResponse(w, http.StatusInternalServerError, err.Error()) - return - } - } -} - -func jsonResponse(w http.ResponseWriter, code int, message string) { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(code) - fmt.Fprint(w, message) - log.Println(message) -} - -func serve() { - if setting.mode == "local" { - // Only host config file if in local mode - http.Handle("/", http.FileServer(http.Dir("./"))) - } - http.HandleFunc("/upload", handleUploadRequest) - err := http.ListenAndServe(":"+setting.port, nil) - if err != nil { - log.Fatalf("Failed to setup server! Error: %s", err.Error()) - } - log.Printf("Start to host upload endpoint at http://%s:%s/upload\n", setting.ip, setting.port) -} - -func main() { - ip := flag.String("ip", "127.0.0.1", "The ip of commander, i.e. this machine") - port := flag.String("port", "8080", "The port which the commander uses to communicate with soldiers") - mode := flag.String("mode", "local", "The config mode, local or s3") - configURL := flag.String("config_url", DefaultConfigUrl, "The config URL") - flag.Parse() - - config(*ip, *port, *mode, *configURL) - - go serve() - - scanner := bufio.NewScanner(os.Stdin) - for true { - log.Printf("Listening to Your Command:") - if !scanner.Scan() { - break - } - handleCommand(scanner.Text()) - } -} diff --git a/aws-experiment-launch/experiment/soldier/main.go b/aws-experiment-launch/experiment/soldier/main.go deleted file mode 100644 index 176f5488c..000000000 --- a/aws-experiment-launch/experiment/soldier/main.go +++ /dev/null @@ -1,312 +0,0 @@ -/* -Soldier is responsible for receiving commands from commander and doing tasks such as starting nodes, uploading logs. 
- - cd harmony-benchmark/bin - go build -o soldier ../aws-experiment-launch/experiment/soldier/main.go - ./soldier -ip={node_ip} -port={node_port} -*/ -package main - -import ( - "bufio" - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "mime/multipart" - "net" - "net/http" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/simple-rules/harmony-benchmark/aws-experiment-launch/experiment/soldier/s3" - "github.com/simple-rules/harmony-benchmark/aws-experiment-launch/experiment/utils" - "github.com/simple-rules/harmony-benchmark/configr" - globalUtils "github.com/simple-rules/harmony-benchmark/utils" -) - -type soliderSetting struct { - ip string - port string -} - -type sessionInfo struct { - id string - commanderIP string - commanderPort string - localConfigFileName string - logFolder string - configr *configr.Configr - myConfig configr.ConfigEntry -} - -const ( - bucketName = "richard-bucket-test" - logFolderPrefix = "../tmp_log/" -) - -var ( - setting soliderSetting - globalSession sessionInfo -) - -func socketServer() { - soldierPort := "1" + setting.port // the soldier port is "1" + node port - listen, err := net.Listen("tcp4", ":"+soldierPort) - if err != nil { - log.Fatalf("Socket listen port %s failed,%s", soldierPort, err) - os.Exit(1) - } - defer listen.Close() - log.Printf("Begin listen for command on port: %s", soldierPort) - - for { - conn, err := listen.Accept() - if err != nil { - log.Fatalln(err) - continue - } - go handler(conn) - } -} - -func handler(conn net.Conn) { - defer conn.Close() - - var ( - buf = make([]byte, 1024) - r = bufio.NewReader(conn) - w = bufio.NewWriter(conn) - ) - -ILOOP: - for { - n, err := r.Read(buf) - data := string(buf[:n]) - - switch err { - case io.EOF: - break ILOOP - case nil: - log.Println("Received command", data) - - handleCommand(data, w) - - log.Println("Waiting for new command...") - - default: - log.Fatalf("Receive data failed:%s", err) - return - } - } -} - -func handleCommand(command string, w *bufio.Writer) { - args := strings.Split(command, " ") - - if len(args) <= 0 { - return - } - - switch command := args[0]; command { - case "ping": - { - handlePingCommand(w) - } - case "init": - { - handleInitCommand(args[1:], w) - } - case "kill": - { - handleKillCommand(w) - } - case "log": - { - handleLogCommand(w) - } - case "log2": - { - handleLog2Command(w) - } - } -} - -func handleInitCommand(args []string, w *bufio.Writer) { - // init ip port config_file sessionID - log.Println("Init command", args) - - // read arguments - ip := args[0] - globalSession.commanderIP = ip - port := args[1] - globalSession.commanderPort = port - configURL := args[2] - sessionID := args[3] - globalSession.id = sessionID - globalSession.logFolder = fmt.Sprintf("%slog-%v", logFolderPrefix, sessionID) - - // create local config file - globalSession.localConfigFileName = fmt.Sprintf("node_config_%v_%v.txt", setting.port, globalSession.id) - utils.DownloadFile(globalSession.localConfigFileName, configURL) - log.Println("Successfully downloaded config") - - globalSession.configr.ReadConfigFile(globalSession.localConfigFileName) - globalSession.myConfig = *globalSession.configr.GetMyConfigEntry(setting.ip, setting.port) - - if err := runInstance(); err == nil { - logAndReply(w, "Done init.") - } else { - logAndReply(w, "Failed.") - } -} - -func handleKillCommand(w *bufio.Writer) { - log.Println("Kill command") - if err := killPort(setting.port); err == nil { - logAndReply(w, "Done kill.") - } else { - logAndReply(w, "Failed.") - } -} - -func 
killPort(port string) error { - if runtime.GOOS == "windows" { - command := fmt.Sprintf("(Get-NetTCPConnection -LocalPort %s).OwningProcess -Force", port) - return globalUtils.RunCmd("Stop-Process", "-Id", command) - } else { - command := fmt.Sprintf("lsof -i tcp:%s | grep LISTEN | awk '{print $2}' | xargs kill -9", port) - return globalUtils.RunCmd("bash", "-c", command) - } -} - -func handlePingCommand(w *bufio.Writer) { - log.Println("Ping command") - logAndReply(w, "I'm alive") -} - -func handleLogCommand(w *bufio.Writer) { - log.Println("Log command") - - files, err := ioutil.ReadDir(globalSession.logFolder) - if err != nil { - logAndReply(w, fmt.Sprintf("Failed to read log folder. Error: %s", err.Error())) - return - } - - filePaths := make([]string, len(files)) - for i, f := range files { - filePaths[i] = fmt.Sprintf("%s/%s", globalSession.logFolder, f.Name()) - } - - req, err := newUploadFileRequest( - fmt.Sprintf("http://%s:%s/upload", globalSession.commanderIP, globalSession.commanderPort), - "file", - filePaths, - nil) - if err != nil { - logAndReply(w, fmt.Sprintf("Failed to create upload request. Error: %s", err.Error())) - return - } - client := &http.Client{} - _, err = client.Do(req) - if err != nil { - logAndReply(w, fmt.Sprintf("Failed to upload log. Error: %s", err.Error())) - return - } - logAndReply(w, "Upload log done!") -} - -// Creates a new file upload http request with optional extra params -func newUploadFileRequest(uri string, paramName string, paths []string, params map[string]string) (*http.Request, error) { - body := &bytes.Buffer{} - writer := multipart.NewWriter(body) - for _, path := range paths { - file, err := os.Open(path) - if err != nil { - return nil, err - } - defer file.Close() - part, err := writer.CreateFormFile(paramName, filepath.Base(path)) - if err != nil { - return nil, err - } - _, err = io.Copy(part, file) - log.Printf(path) - } - - for key, val := range params { - _ = writer.WriteField(key, val) - } - err := writer.Close() - if err != nil { - return nil, err - } - - req, err := http.NewRequest("POST", uri, body) - req.Header.Set("Content-Type", writer.FormDataContentType()) - return req, err -} - -func logAndReply(w *bufio.Writer, message string) { - log.Println(message) - w.Write([]byte(message)) - w.Flush() -} - -func handleLog2Command(w *bufio.Writer) { - log.Println("Log command") - - files, err := ioutil.ReadDir(globalSession.logFolder) - if err != nil { - logAndReply(w, fmt.Sprintf("Failed to create log folder. Error: %s", err.Error())) - return - } - - filePaths := make([]string, len(files)) - for i, f := range files { - filePaths[i] = fmt.Sprintf("%s/%s", globalSession.logFolder, f.Name()) - } - - // TODO: currently only upload the first file. - _, err = s3.UploadFile(bucketName, filePaths[0], strings.Replace(filePaths[0], logFolderPrefix, "", 1)) - if err != nil { - logAndReply(w, fmt.Sprintf("Failed to create upload request. 
Error: %s", err.Error())) - return - } - logAndReply(w, "Upload log done!") -} - -func runInstance() error { - os.MkdirAll(globalSession.logFolder, os.ModePerm) - - if globalSession.myConfig.Role == "client" { - return runClient() - } - return runNode() -} - -func runNode() error { - log.Println("running instance") - return globalUtils.RunCmd("./benchmark", "-ip", setting.ip, "-port", setting.port, "-config_file", globalSession.localConfigFileName, "-log_folder", globalSession.logFolder) -} - -func runClient() error { - log.Println("running client") - return globalUtils.RunCmd("./txgen", "-config_file", globalSession.localConfigFileName, "-log_folder", globalSession.logFolder) -} - -func main() { - ip := flag.String("ip", "127.0.0.1", "IP of the node.") - port := flag.String("port", "9000", "port of the node.") - flag.Parse() - - setting.ip = *ip - setting.port = *port - - socketServer() -} diff --git a/aws-experiment-launch/experiment/soldier/s3/s3.go b/aws-experiment-launch/experiment/soldier/s3/s3.go deleted file mode 100644 index ef1fb7f28..000000000 --- a/aws-experiment-launch/experiment/soldier/s3/s3.go +++ /dev/null @@ -1,93 +0,0 @@ -package s3 - -import ( - "fmt" - "log" - "os" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3manager" -) - -func createSession() *session.Session { - return session.Must(session.NewSession(&aws.Config{ - Region: aws.String(endpoints.UsWest2RegionID), - MaxRetries: aws.Int(3), - })) -} - -func CreateBucket(bucketName string, region string) { - sess := createSession() - svc := s3.New(sess) - input := &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - CreateBucketConfiguration: &s3.CreateBucketConfiguration{ - LocationConstraint: aws.String(region), - }, - } - - result, err := svc.CreateBucket(input) - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - switch aerr.Code() { - case s3.ErrCodeBucketAlreadyExists: - fmt.Println(s3.ErrCodeBucketAlreadyExists, aerr.Error()) - case s3.ErrCodeBucketAlreadyOwnedByYou: - fmt.Println(s3.ErrCodeBucketAlreadyOwnedByYou, aerr.Error()) - default: - fmt.Println(aerr.Error()) - } - } else { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - } - return - } - - fmt.Println(result) -} - -func UploadFile(bucketName string, fileName string, key string) (result *s3manager.UploadOutput, err error) { - sess := createSession() - uploader := s3manager.NewUploader(sess) - - f, err := os.Open(fileName) - if err != nil { - log.Println("Failed to open file", err) - return nil, err - } - // Upload the file to S3. 
- result, err = uploader.Upload(&s3manager.UploadInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Body: f, - }) - if err != nil { - log.Println("failed to upload file", err) - return nil, err - } - fmt.Printf("file uploaded to, %s\n", result.Location) - return result, nil -} - -func DownloadFile(bucketName string, fileName string, key string) (n int64, err error) { - sess := createSession() - - downloader := s3manager.NewDownloader(sess) - - f, err := os.Create(fileName) - if err != nil { - return - } - - n, err = downloader.Download(f, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - return -} diff --git a/aws-experiment-launch/experiment/utils/utils.go b/aws-experiment-launch/experiment/utils/utils.go deleted file mode 100644 index 077084cf6..000000000 --- a/aws-experiment-launch/experiment/utils/utils.go +++ /dev/null @@ -1,30 +0,0 @@ -package utils - -import ( - "io" - "net/http" - "os" -) - -func DownloadFile(filepath string, url string) error { - // Create the file - out, err := os.Create(filepath) - if err != nil { - return err - } - defer out.Close() - - // Get the data - resp, err := http.Get(url) - if err != nil { - return err - } - defer resp.Body.Close() - - // Write the body to file - _, err = io.Copy(out, resp.Body) - if err != nil { - return err - } - return nil -} diff --git a/aws-experiment-launch/generate_distribution_config.py b/aws-experiment-launch/generate_distribution_config.py deleted file mode 100644 index 7eade898f..000000000 --- a/aws-experiment-launch/generate_distribution_config.py +++ /dev/null @@ -1,37 +0,0 @@ -import argparse -import logging -import sys - -from utils import utils - -logging.basicConfig(level=logging.INFO, format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s') -LOGGER = logging.getLogger(__file__) -LOGGER.setLevel(logging.INFO) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='This script helps you to genereate distribution config') - parser.add_argument('--ip_list_file', type=str, dest='ip_list_file', - default='raw_ip.txt', help="the file containing available raw ips") - # If the ip_list_file is None we need to use the region, node_name_tag and region_config to collect raw_ip - parser.add_argument('--region', type=str, dest='region_number', - default="4", help="region number") - parser.add_argument('--node_name_tag', type=str, - dest='node_name_tag', default='4-NODE-23-36-01-2018-07-05') - parser.add_argument('--region_config', type=str, - dest='region_config', default='configuration.txt') - - parser.add_argument('--shard_number', type=int, dest='shard_number', default=1) - parser.add_argument('--client_number', type=int, dest='client_number', default=1) - parser.add_argument('--distribution_config', type=str, - dest='distribution_config', default='distribution_config.txt') - args = parser.parse_args() - - if args.ip_list_file == None: - utils.generate_distribution_config2( - args.region_number, args.node_name_tag, args.region_config, - args.shard_number, args.client_number, args.distribution_config) - else: - utils.generate_distribution_config3(args.shard_number, args.client_number, - args.ip_list_file, args.distribution_config) - LOGGER.info("Done writing %s" % args.distribution_config) diff --git a/aws-experiment-launch/logs_download.sh b/aws-experiment-launch/logs_download.sh deleted file mode 100755 index c8f5880ad..000000000 --- a/aws-experiment-launch/logs_download.sh +++ /dev/null @@ -1 +0,0 @@ -scp -i 
../keys/ohio-key-benchmark.pem ec2-user@18.219.217.193:~/projects/src/harmony-benchmark/bin/upload tmp/ diff --git a/aws-experiment-launch/report_extractor.py b/aws-experiment-launch/report_extractor.py deleted file mode 100644 index a6758f61f..000000000 --- a/aws-experiment-launch/report_extractor.py +++ /dev/null @@ -1,96 +0,0 @@ -import json -import sys -import os -import argparse - -def formatFloat(v): - return "%.2f" % v - -def formatPercent(v): - return formatFloat(v) + "%" - -def formatMem(v): - return formatFloat(float(v) / 10**6) + "MB" - -class Profiler: - def __init__(self): - self.tps = 0 - self.tps_max = 0 - self.tps_min = sys.maxsize - self.tps_count = 0 - - self.cpu_percent = 0 - self.cpu_usr = 0 - self.cpu_sys = 0 - self.cpu_count = 0 - - self.mem_rss = 0 - self.mem_rss_max = 0 - self.mem_count = 0 - - def handleTPS(self, obj): - tps = obj["TPS"] - self.tps += tps - self.tps_max = max(self.tps_max, tps) - self.tps_min = min(self.tps_min, tps) - self.tps_count += 1 - - def handleCPU(self, obj): - # http://psutil.readthedocs.io/en/latest/#psutil.Process.cpu_times - # https://stackoverflow.com/questions/556405/what-do-real-user-and-sys-mean-in-the-output-of-time1 - # http://psutil.readthedocs.io/en/latest/#psutil.Process.cpu_percent - self.cpu_percent += obj["percent"] - times = json.loads(obj["times"]) - self.cpu_usr = times["user"] - self.cpu_sys = times["system"] - self.cpu_count += 1 - - def handleMem(self, obj): - # http://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info - info = json.loads(obj["info"]) - rss = info["rss"] - self.mem_rss += rss - self.mem_rss_max = max(self.mem_rss_max, rss) - self.mem_count += 1 - - def report(self): - print("TPS", - "Avg", formatFloat(self.tps / self.tps_count), - "Min", formatFloat(self.tps_min), - "Max", formatFloat(self.tps_max)) - print("CPU", - "Percent (Avg)", formatPercent(self.cpu_percent / self.cpu_count), - "Time (Usr)", str(self.cpu_usr) + "s", - "Time (Sys)", str(self.cpu_sys) + "s") - print("Mem", - "RSS (Max)", formatMem(self.mem_rss_max), - "RSS (Avg)", formatMem(self.mem_rss / self.mem_count)) - -def profileFile(path): - print(path) - profiler = Profiler() - with open(path) as f: - for line in f: - obj = json.loads(line) - if obj["lvl"] != "info": - continue - if obj["msg"] == "TPS Report": - profiler.handleTPS(obj) - elif obj["msg"] == "CPU Report": - profiler.handleCPU(obj) - elif obj["msg"] == "Mem Report": - profiler.handleMem(obj) - profiler.report() - -# Example: python report_extractor.py --folder ../tmp_log/log-20180713-205431 -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="This script extracts reports from log files") - parser.add_argument("--folder", type=str, dest="folder", - default="", - help="the path to the log folder") - args = parser.parse_args() - - for filename in os.listdir(args.folder): - if "leader" in filename: - profileFile(os.path.join(args.folder, filename)) \ No newline at end of file diff --git a/aws-experiment-launch/spot-instance/request-spot.sh b/aws-experiment-launch/spot-instance/request-spot.sh deleted file mode 100755 index 157942565..000000000 --- a/aws-experiment-launch/spot-instance/request-spot.sh +++ /dev/null @@ -1,18 +0,0 @@ -aws ec2 request-spot-instances \ - --instance-count 1 \ - --block-duration-minutes 60 \ - --launch-specification "{ \ - \"ImageId\": \"ami-f2d3638a\", \ - \"InstanceType\": \"m3.medium\", \ - \"SecurityGroups\": [ \ - \"richard-spot-instance SSH\" \ - ], \ - \"KeyName\": \"richard-spot-instance\", \ - 
\"IamInstanceProfile\": { \ - \"Name\": \"BenchMarkCodeDeployInstanceProfile\" \ - }, \ - \"UserData\": \"`base64 ../configs/userdata-commander.sh`\" \ - }" \ - #--dry-run # uncomment this line to send a real request. - -# Note: on windows, you need to add `-w 0` to the base64 command" \ No newline at end of file diff --git a/aws-experiment-launch/spot-instance/run-instances.sh b/aws-experiment-launch/spot-instance/run-instances.sh deleted file mode 100755 index c145db9e7..000000000 --- a/aws-experiment-launch/spot-instance/run-instances.sh +++ /dev/null @@ -1,10 +0,0 @@ -aws ec2 run-instances \ - --count 1 \ - --image-id "ami-f2d3638a" \ - --instance-type "t2.micro" \ - --key-name "richard-spot-instance" \ - --security-groups "richard-spot-instance SSH" \ - --iam-instance-profile "{ \ - \"Name\": \"BenchMarkCodeDeployInstanceProfile\" \ - }" \ - --user-data "`base64 ../configs/userdata-commander.sh`" \ No newline at end of file diff --git a/aws-experiment-launch/terminate_instances.py b/aws-experiment-launch/terminate_instances.py deleted file mode 100644 index e85e457d7..000000000 --- a/aws-experiment-launch/terminate_instances.py +++ /dev/null @@ -1,105 +0,0 @@ -import argparse -import logging -import os -import random -import sys -import threading -import boto3 - - - -logging.basicConfig(level=logging.INFO, format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s') -LOGGER = logging.getLogger(__file__) -LOGGER.setLevel(logging.INFO) - -REGION_NAME = 'region_name' -REGION_KEY = 'region_key' -REGION_SECURITY_GROUP = 'region_security_group' -REGION_SECURITY_GROUP_ID = 'region_security_group_id' -REGION_HUMAN_NAME = 'region_human_name' -INSTANCE_TYPE = 't2.micro' -REGION_AMI = 'region_ami' - -def read_configuration_file(filename): - config = {} - with open(filename, 'r') as f: - for myline in f: - mylist = myline.strip().split(',') - region_num = mylist[0] - config[region_num] = {} - config[region_num][REGION_NAME] = mylist[1] - config[region_num][REGION_KEY] = mylist[2] - config[region_num][REGION_SECURITY_GROUP] = mylist[3] - config[region_num][REGION_HUMAN_NAME] = mylist[4] - config[region_num][REGION_AMI] = mylist[5] - config[region_num][REGION_SECURITY_GROUP_ID] = mylist[6] - return config - -def get_instance_ids(describe_instances_response): - instance_ids = [] - if describe_instances_response["Reservations"]: - for reservation in describe_instances_response["Reservations"]: - instance_ids.extend(instance["InstanceId"] for instance in reservation["Instances"] if instance.get("InstanceId")) - return instance_ids - -def get_instance_ids2(ec2_client, node_name_tag): - time.sleep(5) - filters = [{'Name': 'tag:Name','Values': [node_name_tag]}] - return get_instance_ids(ec2_client.describe_instances(Filters=filters)) - -def create_ec2_client(region_number, region_config): - config = read_configuration_file(region_config) - region_name = config[region_number][REGION_NAME] - session = boto3.Session(region_name=region_name) - return session.client('ec2'), session - -def terminate_instances_by_region(region_number, region_config, node_name_tag): - ec2_client, _ = create_ec2_client(region_number, region_config) - filters = [{'Name': 'tag:Name','Values': [node_name_tag]}] - instance_ids = get_instance_ids(ec2_client.describe_instances(Filters=filters)) - if instance_ids: - ec2_client.terminate_instances(InstanceIds=instance_ids) - LOGGER.info("waiting until instances with tag %s died." 
% node_name_tag) - waiter = ec2_client.get_waiter('instance_terminated') - waiter.wait(InstanceIds=instance_ids) - LOGGER.info("instances with node name tag %s terminated." % node_name_tag) - else: - pass - LOGGER.warn("there is no instances to terminate") - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='This script helps you to collect public ips') - parser.add_argument('--instance_output', type=str, dest='instance_output', - default='instance_output.txt', - help='the file contains node_name_tag and region number of created instances.') - parser.add_argument('--node_name_tag', type=str, dest='node_name_tag') - parser.add_argument('--region_config', type=str, - dest='region_config', default='configuration.txt') - args = parser.parse_args() - - if args.node_name_tag: - node_name_tag_items = args.node_name_tag.split(",") - region_number_items = [item[:1] for item in node_name_tag_items] - thread_pool = [] - for i in range(len(region_number_items)): - region_number = region_number_items[i] - node_name_tag = node_name_tag_items[i] - t = threading.Thread(target=terminate_instances_by_region, args=(region_number, args.region_config, node_name_tag)) - t.start() - thread_pool.append(t) - for t in thread_pool: - t.join() - LOGGER.info("done.") - elif args.instance_output: - with open(args.instance_output, "r") as fin: - thread_pool = [] - for line in fin.readlines(): - items = line.split(" ") - region_number = items[1].strip() - node_name_tag = items[0].strip() - t = threading.Thread(target=terminate_instances_by_region, args=(region_number, args.region_config, node_name_tag)) - t.start() - thread_pool.append(t) - for t in thread_pool: - t.join() - LOGGER.info("done.") diff --git a/aws-experiment-launch/upload_binaries.py b/aws-experiment-launch/upload_binaries.py deleted file mode 100644 index 2a5986f0e..000000000 --- a/aws-experiment-launch/upload_binaries.py +++ /dev/null @@ -1,15 +0,0 @@ -import boto3 -import os -GOHOME ='/Users/alok/Documents/goworkspace/' -s3 = boto3.client('s3') -bucket_name = 'unique-bucket-bin' -#response = s3.create_bucket(Bucket=bucket_name,ACL='public-read-write', CreateBucketConfiguration={'LocationConstraint': 'us-west-2'}) -response = s3.list_buckets() -buckets = [bucket['Name'] for bucket in response['Buckets']] -print("Bucket List: %s" % buckets) -dirname = GOHOME + 'src/harmony-benchmark/bin/' -for myfile in os.listdir(dirname): - with open('distribution_config.txt','r') as f: - f = open(os.path.join(dirname,myfile)) - response = s3.put_object(ACL='public-read-write',Body=f.read(),Bucket=bucket_name,Key=myfile) - print(response) diff --git a/aws-experiment-launch/upload_config.py b/aws-experiment-launch/upload_config.py deleted file mode 100644 index ecea1d2e0..000000000 --- a/aws-experiment-launch/upload_config.py +++ /dev/null @@ -1,10 +0,0 @@ -import boto3 -import os -#from boto3.session import Session -s3 = boto3.client('s3') -bucket_name = 'unique-bucket-bin' -#response = s3.create_bucket(Bucket=bucket_name,ACL='public-read-write', CreateBucketConfiguration={'LocationConstraint': 'us-west-2'}) -myfile = 'distribution_config.txt' -with open('distribution_config.txt','r') as f: - response = s3.put_object(ACL='public-read-write',Body=f.read(),Bucket=bucket_name,Key=myfile) - print(response) diff --git a/aws-experiment-launch/upload_s3.py b/aws-experiment-launch/upload_s3.py deleted file mode 100644 index 94869160b..000000000 --- a/aws-experiment-launch/upload_s3.py +++ /dev/null @@ -1,9 +0,0 @@ -import boto3 -s3 = boto3.client('s3') 
-bucket_name = 'first-try' -s3.create_bucket(Bucket=bucket_name,ACL='public-read-write') -response = s3.list_buckets() -buckets = [bucket['Name'] for bucket in response['Buckets']] -print("Bucket List: %s" % buckets) -filename='myfirst.txt' -s3.upload_file(filename, bucket_name, filename) \ No newline at end of file diff --git a/aws-experiment-launch/userdata-commander.sh b/aws-experiment-launch/userdata-commander.sh deleted file mode 100755 index 165aab479..000000000 --- a/aws-experiment-launch/userdata-commander.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -x -REGION=$(curl 169.254.169.254/latest/meta-data/placement/availability-zone/ | sed 's/[a-z]$//') -#yum update -y #This breaking codedeploy right now -yum install ruby wget -y -cd /home/ec2-user -touch yum-not-updated.txt -wget https://aws-codedeploy-$REGION.s3.amazonaws.com/latest/install -chmod +x ./install -./install auto -mkdir projects -mkdir projects/src \ No newline at end of file diff --git a/aws-experiment-launch/userdata-soldier.sh b/aws-experiment-launch/userdata-soldier.sh deleted file mode 100644 index a8a91c6c2..000000000 --- a/aws-experiment-launch/userdata-soldier.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -yum install ruby -y -cd /home/ec2-user/ -curl http://unique-bucket-bin.s3.amazonaws.com/txgen -o txgen -curl http://unique-bucket-bin.s3.amazonaws.com/soldier -o soldier -curl http://unique-bucket-bin.s3.amazonaws.com/benchmark -o benchmark -chmod +x ./soldier -chmod +x ./txgen -chmod +x ./commander -chmod +x ./kill_node.sh -echo "* soft nproc 65535" | sudo tee -a /etc/security/limits.conf -echo "* hard nproc 65535" | sudo tee -a /etc/security/limits.conf -echo "* soft nofile 65535" | sudo tee -a /etc/security/limits.conf -echo "* hard nofile 65535" | sudo tee -a /etc/security/limits.conf -echo "root soft nproc 65535" | sudo tee -a /etc/security/limits.conf -echo "root hard nproc 65535" | sudo tee -a /etc/security/limits.conf -echo "root soft nofile 65535" | sudo tee -a /etc/security/limits.conf -echo "root hard nofile 65535" | sudo tee -a /etc/security/limits.conf -echo "session required pam_limits.so" | sudo tee -a /etc/pam.d/common-session - -# Get My IP -ip=`curl http://169.254.169.254/latest/meta-data/public-ipv4` - -NODE_PORT=9000 -SOLDIER_PORT=1$NODE_PORT -# Kill existing soldier/node -fuser -k -n tcp $SOLDIER_PORT -fuser -k -n tcp $NODE_PORT - -# Run soldier -./soldier -ip $ip -port $NODE_PORT > soldier_log 2>&1 & diff --git a/aws-experiment-launch/utils/__init__.py b/aws-experiment-launch/utils/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/aws-experiment-launch/utils/configuration.txt b/aws-experiment-launch/utils/configuration.txt deleted file mode 100644 index d713ce4b5..000000000 --- a/aws-experiment-launch/utils/configuration.txt +++ /dev/null @@ -1,8 +0,0 @@ -1,us-east-1,virginia-key-benchmark,virginia-security-group,virginia,ami-b70554c8,sg-04d0b62ee08ce8800 -2,us-east-2,ohio-key-benchmark,ohio-security-group,ohio,ami-8c122be9,sg-0789078f1c76defbe -3,us-west-1,california-key-benchmark,california-security-group,california,ami-e0ba5c83,sg-0a66ccb6ab9161a14 -4,us-west-2,oregon-key-benchmark,oregon-security-group,oregon,ami-a9d09ed1,sg-020cb5729fa212d43 -5,ap-northeast-1,tokyo-key-benchmark,tokyo-security-group,tokyo,ami-e99f4896,sg-009aeb97f675c1ad5 -6,ap-southeast-1,singapore-key-benchmark,singapore-security-group,singapore,ami-05868579,sg-05f9b60044a19dfb2 -7,eu-central-1,frankfurt-key-benchmark,frankfurt-security-group,frankfurt,ami-7c4f7097,sg-0bb06fcd8b25b5910 
-8,eu-west-1,ireland-key-benchmark,ireland-security-group,ireland,ami-466768ac,sg-0aa8954acb79fdb58 \ No newline at end of file diff --git a/aws-experiment-launch/utils/launch_template.py b/aws-experiment-launch/utils/launch_template.py deleted file mode 100644 index 6c915abc6..000000000 --- a/aws-experiment-launch/utils/launch_template.py +++ /dev/null @@ -1,46 +0,0 @@ - -import utils - - -def get_launch_template_name(region_number): - return 'benchmark-' + utils.CONFIG[region_number][utils.REGION_NAME] - - -def create(ec2_client, region_number): - return ec2_client.create_launch_template( - # DryRun=True, - LaunchTemplateName=get_launch_template_name(region_number), - LaunchTemplateData={ - 'IamInstanceProfile': { - 'Name': utils.IAM_INSTANCE_PROFILE - }, - 'ImageId': utils.CONFIG[region_number][utils.REGION_AMI], - # 'InstanceType': instance_type, - 'KeyName': utils.CONFIG[region_number][utils.REGION_KEY], - 'UserData': utils.USER_DATA_BASE64, - 'SecurityGroupIds': [ - utils.CONFIG[region_number][utils.REGION_SECURITY_GROUP_ID] - ], - # 'InstanceInitiatedShutdownBehavior': 'stop', - 'TagSpecifications': [ - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'LaunchTemplate', - 'Value': 'Yes' - } - ] - } - ], - # 'InstanceMarketOptions': { - # 'MarketType': 'spot', - # 'SpotOptions': { - # 'MaxPrice': 'string', - # 'SpotInstanceType': 'one-time'|'persistent', - # 'BlockDurationMinutes': 123, - # 'InstanceInterruptionBehavior': 'hibernate'|'stop'|'terminate' - # } - # }, - } - ) \ No newline at end of file diff --git a/aws-experiment-launch/utils/logger.py b/aws-experiment-launch/utils/logger.py deleted file mode 100644 index 43bcbdaa1..000000000 --- a/aws-experiment-launch/utils/logger.py +++ /dev/null @@ -1,9 +0,0 @@ -import logging - -logging.basicConfig(level=logging.INFO, - format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s') - -def getLogger(file): - LOGGER = logging.getLogger(file) - LOGGER.setLevel(logging.INFO) - return LOGGER \ No newline at end of file diff --git a/aws-experiment-launch/utils/spot_fleet.py b/aws-experiment-launch/utils/spot_fleet.py deleted file mode 100644 index c07d552c6..000000000 --- a/aws-experiment-launch/utils/spot_fleet.py +++ /dev/null @@ -1,119 +0,0 @@ -import utils -import logger -import launch_template -LOGGER = logger.getLogger(__file__) - - -def create_launch_specification(region_number, instanceType): - return { - # Region irrelevant fields - 'IamInstanceProfile': { - 'Name': utils.IAM_INSTANCE_PROFILE - }, - 'InstanceType': instanceType, - 'UserData': utils.USER_DATA_BASE64, - # Region relevant fields - 'SecurityGroups': [ - { - # In certain scenarios, we have to use group id instead of group name - # https://github.com/boto/boto/issues/350#issuecomment-27359492 - 'GroupId': utils.CONFIG[region_number][utils.REGION_SECURITY_GROUP_ID] - } - ], - 'ImageId': utils.CONFIG[region_number][utils.REGION_AMI], - 'KeyName': utils.CONFIG[region_number][utils.REGION_KEY], - 'TagSpecifications': [ - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'Name', - 'Value': utils.get_node_name_tag(region_number) - } - ] - } - ], - # 'WeightedCapacity': 123.0, - # 'Placement': { - # # 'AvailabilityZone': get_one_availability_zone(ec2_client) - # } - } - - -def create_launch_specification_list(region_number, instance_type_list): - return list(map(lambda type: create_launch_specification(region_number, type), instance_type_list)) - - -def get_launch_template(region_number, instance_type): - return { - 
'LaunchTemplateSpecification': { - 'LaunchTemplateName': launch_template.get_launch_template_name(region_number), - 'Version': '1' - }, - 'Overrides': [ - { - 'InstanceType': instance_type - } - ] - } - - -def get_launch_template_list(region_number, instance_type_list): - return list(map(lambda type: get_launch_template(region_number, type), instance_type_list)) - - -def request_spot_fleet(ec2_client, region_number, number_of_instances, instance_type_list): - LOGGER.info("Requesting spot fleet") - LOGGER.info("Creating node_name_tag: %s" % - utils.get_node_name_tag(region_number)) - # https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.request_spot_fleet - response = ec2_client.request_spot_fleet( - # DryRun=True, - SpotFleetRequestConfig={ - # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet.html#spot-fleet-allocation-strategy - 'AllocationStrategy': 'diversified', - 'IamFleetRole': 'arn:aws:iam::656503231766:role/RichardFleetRole', - 'LaunchSpecifications': create_launch_specification_list(region_number, instance_type_list), - # 'SpotPrice': 'string', # The maximum price per unit hour that you are willing to pay for a Spot Instance. The default is the On-Demand price. - 'TargetCapacity': number_of_instances, - 'Type': 'maintain' - } - ) - return response["SpotFleetRequestId"] - - -def request_spot_fleet_with_on_demand(ec2_client, region_number, number_of_instances, number_of_on_demand, instance_type_list): - LOGGER.info("Requesting spot fleet") - LOGGER.info("Creating node_name_tag: %s" % - utils.get_node_name_tag(region_number)) - # https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.request_spot_fleet - response = ec2_client.request_spot_fleet( - # DryRun=True, - SpotFleetRequestConfig={ - # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet.html#spot-fleet-allocation-strategy - 'AllocationStrategy': 'diversified', - 'IamFleetRole': 'arn:aws:iam::656503231766:role/RichardFleetRole', - 'LaunchTemplateConfigs': get_launch_template_list(region_number, instance_type_list), - # 'SpotPrice': 'string', # The maximum price per unit hour that you are willing to pay for a Spot Instance. The default is the On-Demand price. - 'TargetCapacity': number_of_instances, - 'OnDemandTargetCapacity': number_of_on_demand, - 'Type': 'maintain' - } - ) - return response - -def get_instance_ids(client, request_id): - res = client.describe_spot_fleet_instances( - SpotFleetRequestId=request_id - ) - return [ inst["InstanceId"] for inst in res["ActiveInstances"] ] - -def run_one_region(region_number, number_of_instances, fout, fout2): - client = utils.create_client(utils.CONFIG, region_number) - instance_type_list = ['t2.micro', 't2.small', 'm3.medium'] - # node_name_tag = request_spot_fleet_with_on_demand( - # client, region_number, int(number_of_instances), 1, instance_type_list) - request_id = request_spot_fleet( - client, region_number, int(number_of_instances), instance_type_list) - instance_ids = get_instance_ids(client, request_id) - print(instance_ids) # TODO@ricl, no data here since the request is not fulfilled. 
\ No newline at end of file diff --git a/aws-experiment-launch/utils/utils.py b/aws-experiment-launch/utils/utils.py deleted file mode 100644 index 365e135fc..000000000 --- a/aws-experiment-launch/utils/utils.py +++ /dev/null @@ -1,210 +0,0 @@ -import boto3 -import datetime -import json -import sys -import time -import base64 -import threading - - -MAX_INTANCES_FOR_WAITER = 100 -MAX_INSTANCES_FOR_DEPLOYMENT = 500 -REGION_NAME = 'region_name' -REGION_KEY = 'region_key' -REGION_SECURITY_GROUP = 'region_security_group' -REGION_SECURITY_GROUP_ID = 'region_security_group_id' -REGION_HUMAN_NAME = 'region_human_name' -INSTANCE_TYPE = 't2.micro' -REGION_AMI = 'region_ami' -IAM_INSTANCE_PROFILE = 'BenchMarkCodeDeployInstanceProfile' -time_stamp = time.time() -CURRENT_SESSION = datetime.datetime.fromtimestamp( - time_stamp).strftime('%H-%M-%S-%Y-%m-%d') -NODE_NAME_SUFFIX = "NODE-" + CURRENT_SESSION - -def get_node_name_tag(region_number): - return region_number + "-" + NODE_NAME_SUFFIX - -def get_node_name_tag2(region_number, tag): - return region_number + "-" + NODE_NAME_SUFFIX + "-" + str(tag) - -with open("userdata-soldier.sh", "r") as userdata_file: - USER_DATA = userdata_file.read() - -# UserData must be base64 encoded for spot instances. -USER_DATA_BASE64 = base64.b64encode(USER_DATA) - -def create_client(config, region_number): - region_name = config[region_number][REGION_NAME] - # Create session. - session = boto3.Session(region_name=region_name) - # Create a client. - return session.client('ec2') - -def read_region_config(region_config='configuration.txt'): - global CONFIG - config = {} - with open(region_config, 'r') as f: - for myline in f: - mylist = [item.strip() for item in myline.strip().split(',')] - region_num = mylist[0] - config[region_num] = {} - config[region_num][REGION_NAME] = mylist[1] - config[region_num][REGION_KEY] = mylist[2] - config[region_num][REGION_SECURITY_GROUP] = mylist[3] - config[region_num][REGION_HUMAN_NAME] = mylist[4] - config[region_num][REGION_AMI] = mylist[5] - config[region_num][REGION_SECURITY_GROUP_ID] = mylist[6] - CONFIG = config - return config - -def get_ip_list(response): - if response.get('Instances', None): - return [instance.get('PublicIpAddress', None) for instance in response['Instances']] - else: - return [] - -def create_ec2_client(region_number, region_config): - config = read_region_config(region_config) - region_name = config[region_number][REGION_NAME] - session = boto3.Session(region_name=region_name) - return session.client('ec2'), session - -def collect_public_ips_from_ec2_client(ec2_client, node_name_tag): - filters = [{'Name': 'tag:Name','Values': [node_name_tag]}] - response = ec2_client.describe_instances(Filters=filters) - ip_list = [] - if response.get('Reservations'): - for reservation in response[u'Reservations']: - ip_list.extend(instance['PublicIpAddress'] for instance in reservation['Instances'] if instance.get('PublicIpAddress')) - return ip_list - -def collect_public_ips(region_number, node_name_tag, region_config): - ec2_client, _ = create_ec2_client(region_number, region_config) - ip_list = collect_public_ips_from_ec2_client(ec2_client, node_name_tag) - return ip_list - -def get_application(codedeploy, application_name): - response = codedeploy.list_applications() - if application_name in response['applications']: - return response - else: - response = codedeploy.create_application( - applicationName=application_name, - computePlatform='Server' - ) - return response - -def create_deployment_group(codedeploy, 
region_number,application_name, deployment_group_name, node_name_tag): - response = codedeploy.list_deployment_groups(applicationName=application_name) - if response.get('deploymentGroups') and (deployment_group_name in response['deploymentGroups']): - return None - else: - response = codedeploy.create_deployment_group( - applicationName=application_name, - deploymentGroupName=deployment_group_name, - deploymentConfigName='CodeDeployDefault.AllAtOnce', - serviceRoleArn='arn:aws:iam::656503231766:role/BenchMarkCodeDeployServiceRole', - deploymentStyle={ - 'deploymentType': 'IN_PLACE', - 'deploymentOption': 'WITHOUT_TRAFFIC_CONTROL' - }, - ec2TagFilters = [ - { - 'Key': 'Name', - 'Value': node_name_tag, - 'Type': 'KEY_AND_VALUE' - } - ] - ) - return response['deploymentGroupId'] - -def generate_distribution_config2(region_number, node_name_tag, region_config, - shard_number, client_number, distribution_config): - ip_list = collect_public_ips(region_number, node_name_tag, region_config) - generate_distribution_config(shard_number, client_number, ip_list, distribution_config) - -def generate_distribution_config3(shard_number, client_number, ip_list_file, distribution_config): - with open(ip_list_file, "r") as fin: - lines = fin.readlines() - ip_list = [line.strip() for line in lines] - generate_distribution_config(shard_number, client_number, ip_list, distribution_config) - -def generate_distribution_config(shard_number, client_number, ip_list, distribution_config): - if len(ip_list) < shard_number * 2 + client_number + 1: - print("Not enough nodes to generate a config file") - return False - - # Create ip for clients. - commander_id, client_id, leader_id, validator_id = 0, 0, 0, 0 - validator_number = len(ip_list) - client_number - shard_number - 1 - with open(distribution_config, "w") as fout: - for i in range(len(ip_list)): - ip, node_name_tag = ip_list[i].split(" ") - if commander_id < 1: - fout.write("%s 9000 commander %d %s\n" % (ip, commander_id, node_name_tag)) - commander_id = commander_id + 1 - elif leader_id < shard_number: - fout.write("%s 9000 leader %d %s\n" % (ip, leader_id, node_name_tag)) - leader_id = leader_id + 1 - elif validator_id < validator_number: - fout.write("%s 9000 validator %d %s\n" % (ip, validator_id % shard_number, node_name_tag)) - validator_id = validator_id + 1 - else: - fout.write("%s 9000 client %d %s\n" % (ip, client_id % shard_number, node_name_tag)) - client_id = client_id + 1 - -def get_availability_zones(ec2_client): - response = ec2_client.describe_availability_zones() - all_zones = [] - if response.get('AvailabilityZones', None): - all_zones = [info['ZoneName'] for info in response.get('AvailabilityZones') if info['State'] == 'available'] - return all_zones - -def get_one_availability_zone(ec2_client): - time.sleep(1) - all_zones = get_availability_zones(ec2_client) - if len(all_zones) > 0: - return all_zones[0] - else: - return None - -def get_instance_ids2(ec2_client, node_name_tag): - time.sleep(5) - filters = [{'Name': 'tag:Name','Values': [node_name_tag]}] - return get_instance_ids(ec2_client.describe_instances(Filters=filters)) - -# Get instance_ids from describe_instances_response. 
-def get_instance_ids(describe_instances_response): - instance_ids = [] - if describe_instances_response["Reservations"]: - for reservation in describe_instances_response["Reservations"]: - instance_ids.extend(instance["InstanceId"] for instance in reservation["Instances"] if instance.get("InstanceId")) - return instance_ids - -WAITER_LOCK = threading.Lock() -def run_waiter_100_instances_for_status(ec2_client, status, instance_ids): - time.sleep(10) - WAITER_LOCK.acquire() - waiter = ec2_client.get_waiter('instance_running') - WAITER_LOCK.release() - waiter.wait(InstanceIds=instance_ids) - -def run_waiter_for_status(ec2_client, status, instance_ids): - thread_pool = [] - i = 0 - while i < len(instance_ids): - j = i + min(len(instance_ids), i + MAX_INTANCES_FOR_WAITER) - t = threading.Thread(target=run_waiter_100_instances_for_status, args=( - ec2_client, status, instance_ids[i:j])) - t.start() - thread_pool.append(t) - i = i + MAX_INTANCES_FOR_WAITER - for t in thread_pool: - t.join() - -# used for testing only. -# if __name__ == "__main__": -# ip_list = collect_public_ips('4', "4-NODE-23-36-01-2018-07-05", "configuration.txt") -# print ip_list -# generate_distribution_config(2, 1, ip_list, "config_test.txt") diff --git a/aws-experiment-launch/utils/utils_test.py b/aws-experiment-launch/utils/utils_test.py deleted file mode 100644 index 5a4bf6c0b..000000000 --- a/aws-experiment-launch/utils/utils_test.py +++ /dev/null @@ -1,31 +0,0 @@ -import unittest - -from utils import generate_distribution_config - -class TestCreateAndDeploy(unittest.TestCase): - - def test_generate_config_file(self): - ips = ["102.000.000.1", "102.000.000.2", "102.000.000.3", "102.000.000.4", "102.000.000.5", "102.000.000.6"] - generate_distribution_config(2, 2, ips, "config_test.txt") - with open("config_test.txt", "r") as fin: - lines = fin.readlines() - collection = {} - collection['ip'] = [] - collection['client'] = {} - leader_count, validator_count, client_count = 0, 0, 0 - for line in lines: - strs = line.split(" ") - assert(not strs[0] in collection['ip']) - collection['ip'].append(strs[0]) - if strs[2] == "client": - client_count = client_count + 1 - elif strs[2] == "leader": - leader_count = leader_count + 1 - elif strs[2] == "validator": - validator_count = validator_count + 1 - assert(validator_count == 2) - assert(leader_count == 2) - assert(client_count == 2) - -if __name__ == '__main__': - unittest.main() \ No newline at end of file diff --git a/aws-scripts/kill_node.sh b/aws-scripts/kill_node.sh deleted file mode 100755 index 08b90f223..000000000 --- a/aws-scripts/kill_node.sh +++ /dev/null @@ -1,5 +0,0 @@ -for pid in `/bin/ps -fu $USER| grep "slave.go\|slave -port\|leader\|benchmark_node" | grep -v "grep" | awk '{print $2}'`; -do - echo 'Killed process: '$pid - kill -9 $pid -done diff --git a/aws-scripts/parse_json.py b/aws-scripts/parse_json.py deleted file mode 100644 index 196b10fa5..000000000 --- a/aws-scripts/parse_json.py +++ /dev/null @@ -1,34 +0,0 @@ -import json - -def get_public_ip(all_reservations): - all_public_ip_addresses = [] - for individual_instances in all_reservations: - instance_information = individual_instances['Instances'][0] - if "running" != instance_information["State"]["Name"]: - continue - all_public_ip_addresses.append(instance_information['PublicIpAddress']) - return all_public_ip_addresses - -def make_peers_list(all_reservations,port="9001",filename="config.txt"): - p = get_public_ip(all_reservations) - f = open(filename,"w") - for i in range(len(p)): - if i == 0: - 
f.write(p[i] + " " + port + " " + "leader"+"\n") - else: - f.write(p[i] + " " + port + " " + "validator"+"\n") - f.close() - -def is_it_running(f): - pass - -if __name__ == "__main__": - json_data=open("aws.json").read() - f = json.loads(json_data) - all_reservations = f['Reservations'] - - make_peers_list(all_reservations) - - - - \ No newline at end of file diff --git a/aws-scripts/preprocess_peerlist.py b/aws-scripts/preprocess_peerlist.py deleted file mode 100644 index 0eee720bc..000000000 --- a/aws-scripts/preprocess_peerlist.py +++ /dev/null @@ -1,26 +0,0 @@ -import requests -amazon_ipv4_url = "http://169.254.169.254/latest/meta-data/public-ipv4" -def get_my_ip(): - return current_ip = requests.get(amazon_ipv4_url).text - -if __name__ == "__main__": - current_ip = requests.get(amazon_ipv4_url).text - f = open("global_nodes.txt","r") - peerList = [] - for myline in f: - mylist = myline.split(" ") - ip = mylist[0] - node = mylist[2] - if str(ip) != str(current_ip): - if node != "transaction": - peerList.append(myline) - else: - if node == "transaction": - h = open("isTransaction.txt","w") - h.write("I am just a transaction generator node") - h.close() - f.close() - g = open("global_peerlist.txt","w") - for myline in peerList: - g.write(myline) - g.close() diff --git a/aws-scripts/run_instance.sh b/aws-scripts/run_instance.sh deleted file mode 100644 index 416620711..000000000 --- a/aws-scripts/run_instance.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash -x -echo "Run Instances starts" >> tmplog - -echo "Update systcl" >> tmplog -sudo sysctl net.core.somaxconn=1024 -sudo sysctl net.core.netdev_max_backlog=65536; -sudo sysctl net.ipv4.tcp_tw_reuse=1; -sudo sysctl -w net.ipv4.tcp_rmem='65536 873800 1534217728'; -sudo sysctl -w net.ipv4.tcp_wmem='65536 873800 1534217728'; -sudo sysctl -w net.ipv4.tcp_mem='65536 873800 1534217728'; - -echo "Setup path" >> tmplog -./kill_node.sh -MyHOME=/home/ec2-user -source ~/.bash_profile -export GOROOT=/usr/lib/golang -export GOPATH=$MyHOME/projects -export PATH=$PATH:$GOROOT/bin - -echo "Get ip" >> tmplog -# Get my IP -wget http://169.254.169.254/latest/meta-data/public-ipv4 -ip=$(head -n 1 public-ipv4) -echo "Current IP is >>>" -echo $ip -echo ">>>>" - -echo "Run soldier" >> tmplog -# Run soldier -cd $GOPATH/src/harmony-benchmark/bin/ -node_port=9000 -./soldier -ip $ip -port $node_port > soldier_log 2>&1 & - -echo "Run Instances done" >> tmplog \ No newline at end of file diff --git a/aws-scripts/say_bye.sh b/aws-scripts/say_bye.sh deleted file mode 100644 index 313815e65..000000000 --- a/aws-scripts/say_bye.sh +++ /dev/null @@ -1 +0,0 @@ -echo "Bye" >> tmplog \ No newline at end of file diff --git a/aws-scripts/say_hello.sh b/aws-scripts/say_hello.sh deleted file mode 100755 index 46e68cef9..000000000 --- a/aws-scripts/say_hello.sh +++ /dev/null @@ -1 +0,0 @@ -echo "Hello" >> tmplog \ No newline at end of file diff --git a/aws-scripts/setup.sh b/aws-scripts/setup.sh deleted file mode 100644 index 7ac22634a..000000000 --- a/aws-scripts/setup.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -x -echo "Setup Golang" >> tmplog -#sudo yum update -y - -sudo yum install -y golang -sudo yum install -y git -MyHOME=/home/ec2-user -echo "now setting up go-lang paths" -# GOROOT is the location where Go package is installed on your system -echo "export GOROOT=/usr/lib/golang" >> $MyHOME/.bash_profile - -# GOPATH is the location of your work directory -echo "export GOPATH=$MyHOME/projects" >> $MyHOME/.bash_profile - -# PATH in order to access go binary system wide 
-echo "export PATH=$PATH:$GOROOT/bin" >> $MyHOME/.bash_profile - -export GOROOT=/usr/lib/golang -export GOPATH=$MyHOME/projects -export PATH=$PATH:$GOROOT/bin -source $MyHOME/.bash_profile - -cd $GOPATH/src/harmony-benchmark -touch 'yum_not_updated.txt' -# go get dependencies -go get ./... -curl --silent http://169.254.169.254/latest/meta-data/public-ipv4 >> bin/myip.txt -# build executables -go build -o bin/soldier aws-experiment-launch/experiment/soldier/main.go -go build -o bin/commander aws-experiment-launch/experiment/commander/main.go -go build -o bin/benchmark benchmark.go -go build -o bin/txgen client/txgen/main.go - -# Setup ulimit -echo "* soft nproc 65535" | sudo tee -a /etc/security/limits.conf -echo "* hard nproc 65535" | sudo tee -a /etc/security/limits.conf -echo "* soft nofile 65535" | sudo tee -a /etc/security/limits.conf -echo "* hard nofile 65535" | sudo tee -a /etc/security/limits.conf -echo "root soft nproc 65535" | sudo tee -a /etc/security/limits.conf -echo "root hard nproc 65535" | sudo tee -a /etc/security/limits.conf -echo "root soft nofile 65535" | sudo tee -a /etc/security/limits.conf -echo "root hard nofile 65535" | sudo tee -a /etc/security/limits.conf -echo "session required pam_limits.so" | sudo tee -a /etc/pam.d/common-session diff --git a/aws-scripts/setup_instances.sh b/aws-scripts/setup_instances.sh deleted file mode 100755 index 56e8d6534..000000000 --- a/aws-scripts/setup_instances.sh +++ /dev/null @@ -1 +0,0 @@ -aws ec2 run-instances --image-id ami-e251209a --count 1 --instance-type t2.nano --key-name main --security-group-ids sg-066a8b0ec187c7247 From e7f855c76d59eda859f2a44a52c77ad787f29e46 Mon Sep 17 00:00:00 2001 From: Minh Doan Date: Fri, 31 Aug 2018 18:32:09 -0700 Subject: [PATCH 06/20] clean up --- azure/launchvm.py | 177 ---------------------------------------------- 1 file changed, 177 deletions(-) delete mode 100644 azure/launchvm.py diff --git a/azure/launchvm.py b/azure/launchvm.py deleted file mode 100644 index 302dd867c..000000000 --- a/azure/launchvm.py +++ /dev/null @@ -1,177 +0,0 @@ -from azure.common.credentials import ServicePrincipalCredentials -from azure.mgmt.resource import ResourceManagementClient -from azure.mgmt.compute import ComputeManagementClient -from azure.mgmt.network import NetworkManagementClient -from azure.mgmt.compute.models import DiskCreateOption - -SUBSCRIPTION_ID = '8f969b5c-f8cb-4483-8252-354a929962e0' -GROUP_NAME = 'myResourceGroup' -LOCATION = 'westus' -VM_NAME = 'myomyVM' - -def get_credentials(): - credentials = ServicePrincipalCredentials( - client_id = '3b75dccc-f500-4195-99df-8da994541d03', - secret = 'Nj44R21IECrg8Vp/+3MBsXcmQrHcl0SEIpLjPIeOYc4=', - tenant = '6d22d644-2eec-4dac-9715-7147563a9fe5' - ) - return credentials - -def create_resource_group(resource_group_client): - resource_group_params = { 'location':LOCATION } - resource_group_result = resource_group_client.resource_groups.create_or_update( - GROUP_NAME, - resource_group_params - ) - -def create_availability_set(compute_client): - avset_params = { - 'location': LOCATION, - 'sku': { 'name': 'Aligned' }, - 'platform_fault_domain_count': 3 - } - availability_set_result = compute_client.availability_sets.create_or_update( - GROUP_NAME, - 'myAVSet', - avset_params - ) - -def create_public_ip_address(network_client): - public_ip_addess_params = { - 'location': LOCATION, - 'public_ip_allocation_method': 'Dynamic' - } - creation_result = network_client.public_ip_addresses.create_or_update( - GROUP_NAME, - 'myIPAddress', - public_ip_addess_params - 
) - return creation_result.result() - -def create_vnet(network_client): - vnet_params = { - 'location': LOCATION, - 'address_space': { - 'address_prefixes': ['10.0.0.0/16'] - } - } - creation_result = network_client.virtual_networks.create_or_update( - GROUP_NAME, - 'myVNet', - vnet_params - ) - return creation_result.result() - -def create_subnet(network_client): - subnet_params = { - 'address_prefix': '10.0.0.0/24' - } - creation_result = network_client.subnets.create_or_update( - GROUP_NAME, - 'myVNet', - 'mySubnet', - subnet_params - ) - return creation_result.result() - -def create_nic(network_client): - subnet_info = network_client.subnets.get( - GROUP_NAME, - 'myVNet', - 'mySubnet' - ) - publicIPAddress = network_client.public_ip_addresses.get( - GROUP_NAME, - 'myIPAddress' - ) - nic_params = { - 'location': LOCATION, - 'ip_configurations': [{ - 'name': 'myIPConfig', - 'public_ip_address': publicIPAddress, - 'subnet': { - 'id': subnet_info.id - } - }] - } - creation_result = network_client.network_interfaces.create_or_update( - GROUP_NAME, - 'myNic', - nic_params - ) - - return creation_result.result() - -def create_vm(network_client, compute_client): - nic = network_client.network_interfaces.get( - GROUP_NAME, - 'myNic' - ) - avset = compute_client.availability_sets.get( - GROUP_NAME, - 'myAVSet' - ) - vm_parameters = { - 'location': LOCATION, - 'os_profile': { - 'computer_name': VM_NAME, - 'admin_username': 'azureuser', - 'admin_password': 'Azure12345678' - }, - 'hardware_profile': { - 'vm_size': 'Standard_DS1' - }, - 'storage_profile': { - 'image_reference': { - 'publisher': 'MicrosoftWindowsServer', - 'offer': 'WindowsServer', - 'sku': '2012-R2-Datacenter', - 'version': 'latest' - } - }, - 'network_profile': { - 'network_interfaces': [{ - 'id': nic.id - }] - }, - 'availability_set': { - 'id': avset.id - } - } - creation_result = compute_client.virtual_machines.create_or_update( - GROUP_NAME, - VM_NAME, - vm_parameters - ) - return creation_result.result() - -if __name__ == '__main__': - credentials = get_credentials() - resource_group_client = ResourceManagementClient( - credentials, - SUBSCRIPTION_ID - ) - network_client = NetworkManagementClient( - credentials, - SUBSCRIPTION_ID - ) - compute_client = ComputeManagementClient( - credentials, - SUBSCRIPTION_ID - ) - create_resource_group(resource_group_client) - print('Resource group created....') - create_availability_set(compute_client) - print('Availability set created') - creation_result = create_public_ip_address(network_client) - print('Public IP created') - creation_result = create_vnet(network_client) - print('Virtual Net Created') - creation_result = create_subnet(network_client) - print('Subnet created') - creation_result = create_nic(network_client) - print('NIC Created') - creation_result = create_vm(network_client, compute_client) - print("------------------------------------------------------") - print("VM Created") - print(creation_result) From 3924450e51fe2206a20b37a4fb7dfebc2617d9c6 Mon Sep 17 00:00:00 2001 From: Minh Doan Date: Fri, 31 Aug 2018 19:21:00 -0700 Subject: [PATCH 07/20] clean up configr --- configr/main.go | 60 ------------------------------------------------- 1 file changed, 60 deletions(-) diff --git a/configr/main.go b/configr/main.go index c758f21d5..69605cc76 100644 --- a/configr/main.go +++ b/configr/main.go @@ -7,10 +7,7 @@ import ( "strconv" "strings" - "github.com/simple-rules/harmony-benchmark/crypto" - "github.com/simple-rules/harmony-benchmark/crypto/pki" 
"github.com/simple-rules/harmony-benchmark/p2p" - "github.com/simple-rules/harmony-benchmark/utils" ) type ConfigEntry struct { @@ -100,60 +97,3 @@ func (configr *Configr) ReadConfigFile(filename string) error { configr.config = result return nil } - -// GetShardID Gets the shard id of the node corresponding to this ip and port -func (configr *Configr) GetShardID(ip, port string) string { - for _, entry := range configr.config { - if entry.IP == ip && entry.Port == port { - return entry.ShardID - } - } - return "N/A" -} - -// GetPeers Gets the validator list -func (configr *Configr) GetPeers(ip, port, shardID string) []p2p.Peer { - var peerList []p2p.Peer - for _, entry := range configr.config { - if entry.Role != "validator" || entry.ShardID != shardID { - continue - } - // Get public key deterministically based on ip and port - peer := p2p.Peer{Port: entry.Port, Ip: entry.IP} - setKey(&peer) - peerList = append(peerList, peer) - } - return peerList -} - -// GetLeader Gets the leader of this shard id -func (configr *Configr) GetLeader(shardID string) p2p.Peer { - var leaderPeer p2p.Peer - for _, entry := range configr.config { - if entry.Role == "leader" && entry.ShardID == shardID { - leaderPeer.Ip = entry.IP - leaderPeer.Port = entry.Port - setKey(&leaderPeer) - } - } - return leaderPeer -} - -func (configr *Configr) GetConfigEntries() []ConfigEntry { - return configr.config -} - -func (configr *Configr) GetMyConfigEntry(ip string, port string) *ConfigEntry { - for _, entry := range configr.config { - if entry.IP == ip && entry.Port == port { - return &entry - } - } - return nil -} - -func setKey(peer *p2p.Peer) { - // Get public key deterministically based on ip and port - priKey := crypto.Ed25519Curve.Scalar().SetInt64(int64(utils.GetUniqueIdFromPeer(*peer))) // TODO: figure out why using a random hash value doesn't work for private key (schnorr) - peer.PubKey = pki.GetPublicKeyFromScalar(priKey) -} From 598c32f85b77ea5c85d75abfd78300710680c7bd Mon Sep 17 00:00:00 2001 From: Minh Doan Date: Fri, 31 Aug 2018 21:39:34 -0700 Subject: [PATCH 08/20] move configr/main.go to client/config/config.go --- client/config/config.go | 99 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 client/config/config.go diff --git a/client/config/config.go b/client/config/config.go new file mode 100644 index 000000000..c4f6922ef --- /dev/null +++ b/client/config/config.go @@ -0,0 +1,99 @@ +package config + +import ( + "bufio" + "log" + "os" + "strconv" + "strings" + + "github.com/simple-rules/harmony-benchmark/p2p" +) + +type ConfigEntry struct { + IP string + Port string + Role string + ShardID string +} + +type Config struct { + config []ConfigEntry +} + +func NewConfig() *Config { + configr := Config{} + return &configr +} + +// Gets all the validator peers +func (configr *Config) GetValidators() []p2p.Peer { + var peerList []p2p.Peer + for _, entry := range configr.config { + if entry.Role != "validator" { + continue + } + peer := p2p.Peer{Port: entry.Port, Ip: entry.IP} + peerList = append(peerList, peer) + } + return peerList +} + +// Gets all the leader peers and corresponding shard Ids +func (configr *Config) GetLeadersAndShardIds() ([]p2p.Peer, []uint32) { + var peerList []p2p.Peer + var shardIDs []uint32 + for _, entry := range configr.config { + if entry.Role == "leader" { + peerList = append(peerList, p2p.Peer{Ip: entry.IP, Port: entry.Port}) + val, err := strconv.Atoi(entry.ShardID) + if err == nil { + shardIDs = append(shardIDs, uint32(val)) + } else 
{ + log.Print("[Generator] Error parsing the shard Id ", entry.ShardID) + } + } + } + return peerList, shardIDs +} + +func (configr *Config) GetClientPeer() *p2p.Peer { + for _, entry := range configr.config { + if entry.Role != "client" { + continue + } + peer := p2p.Peer{Port: entry.Port, Ip: entry.IP} + return &peer + } + return nil +} + +// Gets the port of the client node in the config +func (configr *Config) GetClientPort() string { + for _, entry := range configr.config { + if entry.Role == "client" { + return entry.Port + } + } + return "" +} + +// Parse the config file and return a 2d array containing the file data +func (configr *Config) ReadConfigFile(filename string) error { + file, err := os.Open(filename) + defer file.Close() + if err != nil { + log.Fatal("Failed to read config file ", filename) + return err + } + fscanner := bufio.NewScanner(file) + + result := []ConfigEntry{} + for fscanner.Scan() { + p := strings.Split(fscanner.Text(), " ") + entry := ConfigEntry{p[0], p[1], p[2], p[3]} + result = append(result, entry) + } + configr.config = result + return nil +} From 178154bb957fc1b5292f770fe6217947a87d12b3 Mon Sep 17 00:00:00 2001 From: Minh Doan Date: Fri, 31 Aug 2018 21:47:43 -0700 Subject: [PATCH 09/20] cleanup. move configr to client/config --- client/btctxgen/main.go | 4 +- client/txgen/main.go | 4 +- client/wallet/main.go | 15 ++++--- configr/main.go | 99 ----------------------------------------- 4 files changed, 12 insertions(+), 110 deletions(-) delete mode 100644 configr/main.go diff --git a/client/btctxgen/main.go b/client/btctxgen/main.go index 6abac3879..be7944ffa 100644 --- a/client/btctxgen/main.go +++ b/client/btctxgen/main.go @@ -28,7 +28,7 @@ import ( "github.com/simple-rules/harmony-benchmark/blockchain" "github.com/simple-rules/harmony-benchmark/client" "github.com/simple-rules/harmony-benchmark/client/btctxiter" - "github.com/simple-rules/harmony-benchmark/configr" + "github.com/simple-rules/harmony-benchmark/client/config" "github.com/simple-rules/harmony-benchmark/consensus" "github.com/simple-rules/harmony-benchmark/log" "github.com/simple-rules/harmony-benchmark/node" @@ -176,7 +176,7 @@ func main() { flag.Parse() // Read the configs - configr := configr.NewConfigr() + configr := config.NewConfig() configr.ReadConfigFile(*configFile) leaders, shardIDs := configr.GetLeadersAndShardIds() diff --git a/client/txgen/main.go b/client/txgen/main.go index c460dfaef..9f5b6cd8c 100644 --- a/client/txgen/main.go +++ b/client/txgen/main.go @@ -10,7 +10,7 @@ import ( "github.com/simple-rules/harmony-benchmark/blockchain" "github.com/simple-rules/harmony-benchmark/client" - "github.com/simple-rules/harmony-benchmark/configr" + "github.com/simple-rules/harmony-benchmark/client/config" "github.com/simple-rules/harmony-benchmark/consensus" "github.com/simple-rules/harmony-benchmark/crypto/pki" "github.com/simple-rules/harmony-benchmark/log" @@ -255,7 +255,7 @@ func main() { flag.Parse() // Read the configs - configr := configr.NewConfigr() + configr := config.NewConfig() configr.ReadConfigFile(*configFile) leaders, shardIds := configr.GetLeadersAndShardIds() diff --git a/client/wallet/main.go b/client/wallet/main.go index ec54f3a4d..96391d503 100644 --- a/client/wallet/main.go +++ b/client/wallet/main.go @@ -6,20 +6,21 @@ import ( "errors" "flag" "fmt" + "io" + "io/ioutil" + "os" + "strconv" + "time" + "github.com/dedis/kyber" "github.com/simple-rules/harmony-benchmark/blockchain" "github.com/simple-rules/harmony-benchmark/client" - 
"github.com/simple-rules/harmony-benchmark/configr" + "github.com/simple-rules/harmony-benchmark/client/config" "github.com/simple-rules/harmony-benchmark/crypto" "github.com/simple-rules/harmony-benchmark/crypto/pki" "github.com/simple-rules/harmony-benchmark/node" "github.com/simple-rules/harmony-benchmark/p2p" proto_node "github.com/simple-rules/harmony-benchmark/proto/node" - "io" - "io/ioutil" - "os" - "strconv" - "time" ) func main() { @@ -177,7 +178,7 @@ func main() { } func FetchUtxos() (blockchain.UtxoMap, error) { - configr := configr.NewConfigr() + configr := config.NewConfig() configr.ReadConfigFile("local_config_shards.txt") leaders, _ := configr.GetLeadersAndShardIds() clientPeer := configr.GetClientPeer() diff --git a/configr/main.go b/configr/main.go deleted file mode 100644 index 69605cc76..000000000 --- a/configr/main.go +++ /dev/null @@ -1,99 +0,0 @@ -package configr - -import ( - "bufio" - "log" - "os" - "strconv" - "strings" - - "github.com/simple-rules/harmony-benchmark/p2p" -) - -type ConfigEntry struct { - IP string - Port string - Role string - ShardID string -} - -type Configr struct { - config []ConfigEntry -} - -func NewConfigr() *Configr { - configr := Configr{} - return &configr -} - -// Gets all the validator peers -func (configr *Configr) GetValidators() []p2p.Peer { - var peerList []p2p.Peer - for _, entry := range configr.config { - if entry.Role != "validator" { - continue - } - peer := p2p.Peer{Port: entry.Port, Ip: entry.IP} - peerList = append(peerList, peer) - } - return peerList -} - -// Gets all the leader peers and corresponding shard Ids -func (configr *Configr) GetLeadersAndShardIds() ([]p2p.Peer, []uint32) { - var peerList []p2p.Peer - var shardIDs []uint32 - for _, entry := range configr.config { - if entry.Role == "leader" { - peerList = append(peerList, p2p.Peer{Ip: entry.IP, Port: entry.Port}) - val, err := strconv.Atoi(entry.ShardID) - if err == nil { - shardIDs = append(shardIDs, uint32(val)) - } else { - log.Print("[Generator] Error parsing the shard Id ", entry.ShardID) - } - } - } - return peerList, shardIDs -} - -func (configr *Configr) GetClientPeer() *p2p.Peer { - for _, entry := range configr.config { - if entry.Role != "client" { - continue - } - peer := p2p.Peer{Port: entry.Port, Ip: entry.IP} - return &peer - } - return nil -} - -// Gets the port of the client node in the config -func (configr *Configr) GetClientPort() string { - for _, entry := range configr.config { - if entry.Role == "client" { - return entry.Port - } - } - return "" -} - -// Parse the config file and return a 2d array containing the file data -func (configr *Configr) ReadConfigFile(filename string) error { - file, err := os.Open(filename) - defer file.Close() - if err != nil { - log.Fatal("Failed to read config file ", filename) - return err - } - fscanner := bufio.NewScanner(file) - - result := []ConfigEntry{} - for fscanner.Scan() { - p := strings.Split(fscanner.Text(), " ") - entry := ConfigEntry{p[0], p[1], p[2], p[3]} - result = append(result, entry) - } - configr.config = result - return nil -} From d32c6986f4aebf2335b0eec8eb6a64e0261a388b Mon Sep 17 00:00:00 2001 From: Minh Doan Date: Fri, 31 Aug 2018 21:57:43 -0700 Subject: [PATCH 10/20] refactor configr to config --- client/btctxgen/main.go | 12 ++++++------ client/config/config.go | 24 ++++++++++++------------ client/txgen/main.go | 12 ++++++------ client/wallet/main.go | 10 +++++----- 4 files changed, 29 insertions(+), 29 deletions(-) diff --git a/client/btctxgen/main.go b/client/btctxgen/main.go 
index be7944ffa..0aae65b44 100644 --- a/client/btctxgen/main.go +++ b/client/btctxgen/main.go @@ -28,7 +28,7 @@ import ( "github.com/simple-rules/harmony-benchmark/blockchain" "github.com/simple-rules/harmony-benchmark/client" "github.com/simple-rules/harmony-benchmark/client/btctxiter" - "github.com/simple-rules/harmony-benchmark/client/config" + client_config "github.com/simple-rules/harmony-benchmark/client/config" "github.com/simple-rules/harmony-benchmark/consensus" "github.com/simple-rules/harmony-benchmark/log" "github.com/simple-rules/harmony-benchmark/node" @@ -176,9 +176,9 @@ func main() { flag.Parse() // Read the configs - configr := config.NewConfig() - configr.ReadConfigFile(*configFile) - leaders, shardIDs := configr.GetLeadersAndShardIds() + config := client_config.NewConfig() + config.ReadConfigFile(*configFile) + leaders, shardIDs := config.GetLeadersAndShardIds() // Do cross shard tx if there are more than one shard setting.crossShard = len(shardIDs) > 1 @@ -204,7 +204,7 @@ func main() { } // Client/txgenerator server node setup - clientPort := configr.GetClientPort() + clientPort := config.GetClientPort() consensusObj := consensus.NewConsensus("0", clientPort, "0", nil, p2p.Peer{}) clientNode := node.New(consensusObj, nil) @@ -254,6 +254,6 @@ func main() { // Send a stop message to stop the nodes at the end msg := proto_node.ConstructStopMessage() - peers := append(configr.GetValidators(), leaders...) + peers := append(config.GetValidators(), leaders...) p2p.BroadcastMessage(peers, msg) } diff --git a/client/config/config.go b/client/config/config.go index c4f6922ef..40aa35b53 100644 --- a/client/config/config.go +++ b/client/config/config.go @@ -22,14 +22,14 @@ type Config struct { } func NewConfig() *Config { - configr := Config{} - return &configr + config := Config{} + return &config } // Gets all the validator peers -func (configr *Config) GetValidators() []p2p.Peer { +func (config *Config) GetValidators() []p2p.Peer { var peerList []p2p.Peer - for _, entry := range configr.config { + for _, entry := range config.config { if entry.Role != "validator" { continue } @@ -40,10 +40,10 @@ func (configr *Config) GetValidators() []p2p.Peer { } // Gets all the leader peers and corresponding shard Ids -func (configr *Config) GetLeadersAndShardIds() ([]p2p.Peer, []uint32) { +func (config *Config) GetLeadersAndShardIds() ([]p2p.Peer, []uint32) { var peerList []p2p.Peer var shardIDs []uint32 - for _, entry := range configr.config { + for _, entry := range config.config { if entry.Role == "leader" { peerList = append(peerList, p2p.Peer{Ip: entry.IP, Port: entry.Port}) val, err := strconv.Atoi(entry.ShardID) @@ -57,8 +57,8 @@ func (configr *Config) GetLeadersAndShardIds() ([]p2p.Peer, []uint32) { return peerList, shardIDs } -func (configr *Config) GetClientPeer() *p2p.Peer { - for _, entry := range configr.config { +func (config *Config) GetClientPeer() *p2p.Peer { + for _, entry := range config.config { if entry.Role != "client" { continue } @@ -69,8 +69,8 @@ func (configr *Config) GetClientPeer() *p2p.Peer { } // Gets the port of the client node in the config -func (configr *Config) GetClientPort() string { - for _, entry := range configr.config { +func (config *Config) GetClientPort() string { + for _, entry := range config.config { if entry.Role == "client" { return entry.Port } @@ -79,7 +79,7 @@ func (configr *Config) GetClientPort() string { } // Parse the config file and return a 2d array containing the file data -func (configr *Config) ReadConfigFile(filename string) error 
{ +func (config *Config) ReadConfigFile(filename string) error { file, err := os.Open(filename) defer file.Close() if err != nil { @@ -94,6 +94,6 @@ func (configr *Config) ReadConfigFile(filename string) error { entry := ConfigEntry{p[0], p[1], p[2], p[3]} result = append(result, entry) } - configr.config = result + config.config = result return nil } diff --git a/client/txgen/main.go b/client/txgen/main.go index 9f5b6cd8c..57a78b755 100644 --- a/client/txgen/main.go +++ b/client/txgen/main.go @@ -10,7 +10,7 @@ import ( "github.com/simple-rules/harmony-benchmark/blockchain" "github.com/simple-rules/harmony-benchmark/client" - "github.com/simple-rules/harmony-benchmark/client/config" + client_config "github.com/simple-rules/harmony-benchmark/client/config" "github.com/simple-rules/harmony-benchmark/consensus" "github.com/simple-rules/harmony-benchmark/crypto/pki" "github.com/simple-rules/harmony-benchmark/log" @@ -255,9 +255,9 @@ func main() { flag.Parse() // Read the configs - configr := config.NewConfig() - configr.ReadConfigFile(*configFile) - leaders, shardIds := configr.GetLeadersAndShardIds() + config := client_config.NewConfig() + config.ReadConfigFile(*configFile) + leaders, shardIds := config.GetLeadersAndShardIds() setting.numOfAddress = 10000 // Do cross shard tx if there are more than one shard @@ -283,7 +283,7 @@ func main() { } // Client/txgenerator server node setup - clientPort := configr.GetClientPort() + clientPort := config.GetClientPort() consensusObj := consensus.NewConsensus("0", clientPort, "0", nil, p2p.Peer{}) clientNode := node.New(consensusObj, nil) @@ -360,6 +360,6 @@ func main() { // Send a stop message to stop the nodes at the end msg := proto_node.ConstructStopMessage() - peers := append(configr.GetValidators(), leaders...) + peers := append(config.GetValidators(), leaders...) p2p.BroadcastMessage(peers, msg) } diff --git a/client/wallet/main.go b/client/wallet/main.go index 96391d503..96cc439d6 100644 --- a/client/wallet/main.go +++ b/client/wallet/main.go @@ -15,7 +15,7 @@ import ( "github.com/dedis/kyber" "github.com/simple-rules/harmony-benchmark/blockchain" "github.com/simple-rules/harmony-benchmark/client" - "github.com/simple-rules/harmony-benchmark/client/config" + client_config "github.com/simple-rules/harmony-benchmark/client/config" "github.com/simple-rules/harmony-benchmark/crypto" "github.com/simple-rules/harmony-benchmark/crypto/pki" "github.com/simple-rules/harmony-benchmark/node" @@ -178,10 +178,10 @@ func main() { } func FetchUtxos() (blockchain.UtxoMap, error) { - configr := config.NewConfig() - configr.ReadConfigFile("local_config_shards.txt") - leaders, _ := configr.GetLeadersAndShardIds() - clientPeer := configr.GetClientPeer() + config := client_config.NewConfig() + config.ReadConfigFile("local_config_shards.txt") + leaders, _ := config.GetLeadersAndShardIds() + clientPeer := config.GetClientPeer() walletNode := node.New(nil, nil) walletNode.Client = client.NewClient(&leaders) go walletNode.StartServer(clientPeer.Port) From 87af35c6edfb26acf90dcd90c50d2ca72593e84c Mon Sep 17 00:00:00 2001 From: Minh Doan Date: Fri, 31 Aug 2018 22:10:19 -0700 Subject: [PATCH 11/20] change travis --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index cd15cda6d..8153e7cae 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,4 +11,6 @@ install: - go build -v ./... 
notifications: slack: + on_success: always + on_failure: always secure: RPB3ThYIGuDUidvaWfOA7Hc9x1bDfd5+Y10r7xwY+NGCN3zW86s/GNLpLutI0MWTV9e2CJupHvz5clp8Ktle/tVjLhs6jHQnNV7U8PTWKkL5By6IFVAHN12unMQn/m0RPwqMfdubajXoV51XhbFA/iow/0fqwsd61VdPIuBrlQjy9z7kyVnRLNoGvYjDqKEkJfYVb3qFNFLzD0F7Y2AgxnezIRjsTLgHzR4owLJYqVMhvTYIV9/vSf1w4UUPzhHyZRESl6bri+a1+g7GxE32OtNwq68xxVeeJcrO/MbjAHHW9V6BW1MjJfYzD5T+7JHIfZOjV2WgzJ7uCkVYztfq+02yOCSWsLNxFVojIDhVFEhhJ6Vd2Zf1otolS7j0svK/qNmShID9q9NAasaI105GsQgtaSPAUGd88J/vyX2ndG1nDOvxmgOo10tZFOnPHW7JnWMybk3PLza8o1ujA7X3JFdvDA8BPP9h6MVP4N7doCQ/n4Crts53HvEWlvcv5sBNu61WYlSTBzf1qNwBKMyN2E0rNubsxKmW8B6jLdWYdlx57nyTRPraNKGE1fnUW5nWRZGax3F1tQRwEfpQMk22qgeUK0RYWsPgHFaPciKCA3dJX7t1k/ib9pyR4nc9SZnYw54KMhkAXPIVQ0iy0EpTAH1DNYV6v8zXCwjl+BdkhlY= From 3d7aa77aa740ffe683fa6295d3c9a2bfeda9eec4 Mon Sep 17 00:00:00 2001 From: Minh Doan Date: Fri, 31 Aug 2018 22:25:02 -0700 Subject: [PATCH 12/20] try to fix travis-ci --- .travis.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index 8153e7cae..7a9cb9074 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,5 @@ install: - ./.travis.gofmt.sh - go build -v ./... notifications: - slack: - on_success: always - on_failure: always - secure: RPB3ThYIGuDUidvaWfOA7Hc9x1bDfd5+Y10r7xwY+NGCN3zW86s/GNLpLutI0MWTV9e2CJupHvz5clp8Ktle/tVjLhs6jHQnNV7U8PTWKkL5By6IFVAHN12unMQn/m0RPwqMfdubajXoV51XhbFA/iow/0fqwsd61VdPIuBrlQjy9z7kyVnRLNoGvYjDqKEkJfYVb3qFNFLzD0F7Y2AgxnezIRjsTLgHzR4owLJYqVMhvTYIV9/vSf1w4UUPzhHyZRESl6bri+a1+g7GxE32OtNwq68xxVeeJcrO/MbjAHHW9V6BW1MjJfYzD5T+7JHIfZOjV2WgzJ7uCkVYztfq+02yOCSWsLNxFVojIDhVFEhhJ6Vd2Zf1otolS7j0svK/qNmShID9q9NAasaI105GsQgtaSPAUGd88J/vyX2ndG1nDOvxmgOo10tZFOnPHW7JnWMybk3PLza8o1ujA7X3JFdvDA8BPP9h6MVP4N7doCQ/n4Crts53HvEWlvcv5sBNu61WYlSTBzf1qNwBKMyN2E0rNubsxKmW8B6jLdWYdlx57nyTRPraNKGE1fnUW5nWRZGax3F1tQRwEfpQMk22qgeUK0RYWsPgHFaPciKCA3dJX7t1k/ib9pyR4nc9SZnYw54KMhkAXPIVQ0iy0EpTAH1DNYV6v8zXCwjl+BdkhlY= + slack: harmonyone:gggCd1QQopsQAW8JYgBWiH7M + # secure: RPB3ThYIGuDUidvaWfOA7Hc9x1bDfd5+Y10r7xwY+NGCN3zW86s/GNLpLutI0MWTV9e2CJupHvz5clp8Ktle/tVjLhs6jHQnNV7U8PTWKkL5By6IFVAHN12unMQn/m0RPwqMfdubajXoV51XhbFA/iow/0fqwsd61VdPIuBrlQjy9z7kyVnRLNoGvYjDqKEkJfYVb3qFNFLzD0F7Y2AgxnezIRjsTLgHzR4owLJYqVMhvTYIV9/vSf1w4UUPzhHyZRESl6bri+a1+g7GxE32OtNwq68xxVeeJcrO/MbjAHHW9V6BW1MjJfYzD5T+7JHIfZOjV2WgzJ7uCkVYztfq+02yOCSWsLNxFVojIDhVFEhhJ6Vd2Zf1otolS7j0svK/qNmShID9q9NAasaI105GsQgtaSPAUGd88J/vyX2ndG1nDOvxmgOo10tZFOnPHW7JnWMybk3PLza8o1ujA7X3JFdvDA8BPP9h6MVP4N7doCQ/n4Crts53HvEWlvcv5sBNu61WYlSTBzf1qNwBKMyN2E0rNubsxKmW8B6jLdWYdlx57nyTRPraNKGE1fnUW5nWRZGax3F1tQRwEfpQMk22qgeUK0RYWsPgHFaPciKCA3dJX7t1k/ib9pyR4nc9SZnYw54KMhkAXPIVQ0iy0EpTAH1DNYV6v8zXCwjl+BdkhlY= From 7a575101020b158eac3612ad6a764f558dbd42bb Mon Sep 17 00:00:00 2001 From: Leo Chen Date: Sat, 1 Sep 2018 05:37:44 +0000 Subject: [PATCH 13/20] clean up code after removal of aws-experiment-launch directory move kill_node.sh to experiment-deploy repo Signed-off-by: Leo Chen --- go_executable_build.sh | 5 ----- kill_node.sh | 5 ----- 2 files changed, 10 deletions(-) delete mode 100755 kill_node.sh diff --git a/go_executable_build.sh b/go_executable_build.sh index ba0995629..c90532e0d 100755 --- a/go_executable_build.sh +++ b/go_executable_build.sh @@ -3,8 +3,6 @@ GOOS=linux GOARCH=amd64 env GOOS=$GOOS GOARCH=$GOARCH go build -o bin/benchmark benchmark.go -env GOOS=$GOOS GOARCH=$GOARCH go build -o bin/soldier aws-experiment-launch/experiment/soldier/main.go -env GOOS=$GOOS GOARCH=$GOARCH go build -o bin/commander 
aws-experiment-launch/experiment/commander/main.go env GOOS=$GOOS GOARCH=$GOARCH go build -o bin/txgen client/txgen/main.go AWSCLI=aws @@ -13,7 +11,4 @@ if [ "$1" != "" ]; then fi $AWSCLI s3 cp bin/benchmark s3://unique-bucket-bin/benchmark --acl public-read-write -$AWSCLI s3 cp bin/soldier s3://unique-bucket-bin/soldier --acl public-read-write -$AWSCLI s3 cp bin/commander s3://unique-bucket-bin/commander --acl public-read-write $AWSCLI s3 cp bin/txgen s3://unique-bucket-bin/txgen --acl public-read-write -$AWSCLI s3 cp kill_node.sh s3://unique-bucket-bin/kill_node.sh --acl public-read-write diff --git a/kill_node.sh b/kill_node.sh deleted file mode 100755 index 1f6d9e4bd..000000000 --- a/kill_node.sh +++ /dev/null @@ -1,5 +0,0 @@ -for pid in `/bin/ps -fu $USER| grep "benchmark\|txgen\|soldier\|commander\|profiler" | grep -v "grep" | awk '{print $2}'`; -do - echo 'Killed process: '$pid - kill -9 $pid -done From 591846068d804d6073de667b3fceed9db4f3a831 Mon Sep 17 00:00:00 2001 From: Leo Chen Date: Sat, 1 Sep 2018 06:47:13 +0000 Subject: [PATCH 14/20] fix config.go for commander/soldier add back two functions to fix the build errors Signed-off-by: Leo Chen --- client/config/config.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/client/config/config.go b/client/config/config.go index 40aa35b53..7fd24f89f 100644 --- a/client/config/config.go +++ b/client/config/config.go @@ -97,3 +97,16 @@ func (config *Config) ReadConfigFile(filename string) error { config.config = result return nil } + +func (config *Config) GetMyConfigEntry(ip string, port string) *ConfigEntry { + for _, entry := range config.config { + if entry.IP == ip && entry.Port == port { + return &entry + } + } + return nil +} + +func (config *Config) GetConfigEntries() []ConfigEntry { + return config.config +} From fa1fdecfbdac82eabe560596532f5e50a44280ca Mon Sep 17 00:00:00 2001 From: Richard Liu Date: Fri, 31 Aug 2018 23:56:09 -0700 Subject: [PATCH 15/20] added blockLatency. --- consensus/consensus_leader.go | 1 + 1 file changed, 1 insertion(+) diff --git a/consensus/consensus_leader.go b/consensus/consensus_leader.go index 37dd1b986..a526ec109 100644 --- a/consensus/consensus_leader.go +++ b/consensus/consensus_leader.go @@ -425,6 +425,7 @@ func (consensus *Consensus) reportMetrics(block blockchain.Block) { "nodeCount": {strconv.Itoa(len(consensus.validators) + 1)}, "latestBlockHash": {hex.EncodeToString(consensus.blockHash[:])}, "latestTxHash": {hex.EncodeToString(block.TransactionIds[len(block.TransactionIds)-1][:])}, + "blockLatency": {strconv.Itoa(int(timeElapsed / time.Millisecond))}, } body := bytes.NewBufferString(form.Encode()) From 24ea87f1def90245d3f75456df4f8aa0b2827f0f Mon Sep 17 00:00:00 2001 From: Richard Liu Date: Sat, 1 Sep 2018 00:31:40 -0700 Subject: [PATCH 16/20] latestTxHashes. 
--- consensus/consensus_leader.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/consensus/consensus_leader.go b/consensus/consensus_leader.go index a526ec109..5005cbd2a 100644 --- a/consensus/consensus_leader.go +++ b/consensus/consensus_leader.go @@ -424,8 +424,12 @@ func (consensus *Consensus) reportMetrics(block blockchain.Block) { "txCount": {strconv.Itoa(int(numOfTxs))}, "nodeCount": {strconv.Itoa(len(consensus.validators) + 1)}, "latestBlockHash": {hex.EncodeToString(consensus.blockHash[:])}, - "latestTxHash": {hex.EncodeToString(block.TransactionIds[len(block.TransactionIds)-1][:])}, - "blockLatency": {strconv.Itoa(int(timeElapsed / time.Millisecond))}, + "latestTxHashes": { + hex.EncodeToString(block.TransactionIds[len(block.TransactionIds)-1][:]), + hex.EncodeToString(block.TransactionIds[len(block.TransactionIds)-2][:]), + hex.EncodeToString(block.TransactionIds[len(block.TransactionIds)-3][:]), + }, + "blockLatency": {strconv.Itoa(int(timeElapsed / time.Millisecond))}, } body := bytes.NewBufferString(form.Encode()) From 3a060dc5e4f703c9e37c32e7c0933a380f20f474 Mon Sep 17 00:00:00 2001 From: Leo Chen Date: Sat, 1 Sep 2018 07:37:46 +0000 Subject: [PATCH 17/20] add back kill_node.sh Signed-off-by: Leo Chen --- kill_node.sh | 5 +++++ 1 file changed, 5 insertions(+) create mode 100755 kill_node.sh diff --git a/kill_node.sh b/kill_node.sh new file mode 100755 index 000000000..1f6d9e4bd --- /dev/null +++ b/kill_node.sh @@ -0,0 +1,5 @@ +for pid in `/bin/ps -fu $USER| grep "benchmark\|txgen\|soldier\|commander\|profiler" | grep -v "grep" | awk '{print $2}'`; +do + echo 'Killed process: '$pid + kill -9 $pid +done From 7311b45859f359bf48d802ff2cd104b5f6910f34 Mon Sep 17 00:00:00 2001 From: Leo Chen Date: Sat, 1 Sep 2018 07:44:04 +0000 Subject: [PATCH 18/20] Revert "fix config.go for commander/soldier" This reverts commit 591846068d804d6073de667b3fceed9db4f3a831. --- client/config/config.go | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/client/config/config.go b/client/config/config.go index 7fd24f89f..40aa35b53 100644 --- a/client/config/config.go +++ b/client/config/config.go @@ -97,16 +97,3 @@ func (config *Config) ReadConfigFile(filename string) error { config.config = result return nil } - -func (config *Config) GetMyConfigEntry(ip string, port string) *ConfigEntry { - for _, entry := range config.config { - if entry.IP == ip && entry.Port == port { - return &entry - } - } - return nil -} - -func (config *Config) GetConfigEntries() []ConfigEntry { - return config.config -} From 708fc46a9d1da40bd087e744c4093afaeec186e1 Mon Sep 17 00:00:00 2001 From: Richard Liu Date: Sat, 1 Sep 2018 15:32:10 -0700 Subject: [PATCH 19/20] make pwd the bin folder. 
--- benchmark.go | 2 +- deploy.sh | 7 ++++--- run_experiment.sh | 2 -- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/benchmark.go b/benchmark.go index fc4521cba..4c2621856 100644 --- a/benchmark.go +++ b/benchmark.go @@ -33,7 +33,7 @@ func attackDetermination(attackedMode int) bool { } func startProfiler(shardID string, logFolder string) { - err := utils.RunCmd("./bin/profiler", "-pid", strconv.Itoa(os.Getpid()), "-shard_id", shardID, "-log_folder", logFolder) + err := utils.RunCmd("./profiler", "-pid", strconv.Itoa(os.Getpid()), "-shard_id", shardID, "-log_folder", logFolder) if err != nil { log.Error("Failed to start profiler") } diff --git a/deploy.sh b/deploy.sh index 737dfb755..0b83ad720 100755 --- a/deploy.sh +++ b/deploy.sh @@ -38,6 +38,7 @@ db_supported=$2 go build -o bin/benchmark go build -o bin/txgen client/txgen/main.go go build -o bin/profiler profiler/main.go +cd bin # Create a tmp folder for logs t=`date +"%Y%m%d-%H%M%S"` @@ -51,9 +52,9 @@ while IFS='' read -r line || [[ -n "$line" ]]; do #echo $ip $port $mode if [ "$mode" != "client" ]; then if [ -z "$db_supported" ]; then - ./bin/benchmark -ip $ip -port $port -config_file $config -log_folder $log_folder& + ./benchmark -ip $ip -port $port -config_file $config -log_folder $log_folder& else - ./bin/benchmark -ip $ip -port $port -config_file $config -log_folder $log_folder -db_supported& + ./benchmark -ip $ip -port $port -config_file $config -log_folder $log_folder -db_supported& fi fi done < $config @@ -61,5 +62,5 @@ done < $config txgen_disabled=$3 # Generate transactions if [ -z "$txgen_disabled" ]; then - ./bin/txgen -config_file $config -log_folder $log_folder + ./txgen -config_file $config -log_folder $log_folder fi diff --git a/run_experiment.sh b/run_experiment.sh index b3bd99f8e..00c35d818 100755 --- a/run_experiment.sh +++ b/run_experiment.sh @@ -3,8 +3,6 @@ go build -o bin/benchmark go build -o bin/txgen client/txgen/main.go -go build -o bin/commander aws-experiment-launch/experiment/commander/main.go -go build -o bin/soldier aws-experiment-launch/experiment/soldier/main.go go build -o bin/profiler profiler/main.go cd bin From ea1035cdeec53c194727af5cd37d59d10156e102 Mon Sep 17 00:00:00 2001 From: Leo Chen Date: Sun, 2 Sep 2018 00:53:13 +0000 Subject: [PATCH 20/20] enable build/upload profiler Signed-off-by: Leo Chen --- go_executable_build.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/go_executable_build.sh b/go_executable_build.sh index c90532e0d..49fdf186b 100755 --- a/go_executable_build.sh +++ b/go_executable_build.sh @@ -4,6 +4,7 @@ GOOS=linux GOARCH=amd64 env GOOS=$GOOS GOARCH=$GOARCH go build -o bin/benchmark benchmark.go env GOOS=$GOOS GOARCH=$GOARCH go build -o bin/txgen client/txgen/main.go +env GOOS=$GOOS GOARCH=$GOARCH go build -o bin/profiler profiler/main.go AWSCLI=aws if [ "$1" != "" ]; then @@ -12,3 +13,4 @@ fi $AWSCLI s3 cp bin/benchmark s3://unique-bucket-bin/benchmark --acl public-read-write $AWSCLI s3 cp bin/txgen s3://unique-bucket-bin/txgen --acl public-read-write +$AWSCLI s3 cp bin/profiler s3://unique-bucket-bin/profiler --acl public-read-write