commit
3f825b1926
@ -0,0 +1,32 @@ |
||||
import argparse |
||||
import os |
||||
import random |
||||
import sys |
||||
|
||||
from utils import utils |
||||
|
||||
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='This script helps you to collect public ips')
    parser.add_argument('--instance_output', type=str, dest='instance_output',
                        default='instance_output.txt',
                        help='the file contains node_name_tag and region number of created instances.')
    parser.add_argument('--region_config', type=str,
                        dest='region_config', default='configuration.txt')
    parser.add_argument('--file_output', type=str,
                        dest='file_output', default='raw_ip.txt')
    args = parser.parse_args()

    # BUG FIX: only instance_output is a required input; file_output is the
    # file we create below, so the old message ("%s or %s are not existed",
    # naming file_output) was wrong. Also use print() so the script runs on
    # Python 3 as well as Python 2.
    if not args.instance_output or not os.path.isfile(args.instance_output):
        print("%s does not exist" % args.instance_output)
        sys.exit(1)

    # The redundant `if args.instance_output:` guard was removed -- the exit
    # above already guarantees it is set.
    with open(args.instance_output, "r") as fin, open(args.file_output, "w") as fout:
        # Each input line is "<node_name_tag> <region_number>".
        for line in fin:
            items = line.split(" ")
            region_number = items[1].strip()
            node_name_tag = items[0].strip()
            ip_list = utils.collect_public_ips(region_number, node_name_tag, args.region_config)
            # Shuffle so downstream role assignment is spread across regions.
            random.shuffle(ip_list)
            for ip in ip_list:
                fout.write(ip + " " + node_name_tag + "\n")
    print("Done collecting public ips %s" % args.file_output)
@ -0,0 +1 @@ |
||||
python collect_public_ips.py --instance_output instance_output.txt --file_output raw_ip.txt |
@ -0,0 +1,80 @@ |
||||
import argparse |
||||
import logging |
||||
import os |
||||
import stat |
||||
import sys |
||||
|
||||
from utils import utils |
||||
|
||||
logging.basicConfig(level=logging.INFO, format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s') |
||||
LOGGER = logging.getLogger(__file__) |
||||
LOGGER.setLevel(logging.INFO) |
||||
|
||||
# SSH private-key filenames, one per region, indexed by region number - 1
# (region numbers are 1-based, matching the rows of configuration.txt --
# e.g. PEMS[0] is region 1 / virginia).
PEMS = [
    "virginia-key-benchmark.pem",
    "ohio-key-benchmark.pem",
    "california-key-benchmark.pem",
    "oregon-key-benchmark.pem",
    "tokyo-key-benchmark.pem",
    "singapore-key-benchmark.pem",
    "frankfurt-key-benchmark.pem",
    "ireland-key-benchmark.pem",
]
||||
|
||||
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='This script helps you to genereate distribution config')
    parser.add_argument('--distribution_config', type=str,
                        dest='distribution_config', default='distribution_config.txt')
    parser.add_argument('--commander_logging', type=str,
                        dest='commander_logging', default='commander_logging.sh')
    parser.add_argument('--logs_download', type=str,
                        dest='logs_download', default='logs_download.sh')
    parser.add_argument('--commander_info', type=str,
                        dest='commander_info', default='commander_info.txt')

    args = parser.parse_args()

    if not os.path.exists(args.distribution_config):
        sys.exit(1)
    with open(args.distribution_config, "r") as fin:
        lines = fin.readlines()

    commander_address = None
    commander_region = None
    commander_output = None
    # Rewrite distribution_config in place: keep every non-commander line,
    # and remember the commander's address/region for the helper scripts below.
    with open(args.distribution_config, "w") as fout:
        for line in lines:
            if "commander" in line:
                items = [item.strip() for item in line.split(" ")]
                commander_address = items[0]
                # NOTE(review): assumes the 5th space-separated field starts
                # with a single-digit region number -- confirm against the
                # producer of distribution_config.txt.
                commander_region = int(items[4][0])
                commander_output = "\n".join(items)
            else:
                fout.write(line.strip() + "\n")
    if not commander_address or not commander_region:
        LOGGER.info("Failed to extract commander address and commander region.")
        sys.exit(1)

    with open(args.commander_info, "w") as fout:
        fout.write(commander_output)

    LOGGER.info("Generated %s" % args.distribution_config)
    LOGGER.info("Generated %s" % args.commander_info)
    # Emit a shell script that scp's both configs to the commander and then
    # ssh'es in; PEMS is indexed by 1-based region number.
    with open(args.commander_logging, "w") as fout:
        fout.write("scp -i ../keys/%s %s ec2-user@%s:/tmp/distribution_config.txt\n" % (PEMS[commander_region - 1], args.distribution_config, commander_address))
        fout.write("scp -i ../keys/%s %s ec2-user@%s:/tmp/commander_info.txt\n" % (PEMS[commander_region - 1], args.commander_info, commander_address))
        fout.write("if [ $? -eq 0 ]; then\n\t")
        fout.write("ssh -i ../keys/%s ec2-user@%s\n" % (PEMS[commander_region - 1], commander_address))
        fout.write("else\n\techo \"Failed to send %s to the commander machine\"\nfi\n" % args.distribution_config)
    st = os.stat(args.commander_logging)
    # Mark the generated script executable (chmod +x equivalent).
    os.chmod(args.commander_logging, st.st_mode | stat.S_IEXEC)
    LOGGER.info("Generated %s" % args.commander_logging)

    # Emit a second script that pulls the upload binary back from the commander.
    with open(args.logs_download, "w") as fout:
        fout.write("scp -i ../keys/%s ec2-user@%s:~/projects/src/harmony-benchmark/bin/upload tmp/\n" % (PEMS[commander_region - 1], commander_address))
    st = os.stat(args.logs_download)
    os.chmod(args.logs_download, st.st_mode | stat.S_IEXEC)
    LOGGER.info("Generated %s" % args.logs_download)

    LOGGER.info("DONE.")
||||
|
@ -1,8 +1,8 @@ |
||||
1,us-east-1,virginia-key-benchmark,virginia-security-group,virginia,ami-b70554c8 |
||||
2,us-east-2,ohio-key-benchmark,ohio-security-group,ohio,ami-8c122be9 |
||||
3,us-west-1,california-key-benchmark,california-security-group,california,ami-e0ba5c83 |
||||
4,us-west-2,oregon-key-benchmark,oregon-security-group,oregon,ami-a9d09ed1 |
||||
5,ap-northeast-1,tokyo-key-benchmark,tokyo-security-group,tokyo,ami-e99f4896 |
||||
6,ap-southeast-1,singapore-key-benchmark,singapore-security-group,singapore,ami-05868579 |
||||
7,eu-central-1,frankfurt-key-benchmark,frankfurt-security-group,frankfurt,ami-7c4f7097 |
||||
8,eu-west-1,ireland-key-benchmark,ireland-security-group,ireland,ami-466768ac |
||||
1,us-east-1,virginia-key-benchmark,virginia-security-group,virginia,ami-b70554c8,sg-04d0b62ee08ce8800 |
||||
2,us-east-2,ohio-key-benchmark,ohio-security-group,ohio,ami-8c122be9,sg-0789078f1c76defbe |
||||
3,us-west-1,california-key-benchmark,california-security-group,california,ami-e0ba5c83,sg-0a66ccb6ab9161a14 |
||||
4,us-west-2,oregon-key-benchmark,oregon-security-group,oregon,ami-a9d09ed1,sg-020cb5729fa212d43 |
||||
5,ap-northeast-1,tokyo-key-benchmark,tokyo-security-group,tokyo,ami-e99f4896,sg-009aeb97f675c1ad5 |
||||
6,ap-southeast-1,singapore-key-benchmark,singapore-security-group,singapore,ami-05868579,sg-05f9b60044a19dfb2 |
||||
7,eu-central-1,frankfurt-key-benchmark,frankfurt-security-group,frankfurt,ami-7c4f7097,sg-0bb06fcd8b25b5910 |
||||
8,eu-west-1,ireland-key-benchmark,ireland-security-group,ireland,ami-466768ac,sg-0aa8954acb79fdb58 |
@ -0,0 +1,20 @@ |
||||
# Create instances in all 8 regions, collect their IPs, generate the
# distribution config and deploy. Requires 3 positional args.
if [ $# -lt 3 ]; then
    echo "Please provide # of instances, # of shards, # of clients"
    exit 1
fi

INSTANCE_NUM=$1
SHARD_NUM=$2
CLIENT_NUM=$3

echo "Creating $INSTANCE_NUM instances at 8 regions"
python create_instances.py --regions 1,2,3,4,5,6,7,8 --instances $INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM

# BUG FIX: message typo ("Rung collecint raw ips").
echo "Run collecting raw ips"
python collect_public_ips.py --instance_output instance_output.txt

echo "Generate distribution_config"
python generate_distribution_config.py --ip_list_file raw_ip.txt --shard_num $SHARD_NUM --client_num $CLIENT_NUM

echo "Deploy"
python deploy.py
@ -0,0 +1,15 @@ |
||||
# Debug variant of the deploy pipeline: fixed counts, instance creation
# commented out so an existing instance_output.txt is reused.
INSTANCE_NUM=1
SHARD_NUM=1
CLIENT_NUM=1

# echo "Creating $INSTANCE_NUM instances at 8 regions"
# python create_instances.py --regions 1,2,3,4,5,6,7,8 --instances $INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM

# BUG FIX: message typo ("Rung collecint raw ips").
echo "Run collecting raw ips"
python collect_public_ips.py --instance_output instance_output.txt

echo "Generate distribution_config"
python generate_distribution_config.py --ip_list_file raw_ip.txt --shard_num $SHARD_NUM --client_num $CLIENT_NUM

echo "Deploy"
python deploy.py
@ -0,0 +1,154 @@ |
||||
import argparse |
||||
import base64 |
||||
import boto3 |
||||
import datetime |
||||
import json |
||||
import sys |
||||
import threading |
||||
import time |
||||
|
||||
from utils import utils, spot_fleet, logger |
||||
|
||||
LOGGER = logger.getLogger(__file__) |
||||
|
||||
|
||||
class InstanceResource:
    """Enum-like constants selecting how EC2 capacity is requested."""
    ON_DEMAND = 1
    SPOT_INSTANCE = 2
    SPOT_FLEET = 3
||||
|
||||
|
||||
def run_one_region_instances(config, region_number, number_of_instances, instance_resource=InstanceResource.ON_DEMAND):
    """Create instances in a single region.

    Returns (node_name_tag, ec2_client) on success, or (None, None) when the
    requested instance_resource has no implementation here (SPOT_INSTANCE is
    declared but not handled).
    """
    region_name = config[region_number][utils.REGION_NAME]
    # Create session.
    session = boto3.Session(region_name=region_name)
    # Create a client.
    ec2_client = session.client('ec2')

    if instance_resource == InstanceResource.ON_DEMAND:
        node_name_tag = create_instances(
            config, ec2_client, region_number, int(number_of_instances))
        LOGGER.info("Created %s in region %s" % (node_name_tag, region_number))
        return node_name_tag, ec2_client
    elif instance_resource == InstanceResource.SPOT_FLEET:
        instance_type_list = ['t2.micro', 't2.small', 'm3.medium']
        # NOTE(review): the literal 1 is presumably the on-demand target
        # count within the fleet -- confirm in spot_fleet.
        node_name_tag = spot_fleet.request_spot_fleet_with_on_demand(
            config, ec2_client, region_number, int(number_of_instances), 1, instance_type_list)
        # node_name_tag = spot_fleet.request_spot_fleet(
        #     config, ec2_client, region_number, int(number_of_instances), instance_type_list)
        return node_name_tag, ec2_client
    else:
        return None, None
||||
|
||||
|
||||
def create_instances(config, ec2_client, region_number, number_of_instances):
    """Launch on-demand instances in one region and wait for their public IPs.

    Returns the generated node_name_tag on success, or None if the expected
    number of public IPs never appears within the polling window.
    """
    node_name_tag = utils.get_node_name_tag(region_number)
    LOGGER.info("Creating node_name_tag: %s" % node_name_tag)
    available_zone = utils.get_one_availability_zone(ec2_client)
    LOGGER.info("Looking at zone %s to create instances." % available_zone)

    # All instances are tagged Name=node_name_tag so they can be found again
    # by the describe/terminate helpers.
    ec2_client.run_instances(
        MinCount=number_of_instances,
        MaxCount=number_of_instances,
        ImageId=config[region_number][utils.REGION_AMI],
        Placement={
            'AvailabilityZone': available_zone,
        },
        SecurityGroups=[config[region_number][utils.REGION_SECURITY_GROUP]],
        IamInstanceProfile={
            'Name': utils.IAM_INSTANCE_PROFILE
        },
        KeyName=config[region_number][utils.REGION_KEY],
        UserData=utils.USER_DATA,
        InstanceType=utils.INSTANCE_TYPE,
        TagSpecifications=[
            {
                'ResourceType': 'instance',
                'Tags': [
                    {
                        'Key': 'Name',
                        'Value': node_name_tag
                    },
                ]
            },
        ],
    )

    instance_ids = utils.get_instance_ids2(ec2_client, node_name_tag)
    LOGGER.info("Waiting for all %d instances in region %s to be in RUNNING" % (
        len(instance_ids), region_number))
    waiter = ec2_client.get_waiter('instance_running')
    waiter.wait(InstanceIds=instance_ids)

    # Even after RUNNING, public IPs can lag; poll up to 40 * 5s = 200s.
    count = 0
    while count < 40:
        time.sleep(5)
        LOGGER.info("Waiting ...")
        ip_list = utils.collect_public_ips_from_ec2_client(
            ec2_client, node_name_tag)
        if len(ip_list) == number_of_instances:
            LOGGER.info("Created %d instances" % number_of_instances)
            return node_name_tag
        count = count + 1
    LOGGER.info("Can not create %d instances" % number_of_instances)
    return None
||||
|
||||
|
||||
lock = threading.Lock() |
||||
|
||||
|
||||
def run_for_one_region(config, region_number, number_of_instances, instance_resouce, fout, fout2):
    """Thread target: create instances for one region and record the results.

    Writes "<node_name_tag> <region_number>" to fout and one line per
    instance id to fout2; file writes are serialized via the module lock.
    """
    node_name_tag, ec2_client = run_one_region_instances(
        config, region_number, number_of_instances, instance_resouce)
    if not node_name_tag:
        LOGGER.info("Failed to create instances for region %s" % region_number)
        return
    LOGGER.info("Managed to create instances for region %s" %
                region_number)
    instance_ids = utils.get_instance_ids2(ec2_client, node_name_tag)
    region_name = config[region_number][utils.REGION_NAME]
    with lock:
        fout.write("%s %s\n" % (node_name_tag, region_number))
        for instance_id in instance_ids:
            fout2.write(instance_id + " " + node_name_tag + " " + region_number +
                        " " + region_name + "\n")
||||
|
||||
|
||||
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='This script helps you start instances across multiple regions')
    parser.add_argument('--regions', type=str, dest='regions',
                        default='3', help="Supply a csv list of all regions")
    parser.add_argument('--instances', type=str, dest='num_instance_list',
                        default='1', help='number of instances in respective of region')
    parser.add_argument('--region_config', type=str,
                        dest='region_config', default='configuration.txt')
    parser.add_argument('--instance_output', type=str, dest='instance_output',
                        default='instance_output.txt', help='the file to append or write')
    parser.add_argument('--instance_ids_output', type=str, dest='instance_ids_output',
                        default='instance_ids_output.txt', help='the file to append or write')
    # BUG FIX: `type=bool` makes argparse treat ANY non-empty value --
    # including the string "False" -- as True. A boolean flag should be
    # store_true: pass `--append` to append, omit it to overwrite.
    parser.add_argument('--append', dest='append', action='store_true', default=False,
                        help='append to the current instance_output')
    args = parser.parse_args()
    config = utils.read_region_config(args.region_config)
    region_list = args.regions.split(',')
    num_instance_list = args.num_instance_list.split(',')
    assert len(region_list) == len(num_instance_list), "number of regions: %d != number of instances per region: %d" % (
        len(region_list), len(num_instance_list))

    write_mode = "a" if args.append else "w"
    with open(args.instance_output, write_mode) as fout, open(args.instance_ids_output, write_mode) as fout2:
        # One worker thread per region; run_for_one_region serializes its own
        # file writes with the module-level lock.
        thread_pool = []
        for region_number, number_of_instances in zip(region_list, num_instance_list):
            t = threading.Thread(target=run_for_one_region, args=(
                config, region_number, number_of_instances, InstanceResource.SPOT_FLEET, fout, fout2))
            LOGGER.info("creating thread for region %s" % region_number)
            t.start()
            thread_pool.append(t)
        for t in thread_pool:
            t.join()
        LOGGER.info("done.")
@ -0,0 +1 @@ |
||||
python create_instances.py --regions 1,3 --instances 3,3 |
@ -0,0 +1,7 @@ |
||||
# Create the given number of instances in every one of the 8 regions.
if [ $# -eq 0 ]; then
    echo "Please provide # of instances"
    exit 1
fi
INSTANCE_NUM=$1
echo "Creating $INSTANCE_NUM instances at 8 regions"
python create_instances.py --regions 1,2,3,4,5,6,7,8 --instances $INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM
@ -0,0 +1,6 @@ |
||||
#This script is used for debugging and testing as we only created 2 instances.
#Be aware that the default output will be instance_output_2.txt
INSTANCE_NUM=2

# BUG FIX: "$$" expands to the shell's own PID, so the old message printed
# e.g. "Creating 12345INSTANCE_NUM instances"; a single "$" expands the variable.
echo "Creating $INSTANCE_NUM instances at 8 regions"
python create_instances.py --regions 1,2,3,4,5,6,7,8 --instances $INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM --instance_output instance_output_2.txt
@ -0,0 +1,4 @@ |
||||
# Create 50 instances in each of the 8 regions, recording them in a
# dedicated output file so the default instance_output.txt is untouched.
INSTANCE_NUM=50

echo "Creating $INSTANCE_NUM instances at 8 regions"
python create_instances.py --regions 1,2,3,4,5,6,7,8 --instances $INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM --instance_output instance_output_50.txt
@ -0,0 +1,163 @@ |
||||
import argparse |
||||
import base64 |
||||
import boto3 |
||||
import datetime |
||||
import json |
||||
import logging |
||||
import os |
||||
import subprocess |
||||
import sys |
||||
import threading |
||||
import time |
||||
|
||||
from utils import utils |
||||
|
||||
logging.basicConfig(level=logging.INFO, format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s') |
||||
LOGGER = logging.getLogger(__file__) |
||||
LOGGER.setLevel(logging.INFO) |
||||
|
||||
class InstanceResource:
    """Enum-like constants selecting how EC2 capacity is requested."""
    ON_DEMAND = 1
    SPOT_INSTANCE = 2
    SPOT_FLEET = 3

# Startup script injected into every instance at boot (EC2 user data);
# read once at import time, so the module must be run from the directory
# containing user-data.sh.
with open("user-data.sh", "r") as userdata_file:
    USER_DATA = userdata_file.read()

# UserData must be base64 encoded for spot instances.
# NOTE(review): b64encode on a str only works on Python 2; under Python 3
# this would need USER_DATA.encode() -- confirm the targeted interpreter.
USER_DATA_BASE64 = base64.b64encode(USER_DATA)

IAM_INSTANCE_PROFILE = 'BenchMarkCodeDeployInstanceProfile'
REPO = "simple-rules/harmony-benchmark"
APPLICATION_NAME = 'benchmark-experiments'
# Timestamp suffix shared by every name generated during this run, so
# concurrent or repeated runs do not collide.
time_stamp = time.time()
CURRENT_SESSION = datetime.datetime.fromtimestamp(
    time_stamp).strftime('%H-%M-%S-%Y-%m-%d')
PLACEMENT_GROUP = "PLACEMENT-" + CURRENT_SESSION
NODE_NAME_SUFFIX = "NODE-" + CURRENT_SESSION
||||
|
||||
def run_one_region_codedeploy(region_number, region_config, node_name_tag, commit_id):
    """Wait for one region's tagged instances to be healthy, then run CodeDeploy.

    Returns (region_number, deployment_id, status) as produced by deploy().
    """
    ec2_client, session = utils.create_ec2_client(region_number, region_config)
    filters = [{'Name': 'tag:Name', 'Values': [node_name_tag]}]
    instance_ids = utils.get_instance_ids(ec2_client.describe_instances(Filters=filters))

    LOGGER.info("Number of instances: %d" % len(instance_ids))

    LOGGER.info("Waiting for %d instances in region %s to be in RUNNING" % (len(instance_ids), region_number))
    waiter = ec2_client.get_waiter('instance_running')
    waiter.wait(InstanceIds=instance_ids)

    # CONSISTENCY FIX: the next two messages used bare print() while the rest
    # of the module logs through LOGGER; route them through LOGGER as well.
    LOGGER.info("Waiting for %d instances in region %s with status OK" % (len(instance_ids), region_number))
    waiter = ec2_client.get_waiter('instance_status_ok')
    waiter.wait(InstanceIds=instance_ids)

    LOGGER.info("Waiting for %d instances in region %s with system in OK" % (len(instance_ids), region_number))
    waiter = ec2_client.get_waiter('system_status_ok')
    waiter.wait(InstanceIds=instance_ids)

    codedeploy = session.client('codedeploy')
    application_name = APPLICATION_NAME
    # The group name embeds commit prefix + session stamp so reruns don't collide.
    deployment_group_name = APPLICATION_NAME + "-" + commit_id[:6] + "-" + CURRENT_SESSION
    repo = REPO

    LOGGER.info("Setting up to deploy commit_id %s on region %s" % (commit_id, region_number))
    utils.get_application(codedeploy, application_name)
    deployment_group_id = utils.create_deployment_group(
        codedeploy, region_number, application_name, deployment_group_name, node_name_tag)
    if deployment_group_id:
        LOGGER.info("Created deployment group with id %s" % deployment_group_id)
    else:
        LOGGER.info("Created deployment group with name %s was created" % deployment_group_name)
    deployment_id, status = deploy(codedeploy, application_name, deployment_group_name, repo, commit_id)
    return region_number, deployment_id, status
||||
|
||||
|
||||
def deploy(codedeploy, application_name, deployment_group, repo, commit_id):
    """Deploy new code at specified revision to instance.

    arguments:
    - codedeploy: boto3 CodeDeploy client
    - application_name: CodeDeploy application to deploy under
    - deployment_group: deployment group name
    - repo: GitHub repository path from which to get the code
    - commit_id: commit ID to be deployed

    Returns (deployment_id, status). status is None if the deployment did not
    reach a terminal state within 10 minutes; both are None if the
    create_deployment call itself failed.
    """
    LOGGER.info("Launching CodeDeploy with commit " + commit_id)
    response = codedeploy.create_deployment(
        applicationName=application_name,
        deploymentGroupName=deployment_group,
        deploymentConfigName='CodeDeployDefault.AllAtOnce',
        description='benchmark experiments',
        revision={
            'revisionType': 'GitHub',
            'gitHubLocation': {
                'repository': repo,
                'commitId': commit_id,
            }
        }
    )
    if response:
        LOGGER.info("Deployment returned with deployment id: " + response["deploymentId"])
        deployment_id = response["deploymentId"]
    else:
        LOGGER.error("Deployment failed.")
        return None, None
    start_time = time.time()
    status = None
    # Poll for up to 10 minutes for the deployment to reach a terminal state.
    while time.time() - start_time < 600:
        response = codedeploy.get_deployment(deploymentId=deployment_id)
        if response and response.get('deploymentInfo'):
            status = response['deploymentInfo']['status']
            if status in ('Succeeded', 'Failed', 'Stopped'):
                break
        # BUG FIX: the original loop called get_deployment back-to-back with
        # no delay, busy-polling the API for up to 600s; sleep between polls.
        time.sleep(10)
    if status:
        LOGGER.info("Deployment status " + status)
    else:
        LOGGER.info("Deployment status: time out")
    return deployment_id, status
||||
|
||||
def run_one_region_codedeploy_wrapper(region_number, region_config, node_name_tag, commit_id):
    """Thread target: run one region's CodeDeploy and log its outcome."""
    result_region, deployment_id, status = run_one_region_codedeploy(
        region_number, region_config, node_name_tag, commit_id)
    LOGGER.info("deployment of region %s finished with deployment id %s with status %s"
                % (result_region, deployment_id, status))
||||
|
||||
def launch_code_deploy(region_list, region_config, commit_id):
    """Kick off CodeDeploy for every (node_name_tag, region_number) pair in parallel."""
    workers = []
    # node_name_tag comes first in each tuple.
    for node_name_tag, region_number in region_list:
        worker = threading.Thread(
            target=run_one_region_codedeploy_wrapper,
            args=(region_number, region_config, node_name_tag, commit_id))
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
    LOGGER.info("Finished.")
||||
|
||||
def get_head_commit_id():
    """Return the current git HEAD commit hash, or None if git is unavailable.

    Returns an empty string if git runs but prints nothing (e.g. not a repo),
    which callers treat as falsy.
    """
    # BUG FIX: the original used try/finally with a `return` in the finally
    # block, which silently suppresses EVERY exception (including
    # KeyboardInterrupt). Catch only the launch failure instead.
    try:
        process = subprocess.Popen(['git', 'rev-parse', 'HEAD'],
                                   shell=False, stdout=subprocess.PIPE)
    except OSError:
        # git binary not found / not executable.
        return None
    return process.communicate()[0].strip()
||||
|
||||
|
||||
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='This script helps you start instances across multiple regions')
    parser.add_argument('--instance_output', type=str, dest='instance_output',
                        default='instance_output.txt',
                        help='the file contains node_name_tag and region number of created instances.')
    parser.add_argument('--region_config', type=str, dest='region_config', default='configuration.txt')
    parser.add_argument('--commit_id', type=str, dest='commit_id',
                        default='f092d25d7a814622079fe92e9b36e10e46bc0d97')
    args = parser.parse_args()
    LOGGER.info("********* MAKE SURE YOU'RE RUNNING under harmony-benchmark code base *********")
    # Prefer the local git HEAD; fall back to the --commit_id argument.
    commit_id = get_head_commit_id() or args.commit_id

    if not os.path.isfile(args.instance_output) or not commit_id:
        LOGGER.info("%s does not exist" % args.instance_output)
        sys.exit(1)

    # Each line of instance_output is "<node_name_tag> <region_number>".
    with open(args.instance_output, "r") as fin:
        region_list = [line.split(" ") for line in fin.readlines()]
        region_list = [(item[0].strip(), item[1].strip()) for item in region_list]
        launch_code_deploy(region_list, args.region_config, commit_id)
@ -0,0 +1 @@ |
||||
python deploy.py --instance_output instance_output.txt |
@ -0,0 +1,8 @@ |
||||
# Change the commander address
# Fetch the benchmark upload artifacts from the commander machine into ./tmp.
if [ $# -eq 0 ]; then
    echo "Please provide ip address of the commander"
    exit 1
fi
ADDRESS=$1
mkdir -p ./tmp
# NOTE(review): hard-codes the california key -- only works if the commander
# runs in us-west-1; confirm or parameterize the key.
scp -r -i "california-key-benchmark.pem" ec2-user@$ADDRESS:~/projects/src/harmony-benchmark/bin/upload ./tmp/
@ -0,0 +1,16 @@ |
||||
# Make sure to have all keys with mode 600 at harmony-benchmark directory.
# Download tmp_log from every leader listed in leader_addresses.txt into
# ./tmp/<address>/. The matching region (and hence key) for each address is
# not known here, so every region's key is tried in turn; the scp attempts
# with the wrong keys simply fail and the matching one succeeds.
IFS=$'\n'
rm -rf ./tmp
for address in $(cat ./leader_addresses.txt)
do
    echo "trying to download from address $address"
    mkdir -p tmp/$address
    scp -r -o "StrictHostKeyChecking no" -i ../keys/california-key-benchmark.pem ec2-user@$address:~/projects/src/harmony-benchmark/tmp_log/* ./tmp/$address/
    scp -r -o "StrictHostKeyChecking no" -i ../keys/frankfurt-key-benchmark.pem ec2-user@$address:~/projects/src/harmony-benchmark/tmp_log/* ./tmp/$address/
    scp -r -o "StrictHostKeyChecking no" -i ../keys/ireland-key-benchmark.pem ec2-user@$address:~/projects/src/harmony-benchmark/tmp_log/* ./tmp/$address/
    scp -r -o "StrictHostKeyChecking no" -i ../keys/ohio-key-benchmark.pem ec2-user@$address:~/projects/src/harmony-benchmark/tmp_log/* ./tmp/$address/
    scp -r -o "StrictHostKeyChecking no" -i ../keys/oregon-key-benchmark.pem ec2-user@$address:~/projects/src/harmony-benchmark/tmp_log/* ./tmp/$address/
    scp -r -o "StrictHostKeyChecking no" -i ../keys/singapore-key-benchmark.pem ec2-user@$address:~/projects/src/harmony-benchmark/tmp_log/* ./tmp/$address/
    scp -r -o "StrictHostKeyChecking no" -i ../keys/tokyo-key-benchmark.pem ec2-user@$address:~/projects/src/harmony-benchmark/tmp_log/* ./tmp/$address/
    scp -r -o "StrictHostKeyChecking no" -i ../keys/virginia-key-benchmark.pem ec2-user@$address:~/projects/src/harmony-benchmark/tmp_log/* ./tmp/$address/
done
@ -0,0 +1,37 @@ |
||||
import argparse |
||||
import logging |
||||
import sys |
||||
|
||||
from utils import utils |
||||
|
||||
logging.basicConfig(level=logging.INFO, format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s') |
||||
LOGGER = logging.getLogger(__file__) |
||||
LOGGER.setLevel(logging.INFO) |
||||
|
||||
|
||||
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='This script helps you to genereate distribution config')
    parser.add_argument('--ip_list_file', type=str, dest='ip_list_file',
                        default='raw_ip.txt', help="the file containing available raw ips")
    # If the ip_list_file is None we need to use the region, node_name_tag and region_config to collect raw_ip
    parser.add_argument('--region', type=str, dest='region_number',
                        default="4", help="region number")
    parser.add_argument('--node_name_tag', type=str,
                        dest='node_name_tag', default='4-NODE-23-36-01-2018-07-05')
    parser.add_argument('--region_config', type=str,
                        dest='region_config', default='configuration.txt')

    parser.add_argument('--shard_number', type=int, dest='shard_number', default=1)
    parser.add_argument('--client_number', type=int, dest='client_number', default=1)
    parser.add_argument('--distribution_config', type=str,
                        dest='distribution_config', default='distribution_config.txt')
    args = parser.parse_args()

    # IDIOM FIX: compare against None with `is`, not `==` (PEP 8).
    # NOTE(review): with the 'raw_ip.txt' default this branch is unreachable
    # from the command line -- confirm whether callers set ip_list_file=None
    # programmatically before relying on generate_distribution_config2.
    if args.ip_list_file is None:
        utils.generate_distribution_config2(
            args.region_number, args.node_name_tag, args.region_config,
            args.shard_number, args.client_number, args.distribution_config)
    else:
        utils.generate_distribution_config3(args.shard_number, args.client_number,
                                            args.ip_list_file, args.distribution_config)
    LOGGER.info("Done writing %s" % args.distribution_config)
@ -0,0 +1 @@ |
||||
python generate_distribution_config.py --ip_list_file raw_ip.txt --shard_num 2 --client_num 2 |
@ -0,0 +1,9 @@ |
||||
# Print the average TPS reported in each leader log file under $1.
if [ $# -eq 0 ]; then
    # BUG FIX: message was missing the word "provide".
    echo "Please provide the directory of the log"
    exit 1
fi
DIR=$1
# Use the glob directly instead of parsing `ls` output (safe with spaces).
for file in "$DIR"/*leader*
do
    # Extract every TPS=<n> sample and average them.
    egrep -o "TPS=[0-9]+" "$file" | cut -f2 -d "=" | awk '{ sum += $1; n++ } END { if (n > 0) print sum / n; }'
done
@ -0,0 +1,63 @@ |
||||
import argparse |
||||
import logging |
||||
import os |
||||
import random |
||||
import sys |
||||
import threading |
||||
|
||||
from utils import utils |
||||
|
||||
|
||||
logging.basicConfig(level=logging.INFO, format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s') |
||||
LOGGER = logging.getLogger(__file__) |
||||
LOGGER.setLevel(logging.INFO) |
||||
|
||||
def terminate_instances_by_region(region_number, region_config, node_name_tag):
    """Terminate every instance tagged Name=node_name_tag in one region and
    block until EC2 reports them terminated."""
    ec2_client, _ = utils.create_ec2_client(region_number, region_config)
    filters = [{'Name': 'tag:Name', 'Values': [node_name_tag]}]
    instance_ids = utils.get_instance_ids(ec2_client.describe_instances(Filters=filters))
    if instance_ids:
        ec2_client.terminate_instances(InstanceIds=instance_ids)
        LOGGER.info("waiting until instances with tag %s died." % node_name_tag)
        waiter = ec2_client.get_waiter('instance_terminated')
        waiter.wait(InstanceIds=instance_ids)
        LOGGER.info("instances with node name tag %s terminated." % node_name_tag)
    else:
        # FIX: removed the dead `pass` statement that preceded this call and
        # switched from the deprecated warn() alias to warning().
        LOGGER.warning("there is no instances to terminate")
||||
|
||||
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='This script helps you to collect public ips')
    parser.add_argument('--instance_output', type=str, dest='instance_output',
                        default='instance_output.txt',
                        help='the file contains node_name_tag and region number of created instances.')
    parser.add_argument('--node_name_tag', type=str, dest='node_name_tag')
    parser.add_argument('--region_number', type=str, dest='region_number')
    parser.add_argument('--region_config', type=str,
                        dest='region_config', default='configuration.txt')
    args = parser.parse_args()

    if not args.instance_output or not os.path.isfile(args.instance_output):
        LOGGER.info("%s is not existed" % args.instance_output)
        sys.exit(1)
    # Mode 1: explicit region + tag -- terminate just that group, synchronously.
    if args.region_number and args.node_name_tag:
        ec2_client, session = utils.create_ec2_client(args.region_number, args.region_config)
        filters = [{'Name': 'tag:Name','Values': [args.node_name_tag]}]
        instance_ids = utils.get_instance_ids(ec2_client.describe_instances(Filters=filters))
        ec2_client.terminate_instances(InstanceIds=instance_ids)
        LOGGER.info("waiting until instances with tag %s died." % args.node_name_tag)
        waiter = ec2_client.get_waiter('instance_terminated')
        waiter.wait(InstanceIds=instance_ids)
    # Mode 2: terminate every "<node_name_tag> <region_number>" group listed
    # in instance_output, one worker thread per line.
    elif args.instance_output:
        with open(args.instance_output, "r") as fin:
            thread_pool = []
            for line in fin.readlines():
                items = line.split(" ")
                region_number = items[1].strip()
                node_name_tag = items[0].strip()
                t = threading.Thread(target=terminate_instances_by_region, args=(region_number, args.region_config, node_name_tag))
                t.start()
                thread_pool.append(t)
            for t in thread_pool:
                t.join()
            LOGGER.info("done.")
@ -0,0 +1,8 @@ |
||||
1,us-east-1,virginia-key-benchmark,virginia-security-group,virginia,ami-b70554c8,sg-04d0b62ee08ce8800 |
||||
2,us-east-2,ohio-key-benchmark,ohio-security-group,ohio,ami-8c122be9,sg-0789078f1c76defbe |
||||
3,us-west-1,california-key-benchmark,california-security-group,california,ami-e0ba5c83,sg-0a66ccb6ab9161a14 |
||||
4,us-west-2,oregon-key-benchmark,oregon-security-group,oregon,ami-a9d09ed1,sg-020cb5729fa212d43 |
||||
5,ap-northeast-1,tokyo-key-benchmark,tokyo-security-group,tokyo,ami-e99f4896,sg-009aeb97f675c1ad5 |
||||
6,ap-southeast-1,singapore-key-benchmark,singapore-security-group,singapore,ami-05868579,sg-05f9b60044a19dfb2 |
||||
7,eu-central-1,frankfurt-key-benchmark,frankfurt-security-group,frankfurt,ami-7c4f7097,sg-0bb06fcd8b25b5910 |
||||
8,eu-west-1,ireland-key-benchmark,ireland-security-group,ireland,ami-466768ac,sg-0aa8954acb79fdb58 |
@ -0,0 +1,46 @@ |
||||
|
||||
import utils |
||||
|
||||
|
||||
def get_launch_template_name(config, region_number):
    """Return the per-region launch template name, e.g. 'benchmark-virginia'."""
    region_name = config[region_number][utils.REGION_NAME]
    return 'benchmark-' + region_name
||||
|
||||
|
||||
def create(config, ec2_client, region_number):
    """Create the EC2 launch template for one region.

    The template pins the IAM instance profile, region AMI, key pair,
    base64 user-data and security group, and tags created instances
    with LaunchTemplate=Yes.  Returns the boto3 response dict.
    """
    template_data = {
        'IamInstanceProfile': {
            'Name': utils.IAM_INSTANCE_PROFILE
        },
        'ImageId': config[region_number][utils.REGION_AMI],
        # 'InstanceType': instance_type,
        'KeyName': config[region_number][utils.REGION_KEY],
        'UserData': utils.USER_DATA_BASE64,
        'SecurityGroupIds': [
            config[region_number][utils.REGION_SECURITY_GROUP_ID]
        ],
        # 'InstanceInitiatedShutdownBehavior': 'stop',
        'TagSpecifications': [
            {
                'ResourceType': 'instance',
                'Tags': [
                    {
                        'Key': 'LaunchTemplate',
                        'Value': 'Yes'
                    }
                ]
            }
        ],
        # Spot-market options intentionally left disabled here; the spot
        # path goes through the spot-fleet request instead.
        # 'InstanceMarketOptions': {
        #     'MarketType': 'spot',
        #     'SpotOptions': {
        #         'MaxPrice': 'string',
        #         'SpotInstanceType': 'one-time'|'persistent',
        #         'BlockDurationMinutes': 123,
        #         'InstanceInterruptionBehavior': 'hibernate'|'stop'|'terminate'
        #     }
        # },
    }
    return ec2_client.create_launch_template(
        # DryRun=True,
        LaunchTemplateName=get_launch_template_name(config, region_number),
        LaunchTemplateData=template_data
    )
@ -0,0 +1,9 @@ |
||||
import logging |
||||
|
||||
# Configure the root logger once at import time so every logger handed out
# by getLogger shares the same threaded, timestamped format.
logging.basicConfig(level=logging.INFO,
                    format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s')
||||
|
||||
def getLogger(file):
    """Return a logger named after *file*, forced to INFO level.

    Name kept camelCase to mirror logging.getLogger for existing callers.
    """
    module_logger = logging.getLogger(file)
    module_logger.setLevel(logging.INFO)
    return module_logger
@ -0,0 +1,103 @@ |
||||
import utils |
||||
import logger |
||||
import launch_template |
||||
LOGGER = logger.getLogger(__file__) |
||||
|
||||
|
||||
def create_launch_specification(config, region_number, instanceType):
    """Build one spot-fleet launch specification for a region/instance type."""
    region = config[region_number]
    spec = {
        # Region irrelevant fields
        'IamInstanceProfile': {
            'Name': utils.IAM_INSTANCE_PROFILE
        },
        'InstanceType': instanceType,
        'UserData': utils.USER_DATA_BASE64,
    }
    # Region relevant fields
    spec['SecurityGroups'] = [
        {
            # In certain scenarios, we have to use group id instead of group name
            # https://github.com/boto/boto/issues/350#issuecomment-27359492
            'GroupId': region[utils.REGION_SECURITY_GROUP_ID]
        }
    ]
    spec['ImageId'] = region[utils.REGION_AMI]
    spec['KeyName'] = region[utils.REGION_KEY]
    spec['TagSpecifications'] = [
        {
            'ResourceType': 'instance',
            'Tags': [
                {
                    'Key': 'Name',
                    'Value': utils.get_node_name_tag(region_number)
                }
            ]
        }
    ]
    # 'WeightedCapacity': 123.0,
    # 'Placement': {
    #     # 'AvailabilityZone': get_one_availability_zone(ec2_client)
    # }
    return spec
||||
|
||||
|
||||
def create_launch_specification_list(config, region_number, instance_type_list):
    """Build one launch specification per instance type for a spot-fleet request.

    Uses a list comprehension instead of the previous map/lambda, which
    shadowed the builtin name `type`.
    """
    return [
        create_launch_specification(config, region_number, instance_type)
        for instance_type in instance_type_list
    ]
||||
|
||||
|
||||
def get_launch_template(config, region_number, instance_type):
    """Reference the region's launch template (version 1) with an instance-type override."""
    template_name = launch_template.get_launch_template_name(config, region_number)
    return {
        'LaunchTemplateSpecification': {
            'LaunchTemplateName': template_name,
            'Version': '1'
        },
        'Overrides': [
            {
                'InstanceType': instance_type
            }
        ]
    }
||||
|
||||
|
||||
def get_launch_template_list(config, region_number, instance_type_list):
    """Build one launch-template config per instance type for a spot-fleet request.

    Uses a list comprehension instead of the previous map/lambda, which
    shadowed the builtin name `type`.
    """
    return [
        get_launch_template(config, region_number, instance_type)
        for instance_type in instance_type_list
    ]
||||
|
||||
|
||||
def request_spot_fleet(config, ec2_client, region_number, number_of_instances, instance_type_list):
    """Request a diversified spot fleet of number_of_instances for one region.

    Returns the boto3 request_spot_fleet response.
    """
    LOGGER.info("Requesting spot fleet")
    LOGGER.info("Creating node_name_tag: %s" %
                utils.get_node_name_tag(region_number))
    # https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.request_spot_fleet
    fleet_config = {
        # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet.html#spot-fleet-allocation-strategy
        'AllocationStrategy': 'diversified',
        # NOTE(review): account-specific fleet role is hard-coded — confirm
        # before reusing outside this account.
        'IamFleetRole': 'arn:aws:iam::656503231766:role/RichardFleetRole',
        'LaunchSpecifications': create_launch_specification_list(config, region_number, instance_type_list),
        # 'SpotPrice': 'string', # The maximum price per unit hour that you are willing to pay for a Spot Instance. The default is the On-Demand price.
        'TargetCapacity': number_of_instances,
        'Type': 'maintain'
    }
    return ec2_client.request_spot_fleet(
        # DryRun=True,
        SpotFleetRequestConfig=fleet_config
    )
||||
|
||||
|
||||
def request_spot_fleet_with_on_demand(config, ec2_client, region_number, number_of_instances, number_of_on_demand, instance_type_list):
    """Request a diversified spot fleet that also keeps number_of_on_demand
    on-demand instances, using the region's launch template.

    Returns the boto3 request_spot_fleet response.
    """
    LOGGER.info("Requesting spot fleet")
    LOGGER.info("Creating node_name_tag: %s" %
                utils.get_node_name_tag(region_number))
    # https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.request_spot_fleet
    fleet_config = {
        # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet.html#spot-fleet-allocation-strategy
        'AllocationStrategy': 'diversified',
        # NOTE(review): account-specific fleet role is hard-coded — confirm
        # before reusing outside this account.
        'IamFleetRole': 'arn:aws:iam::656503231766:role/RichardFleetRole',
        'LaunchTemplateConfigs': get_launch_template_list(config, region_number, instance_type_list),
        # 'SpotPrice': 'string', # The maximum price per unit hour that you are willing to pay for a Spot Instance. The default is the On-Demand price.
        'TargetCapacity': number_of_instances,
        'OnDemandTargetCapacity': number_of_on_demand,
        'Type': 'maintain'
    }
    return ec2_client.request_spot_fleet(
        # DryRun=True,
        SpotFleetRequestConfig=fleet_config
    )
@ -0,0 +1,172 @@ |
||||
import boto3 |
||||
import datetime |
||||
import json |
||||
import sys |
||||
import time |
||||
import base64 |
||||
|
||||
|
||||
# Keys of the per-region dicts produced by read_region_config below.
REGION_NAME = 'region_name'
REGION_KEY = 'region_key'
REGION_SECURITY_GROUP = 'region_security_group'
REGION_SECURITY_GROUP_ID = 'region_security_group_id'
REGION_HUMAN_NAME = 'region_human_name'
# Default EC2 instance type for benchmark nodes.
INSTANCE_TYPE = 't2.micro'
REGION_AMI = 'region_ami'
# IAM instance profile that lets launched instances work with CodeDeploy.
IAM_INSTANCE_PROFILE = 'BenchMarkCodeDeployInstanceProfile'
# Timestamp of this launch session; NODE_NAME_SUFFIX makes the instance
# Name tags unique per run (e.g. "2-NODE-23-36-01-2018-07-05").
time_stamp = time.time()
CURRENT_SESSION = datetime.datetime.fromtimestamp(
    time_stamp).strftime('%H-%M-%S-%Y-%m-%d')
NODE_NAME_SUFFIX = "NODE-" + CURRENT_SESSION
||||
|
||||
def get_node_name_tag(region_number):
    """Return the session-unique instance Name tag for region_number."""
    return "-".join((region_number, NODE_NAME_SUFFIX))
||||
|
||||
# Bootstrap script injected into every instance at launch (read once at
# import time; the module therefore requires user-data.sh in the CWD).
with open("user-data.sh", "r") as userdata_file:
    USER_DATA = userdata_file.read()

# UserData must be base64 encoded for spot instances.
# NOTE(review): under Python 3 base64.b64encode requires bytes — this line
# assumes Python 2 str semantics; confirm before porting.
USER_DATA_BASE64 = base64.b64encode(USER_DATA)
||||
|
||||
def read_region_config(region_config='configuration.txt'):
    """Parse the comma-separated region configuration file.

    Each line is: region_number, region_name, key, security_group,
    human_name, ami, security_group_id.  Returns a dict keyed by region
    number whose values are dicts keyed by the REGION_* constants.
    """
    config = {}
    with open(region_config, 'r') as config_file:
        for raw_line in config_file:
            fields = [field.strip() for field in raw_line.strip().split(',')]
            config[fields[0]] = {
                REGION_NAME: fields[1],
                REGION_KEY: fields[2],
                REGION_SECURITY_GROUP: fields[3],
                REGION_HUMAN_NAME: fields[4],
                REGION_AMI: fields[5],
                REGION_SECURITY_GROUP_ID: fields[6],
            }
    return config
||||
|
||||
def get_ip_list(response):
    """Extract PublicIpAddress values (None when absent) from a response
    containing an 'Instances' list; returns [] when the list is missing/empty."""
    instances = response.get('Instances', None)
    if not instances:
        return []
    return [instance.get('PublicIpAddress', None) for instance in instances]
||||
|
||||
def create_ec2_client(region_number, region_config):
    """Create a boto3 EC2 client for the region mapped to region_number.

    Returns (ec2_client, session); the session is exposed so callers can
    create further per-region clients from it.
    """
    region_name = read_region_config(region_config)[region_number][REGION_NAME]
    session = boto3.Session(region_name=region_name)
    return session.client('ec2'), session
||||
|
||||
def collect_public_ips_from_ec2_client(ec2_client, node_name_tag):
    """Return the public IPs of all instances whose Name tag equals node_name_tag."""
    name_filter = [{'Name': 'tag:Name', 'Values': [node_name_tag]}]
    response = ec2_client.describe_instances(Filters=name_filter)
    public_ips = []
    for reservation in response.get('Reservations') or []:
        for instance in reservation['Instances']:
            ip = instance.get('PublicIpAddress')
            if ip:
                public_ips.append(ip)
    return public_ips
||||
|
||||
def collect_public_ips(region_number, node_name_tag, region_config):
    """Collect public IPs of all instances tagged node_name_tag in one region."""
    ec2_client = create_ec2_client(region_number, region_config)[0]
    return collect_public_ips_from_ec2_client(ec2_client, node_name_tag)
||||
|
||||
def get_application(codedeploy, application_name):
    """Return the CodeDeploy application listing if application_name exists,
    otherwise create the application and return the creation response."""
    listing = codedeploy.list_applications()
    if application_name in listing['applications']:
        return listing
    return codedeploy.create_application(
        applicationName=application_name,
        computePlatform='Server'
    )
||||
|
||||
def create_deployment_group(codedeploy, region_number, application_name, deployment_group_name, node_name_tag):
    """Create an in-place deployment group targeting instances tagged node_name_tag.

    Returns the new deploymentGroupId, or None if the group already exists.
    (region_number is currently unused but kept for interface compatibility.)
    """
    existing = codedeploy.list_deployment_groups(applicationName=application_name)
    if deployment_group_name in (existing.get('deploymentGroups') or []):
        return None
    response = codedeploy.create_deployment_group(
        applicationName=application_name,
        deploymentGroupName=deployment_group_name,
        deploymentConfigName='CodeDeployDefault.AllAtOnce',
        serviceRoleArn='arn:aws:iam::656503231766:role/BenchMarkCodeDeployServiceRole',
        deploymentStyle={
            'deploymentType': 'IN_PLACE',
            'deploymentOption': 'WITHOUT_TRAFFIC_CONTROL'
        },
        ec2TagFilters=[
            {
                'Key': 'Name',
                'Value': node_name_tag,
                'Type': 'KEY_AND_VALUE'
            }
        ]
    )
    return response['deploymentGroupId']
||||
|
||||
def generate_distribution_config2(region_number, node_name_tag, region_config,
                                  shard_number, client_number, distribution_config):
    """Collect live public IPs for node_name_tag and write a distribution config."""
    public_ips = collect_public_ips(region_number, node_name_tag, region_config)
    generate_distribution_config(shard_number, client_number, public_ips, distribution_config)
||||
|
||||
def generate_distribution_config3(shard_number, client_number, ip_list_file, distribution_config):
    """Read "<ip> <tag>" lines from ip_list_file and write a distribution config."""
    with open(ip_list_file, "r") as fin:
        ip_list = [line.strip() for line in fin.readlines()]
    generate_distribution_config(shard_number, client_number, ip_list, distribution_config)
||||
|
||||
def generate_distribution_config(shard_number, client_number, ip_list, distribution_config):
    """Write the role-distribution config file for a benchmark run.

    Each ip_list entry must be "<ip> <node_name_tag>".  Roles are assigned
    in order: 1 commander, then len(ip_list) - shard_number - client_number - 1
    validators, then shard_number leaders, then client_number clients.
    Each output line is "<ip> 9000 <role> <shard id> <node_name_tag>".

    Returns True on success, False when ip_list is too small — at least
    shard_number * 2 + client_number + 1 entries are required so every
    shard gets a leader and at least one validator.  (Previously the
    success path implicitly returned None, making the result useless.)
    """
    if len(ip_list) < shard_number * 2 + client_number + 1:
        print("Not enough nodes to generate a config file")
        return False

    client_id, leader_id, validator_id, commander_id = 0, 0, 0, 0
    validator_number = len(ip_list) - client_number - shard_number - 1
    with open(distribution_config, "w") as fout:
        for entry in ip_list:
            ip, node_name_tag = entry.split(" ")
            if commander_id < 1:
                fout.write("%s 9000 commander %d %s\n" % (ip, commander_id % shard_number, node_name_tag))
                commander_id = commander_id + 1
            elif validator_id < validator_number:
                fout.write("%s 9000 validator %d %s\n" % (ip, validator_id % shard_number, node_name_tag))
                validator_id = validator_id + 1
            elif leader_id < shard_number:
                fout.write("%s 9000 leader %d %s\n" % (ip, leader_id, node_name_tag))
                leader_id = leader_id + 1
            else:
                fout.write("%s 9000 client %d %s\n" % (ip, client_id % shard_number, node_name_tag))
                client_id = client_id + 1
    return True
||||
|
||||
def get_availability_zones(ec2_client):
    """List names of this region's zones that are in the 'available' state."""
    response = ec2_client.describe_availability_zones()
    zones = response.get('AvailabilityZones', None)
    if not zones:
        return []
    return [zone['ZoneName'] for zone in zones if zone['State'] == 'available']
||||
|
||||
def get_one_availability_zone(ec2_client):
    """Return the first available zone name, or None when none exist."""
    zones = get_availability_zones(ec2_client)
    return zones[0] if zones else None
||||
|
||||
def get_instance_ids2(ec2_client, node_name_tag):
    """Look up instance ids of instances whose Name tag equals node_name_tag."""
    name_filter = [{'Name': 'tag:Name', 'Values': [node_name_tag]}]
    return get_instance_ids(ec2_client.describe_instances(Filters=name_filter))
||||
|
||||
# Get instance_ids from describe_instances_response.
def get_instance_ids(describe_instances_response):
    """Extract every InstanceId from a describe_instances response.

    Uses .get so a response without a "Reservations" key yields [] instead
    of raising KeyError, consistent with collect_public_ips_from_ec2_client.
    """
    instance_ids = []
    for reservation in describe_instances_response.get("Reservations") or []:
        instance_ids.extend(
            instance["InstanceId"]
            for instance in reservation["Instances"]
            if instance.get("InstanceId")
        )
    return instance_ids
||||
|
||||
# used for testing only. |
||||
# if __name__ == "__main__": |
||||
# ip_list = collect_public_ips('4', "4-NODE-23-36-01-2018-07-05", "configuration.txt") |
||||
# print ip_list |
||||
# generate_distribution_config(2, 1, ip_list, "config_test.txt") |
@ -0,0 +1,31 @@ |
||||
import unittest |
||||
|
||||
from utils import generate_distribution_config |
||||
|
||||
class TestCreateAndDeploy(unittest.TestCase):
    """Sanity checks for utils.generate_distribution_config.

    Fixed fixtures: the generator splits each entry on a space, so entries
    must be "<ip> <node_name_tag>" (the old plain-IP fixtures raised
    ValueError), and (shard=2, client=2) needs at least
    2 * 2 + 2 + 1 = 7 nodes — 6 was below the minimum and the expected
    validator count was wrong.
    """

    def test_generate_config_file(self):
        # 7 nodes -> 1 commander, 7 - 2 - 2 - 1 = 2 validators,
        # 2 leaders, 2 clients.
        ips = ["102.000.000.%d NODE" % i for i in range(1, 8)]
        generate_distribution_config(2, 2, ips, "config_test.txt")
        with open("config_test.txt", "r") as fin:
            lines = fin.readlines()
        seen_ips = set()
        counts = {"commander": 0, "leader": 0, "validator": 0, "client": 0}
        for line in lines:
            fields = line.split(" ")
            # every node must appear exactly once
            self.assertNotIn(fields[0], seen_ips)
            seen_ips.add(fields[0])
            counts[fields[2]] += 1
        self.assertEqual(counts["commander"], 1)
        self.assertEqual(counts["validator"], 2)
        self.assertEqual(counts["leader"], 2)
        self.assertEqual(counts["client"], 2)
||||
|
||||
if __name__ == '__main__': |
||||
unittest.main() |
@ -0,0 +1,34 @@ |
||||
#!/bin/bash -x
# EC2 user-data: bootstraps a benchmark instance at boot and starts the
# soldier agent.  Progress is appended to ./tmplog for debugging.
echo "Run Instances starts" >> tmplog

echo "Update systcl" >> tmplog
# Raise kernel network limits so the node can hold many concurrent TCP peers.
sudo sysctl net.core.somaxconn=1024
sudo sysctl net.core.netdev_max_backlog=65536;
sudo sysctl net.ipv4.tcp_tw_reuse=1;
sudo sysctl -w net.ipv4.tcp_rmem='65536 873800 1534217728';
sudo sysctl -w net.ipv4.tcp_wmem='65536 873800 1534217728';
sudo sysctl -w net.ipv4.tcp_mem='65536 873800 1534217728';

echo "Setup path" >> tmplog
# Stop any previously running node, then set up the Go environment.
./kill_node.sh
MyHOME=/home/ec2-user
source ~/.bash_profile
export GOROOT=/usr/lib/golang
export GOPATH=$MyHOME/projects
export PATH=$PATH:$GOROOT/bin

echo "Get ip" >> tmplog
# Get my IP
# Query the EC2 instance metadata service for this instance's public IPv4.
wget http://169.254.169.254/latest/meta-data/public-ipv4
ip=$(head -n 1 public-ipv4)
echo "Current IP is >>>"
echo $ip
echo ">>>>"

echo "Run soldier" >> tmplog
# Run soldier
# Start the soldier agent in the background; it listens on node_port for
# deployment commands and logs to soldier_log.
cd $GOPATH/src/harmony-benchmark/bin/
node_port=9000
./soldier -ip $ip -port $node_port > soldier_log 2>&1 &

echo "Run Instances done" >> tmplog
@ -1,4 +0,0 @@ |
||||
#!/bin/bash -x
# Boot-time entry point: record progress, then launch one benchmark instance
# using the global node list.
echo "Run Instances" >> tmplog
cd /home/ec2-user/projects/src/harmony-benchmark
./deploy_one_instance.sh global_nodes.txt
@ -1,38 +0,0 @@ |
||||
#!/bin/bash -x
# Configure one instance and start it as either a txgen client or a
# benchmark node, depending on the presence of a marker file.
##The commented suffix is for linux
##Reference: https://github.com/Zilliqa/Zilliqa/blob/master/tests/Node/test_node_simple.sh
# Raise kernel network limits so the node can hold many concurrent TCP peers.
sudo sysctl net.core.somaxconn=1024
sudo sysctl net.core.netdev_max_backlog=65536;
sudo sysctl net.ipv4.tcp_tw_reuse=1;
sudo sysctl -w net.ipv4.tcp_rmem='65536 873800 1534217728';
sudo sysctl -w net.ipv4.tcp_wmem='65536 873800 1534217728';
sudo sysctl -w net.ipv4.tcp_mem='65536 873800 1534217728';

# Stop any previously running node, then set up the Go environment.
./kill_node.sh
MyHOME=/home/ec2-user
source ~/.bash_profile
export GOROOT=/usr/lib/golang
export GOPATH=$MyHOME/projects
export PATH=$PATH:$GOROOT/bin
# Query the EC2 instance metadata service for this instance's public IPv4.
wget http://169.254.169.254/latest/meta-data/public-ipv4 #Calling for public IPv4
current_ip=$(head -n 1 public-ipv4)
echo "Current IP is >>>"
echo $current_ip
echo ">>>>"
python aws-scripts/preprocess_peerlist.py
# The presence of isTransaction.txt selects the transaction-generator role.
FILE='isTransaction.txt'
config=$1

# Per-run log directory stamped with the start time.
t=`date +"%Y%m%d-%H%M%S"`
log_folder="logs/log-$t"

if [ ! -d $log_folder ]
then
mkdir -p $log_folder
fi

# Run as txgen client if the marker file exists, otherwise as a node.
if [ -f $FILE ]; then
go run ./client/txgen/main.go -config_file $config -log_folder $log_folder&
else
go run ./benchmark.go -ip $current_ip -config_file $config -log_folder $log_folder&
fi
@ -0,0 +1,18 @@ |
||||
package node |
||||
|
||||
import ( |
||||
"harmony-benchmark/blockchain" |
||||
"strconv" |
||||
) |
||||
|
||||
// AddTestingAddresses creates in genesis block numAddress transactions which assign 1000 token to each address in [0 - numAddress)
|
||||
// This is used by client code.
|
||||
// TODO: Consider to remove it later when moving to production.
|
||||
func (node *Node) AddTestingAddresses(numAddress int) { |
||||
txs := make([]*blockchain.Transaction, numAddress) |
||||
for i := range txs { |
||||
txs[i] = blockchain.NewCoinbaseTX(strconv.Itoa(i), "", node.Consensus.ShardID) |
||||
} |
||||
node.blockchain.Blocks[0].Transactions = append(node.blockchain.Blocks[0].Transactions, txs...) |
||||
node.UtxoPool.Update(txs) |
||||
} |
@ -0,0 +1,10 @@ |
||||
# Build and start the commander binary that coordinates the benchmark run.
cd ~/projects/src/harmony-benchmark/
# Compile
sudo go build -o bin/commander aws-experiment-launch/experiment/commander/main.go
cd bin
# Config files are staged in /tmp by the deployment step.
sudo cp /tmp/distribution_config.txt .
sudo cp /tmp/commander_info.txt .
# Take ip address
IP=`head -n 1 commander_info.txt`
# Run commander
sudo ./commander -ip $IP -port 9000 -config_file distribution_config.txt
Loading…
Reference in new issue