remove new-pipeline dir

pull/43/head
Minh Doan 6 years ago
parent 561009dfee
commit fc3397ae16
  1. aws-experiment-launch/commander_prepare.py (2 lines changed)
  2. aws-experiment-launch/new-pipeline/california-key-benchmark.pem (23 lines changed)
  3. aws-experiment-launch/new-pipeline/collect_public_ips.py (35 lines changed)
  4. aws-experiment-launch/new-pipeline/commander_prepare.py (62 lines changed)
  5. aws-experiment-launch/new-pipeline/configuration-git.txt (8 lines changed)
  6. aws-experiment-launch/new-pipeline/configuration.txt (8 lines changed)
  7. aws-experiment-launch/new-pipeline/create_deploy_soldiers.sh (28 lines changed)
  8. aws-experiment-launch/new-pipeline/create_solider_instances.py (193 lines changed)
  9. aws-experiment-launch/new-pipeline/generate_distribution_config.py (37 lines changed)
  10. aws-experiment-launch/new-pipeline/terminate_instances.py (105 lines changed)
  11. aws-experiment-launch/new-pipeline/upload_binaries.py (15 lines changed)
  12. aws-experiment-launch/new-pipeline/upload_config.py (10 lines changed)
  13. aws-experiment-launch/new-pipeline/upload_s3.py (9 lines changed)
  14. aws-experiment-launch/new-pipeline/userdata-commander.sh (11 lines changed)
  15. aws-experiment-launch/new-pipeline/userdata-soldier.sh (23 lines changed)
  16. aws-experiment-launch/new-pipeline/utils/__init__.py (0 lines changed)
  17. aws-experiment-launch/new-pipeline/utils/configuration.txt (8 lines changed)
  18. aws-experiment-launch/new-pipeline/utils/launch_template.py (46 lines changed)
  19. aws-experiment-launch/new-pipeline/utils/logger.py (9 lines changed)
  20. aws-experiment-launch/new-pipeline/utils/spot_fleet.py (119 lines changed)
  21. aws-experiment-launch/new-pipeline/utils/utils.py (210 lines changed)
  22. aws-experiment-launch/new-pipeline/utils/utils_test.py (31 lines changed)
  23. aws-experiment-launch/new-pipeline/virginia-key-benchmark.pem (23 lines changed)

@@ -54,7 +54,7 @@ if __name__ == "__main__":
sys.exit(1)
with open(args.commander_logging, "w") as fout:
fout.write("ssh -i ../keys/%s ec2-user@%s\n" % (PEMS[commander_region - 1], commander_address))
fout.write("ssh -i ./keys/%s ec2-user@%s\n" % (PEMS[commander_region - 1], commander_address))
st = os.stat(args.commander_logging)
os.chmod(args.commander_logging, st.st_mode | stat.S_IEXEC)
LOGGER.info("Generated %s" % args.commander_logging)

@@ -1,23 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAo0qyKcemMIx2KEbidscUCP8KH8v43wKiZuBJbNxFaWelbszmIuBi0/w5Y6Ys
Lv80/A6HjnM2TnGCUsPef6wji5ApkbwIOuWMYq56UKeAdBG44EA5oLSuhB7FAMgGx/iKh9Dempzf
jnhK9dZ/DBZuADwzWzEC2+3flQ/ruFK97FOekHZa6oVOHS6nENc3sb9TBN5vTPnchzxbzntVCzuf
1UyZxLOTGJygbKEqMeVZTgWbyyWzAvS71yizQhZlpMyUU2EAcrFPeB7Yzc3Lw9HgIJooTIbWB8sZ
xSZrPzbuS8ZH6k6uTSjs80Mn1wClo+tjfaIvrX5z1da3TDJneIa3FwIDAQABAoIBABUTkBl6boX3
q0yZkaN5IKnkmV7vSeknAuU6b9/AMqlfOmgpxE6eHcN028dz7GIIiMM78N0/G7RWTFb4dyCNjGz4
J5Vq6rkHZe5azOjaJedVXkJ7p7SJfbkJ662lI9LtzpunJwRX2TcfabV/gRiDUKFxI8PSeKcAdgpc
arV44JC/KGJB3FKBuyj/5p+y5eYdRKQq/2WFoKiumJMC1I6AUtso2Rr5cLjDaFoM80XON9QtNnnG
eQ7mM7RzCpCLzqRcAJ224a1f75SaLsHtZTkG0kGF+dzNqf9zmaps9Q6sPJk82rV8dtJMjj/BDoqT
JoEPcoOrN+nsnHFYBqCrE5wftwECgYEA+zBxiqI7/by48SdIkCbwDA8MI1b4F570j/Kq4hPdugOC
a1UIb9vTK4favUx3x1ZvrwKiiUww+mUl/+YGQiohrP3mfbQqk2c5Sai7+oiSYAZbvGmjFbbW0fnw
kS3PdhG1Cu7vzGHP21OoZL+cUuc2xqHgu74RbTrVPPKJpnwmt4ECgYEApmtMu5IohK40iL2owP5Z
5FkgmhX+rUMYCFZW5KgcJ4HXnESteiX+mJdCxNHFpf1dX2wWu8FCHkZBzMNm7dEUvH062r48Qdhf
FkTAQJa70JfkMaKvP/b2XXY2FE5v3+0IX2GgTZq+ljLy6TwUqrzEe9ZYuR0T6yhiHEzWHFC9epcC
gYAIeG+7vxDK06xg5XQ6+DUsEu3T20DuevvARuygRw85EJk7SVjJfN44H5Kuw39eh3moqywT+S/F
odeQM4+Od6E/FijJoGO3KMnzw+7i2ewltNEvH1jSiKHh3s3P932Be8NhJQGNINdDjCSnkRaSbR2Z
wSe07QnlZxB9x6gApzsfAQKBgHtCzDG9vs8PLI//UeeoPLJ6JUCbe+ee40UBlhiEP14vuJ/g6lCn
pZwlpOmcqfUY5TjRGUF1keBUj1X3gNwyWVaGtrnf30ex2DPpNjBgtLl6VyVAZudMpwtwi/ucYRaN
PP9QkugH7pPXJr4DpxcS+8DxcQOi+ubZIVnuu0N7tev/AoGBAO8wPte0VjZX3NAg/X+7I568iS/7
mZ0X2ReWgNyilvgxcf9nCOr6KMze/sUqDV81BxfnciuyEDWWP2QTLEKB+mJWJahfY6Qy7mcI5TnF
UYSXZ7b2WU2bk6QF4TkabWMO73KfFry2W3g0YhNLjaKj2zPcPp6FbvhNy/2X6YqazZOA
-----END RSA PRIVATE KEY-----

@@ -1,35 +0,0 @@
import argparse
import os
import random
import sys
from utils import utils
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='This script helps you to collect public ips')
parser.add_argument('--instance_output', type=str, dest='instance_output',
default='instance_output.txt',
help='the file that contains the node_name_tag and region number of created instances.')
parser.add_argument('--region_config', type=str,
dest='region_config', default='configuration.txt')
parser.add_argument('--file_output', type=str,
dest='file_output', default='raw_ip.txt')
args = parser.parse_args()
if not args.instance_output or not os.path.isfile(args.instance_output):
print "%s or %s are not existed" % (args.file_output, args.instance_output)
sys.exit(1)
if args.instance_output:
with open(args.instance_output, "r") as fin, open(args.file_output, "w") as fout:
total_ips = []
node_name_tag_list = []
for line in fin.readlines():
items = line.split(" ")
region_number = items[1].strip()
node_name_tag = items[0].strip()
ip_list = utils.collect_public_ips(region_number, node_name_tag, args.region_config)
total_ips.extend([(ip, node_name_tag) for ip in ip_list])
random.shuffle(total_ips)
for pair in total_ips:
fout.write(pair[0] + " " + pair[1] + "\n")
print "Done collecting public ips %s" % args.file_output

@@ -1,62 +0,0 @@
import argparse
import logging
import os
import stat
import sys
from utils import utils
logging.basicConfig(level=logging.INFO, format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s')
LOGGER = logging.getLogger(__file__)
LOGGER.setLevel(logging.INFO)
PEMS = [
"virginia-key-benchmark.pem",
"ohio-key-benchmark.pem",
"california-key-benchmark.pem",
"oregon-key-benchmark.pem",
"tokyo-key-benchmark.pem",
"singapore-key-benchmark.pem",
"frankfurt-key-benchmark.pem",
"ireland-key-benchmark.pem",
]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='This script helps you to generate distribution config')
parser.add_argument('--distribution_config', type=str,
dest='distribution_config', default='distribution_config.txt')
parser.add_argument('--commander_logging', type=str,
dest='commander_logging', default='commander_logging.sh')
args = parser.parse_args()
if not os.path.exists(args.distribution_config):
sys.exit(1)
with open(args.distribution_config, "r") as fin:
lines = fin.readlines()
commander_address = None
commander_region = None
commander_output = None
with open(args.distribution_config, "w") as fout:
for line in lines:
if "commander" in line:
items = [item.strip() for item in line.split(" ")]
commander_address = items[0]
commander_region = int(items[4][0])
commander_output = "\n".join(items)
else:
fout.write(line.strip() + "\n")
LOGGER.info("Generated %s" % args.distribution_config)
if not commander_address or not commander_region:
LOGGER.info("Failed to extract commander address and commander region.")
sys.exit(1)
with open(args.commander_logging, "w") as fout:
fout.write("ssh -i ../keys/%s ec2-user@%s\n" % (PEMS[commander_region - 1], commander_address))
st = os.stat(args.commander_logging)
os.chmod(args.commander_logging, st.st_mode | stat.S_IEXEC)
LOGGER.info("Generated %s" % args.commander_logging)
LOGGER.info("DONE.")

@@ -1,8 +0,0 @@
1,us-east-1,virginia-key-benchmark,virginia-security-group,virginia,ami-97785bed,sg-04d0b62ee08ce8800
2,us-east-2,ohio-key-benchmark,ohio-security-group,ohio,ami-f63b1193,sg-0789078f1c76defbe
3,us-west-1,california-key-benchmark,california-security-group,california,ami-824c4ee2,sg-0a66ccb6ab9161a14
4,us-west-2,oregon-key-benchmark,oregon-security-group,oregon,ami-f2d3638a,sg-020cb5729fa212d43
5,ap-northeast-1,tokyo-key-benchmark,tokyo-security-group,tokyo,ami-ceafcba8,sg-009aeb97f675c1ad5
6,ap-southeast-1,singapore-key-benchmark,singapore-security-group,singapore,ami-68097514,sg-05f9b60044a19dfb2
7,eu-central-1,frankfurt-key-benchmark,frankfurt-security-group,frankfurt,ami-5652ce39,sg-0bb06fcd8b25b5910
8,eu-west-1,ireland-key-benchmark,ireland-security-group,ireland,ami-d834aba1,sg-0aa8954acb79fdb58

@@ -1,8 +0,0 @@
1,us-east-1,virginia-key-benchmark,virginia-security-group,virginia,ami-b70554c8,sg-04d0b62ee08ce8800
2,us-east-2,ohio-key-benchmark,ohio-security-group,ohio,ami-8c122be9,sg-0789078f1c76defbe
3,us-west-1,california-key-benchmark,california-security-group,california,ami-e0ba5c83,sg-0a66ccb6ab9161a14
4,us-west-2,oregon-key-benchmark,oregon-security-group,oregon,ami-a9d09ed1,sg-020cb5729fa212d43
5,ap-northeast-1,tokyo-key-benchmark,tokyo-security-group,tokyo,ami-e99f4896,sg-009aeb97f675c1ad5
6,ap-southeast-1,singapore-key-benchmark,singapore-security-group,singapore,ami-05868579,sg-05f9b60044a19dfb2
7,eu-central-1,frankfurt-key-benchmark,frankfurt-security-group,frankfurt,ami-7c4f7097,sg-0bb06fcd8b25b5910
8,eu-west-1,ireland-key-benchmark,ireland-security-group,ireland,ami-466768ac,sg-0aa8954acb79fdb58

@@ -1,28 +0,0 @@
if [ $# -lt 3 ]; then
echo "Please provide # of instances, # of shards, # of clients"
exit 1
fi
INSTANCE_NUM=$1
SHARD_NUM=$2
CLIENT_NUM=$3
SLEEP_TIME=10
echo "Creating $INSTANCE_NUM instances at 8 regions"
python create_solider_instances.py --regions 1,2,3,4,5,6,7,8 --instances $INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM
echo "Sleep for $SLEEP_TIME seconds"
sleep $SLEEP_TIME
echo "Rung collecint raw ips"
python collect_public_ips.py --instance_output instance_output.txt
# sleep 10
echo "Generate distribution_config"
python generate_distribution_config.py --ip_list_file raw_ip.txt --shard_number $SHARD_NUM --client_number $CLIENT_NUM
echo "Run commander prepare"
python commander_prepare.py
aws s3 cp distribution_config.txt s3://unique-bucket-bin/distribution_config.txt --acl public-read-write

@@ -1,193 +0,0 @@
import argparse
import base64
import boto3
import datetime
import json
import sys
import threading
import time
import enum
#TODO REMOVE UTILS
from utils import utils, spot_fleet, logger
LOGGER = logger.getLogger(__file__)
REGION_NAME = 'region_name'
REGION_KEY = 'region_key'
REGION_SECURITY_GROUP = 'region_security_group'
REGION_SECURITY_GROUP_ID = 'region_security_group_id'
REGION_HUMAN_NAME = 'region_human_name'
INSTANCE_TYPE = 't2.micro'
REGION_AMI = 'region_ami'
class InstanceResource(enum.Enum):
ON_DEMAND = 'ondemand'
SPOT_INSTANCE = 'spot'
SPOT_FLEET = 'fleet'
def __str__(self):
return self.value
def run_one_region_on_demand_instances(config, region_number, number_of_instances, tag):
ec2_client = utils.create_client(config, region_number)
node_name_tag = create_instances(
config, ec2_client, region_number, number_of_instances, tag)
LOGGER.info("Created %s in region %s" % (node_name_tag, region_number))
return node_name_tag, ec2_client
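# Launch the requested number of on-demand instances, tag them with node_name_tag,
# then poll (with retries) until they are running and have public IPs assigned.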
def create_instances(config, ec2_client, region_number, number_of_instances, tag):
node_name_tag = utils.get_node_name_tag2(region_number, tag)
LOGGER.info("Creating node_name_tag: %s" % node_name_tag)
available_zone = utils.get_one_availability_zone(ec2_client)
LOGGER.info("Looking at zone %s to create instances." % available_zone)
time.sleep(2)
ec2_client.run_instances(
MinCount=number_of_instances,
MaxCount=number_of_instances,
ImageId=config[region_number][utils.REGION_AMI],
Placement={
'AvailabilityZone': available_zone,
},
SecurityGroups=[config[region_number][utils.REGION_SECURITY_GROUP]],
IamInstanceProfile={
'Name': utils.IAM_INSTANCE_PROFILE
},
KeyName=config[region_number][utils.REGION_KEY],
UserData=utils.USER_DATA,
InstanceType=utils.INSTANCE_TYPE,
TagSpecifications=[
{
'ResourceType': 'instance',
'Tags': [
{
'Key': 'Name',
'Value': node_name_tag
},
]
},
],
)
retry_count = 10
while retry_count > 0:
try:
time.sleep(20)
instance_ids = utils.get_instance_ids2(ec2_client, node_name_tag)
LOGGER.info("Waiting for all %d instances in region %s with node_name_tag %s to be in RUNNING" % (
len(instance_ids), region_number, node_name_tag))
break
except:
retry_count -= 1
LOGGER.info("Failed to get instance ids. Retry again.")
retry_count = 10
while retry_count > 0:
try:
time.sleep(20)
waiter = ec2_client.get_waiter('instance_running')
waiter.wait(InstanceIds=instance_ids)
break
except:
retry_count -= 1
LOGGER.info("Failed to wait.")
retry_count = 10
while retry_count > 0:
time.sleep(10)
LOGGER.info("Waiting ...")
ip_list = utils.collect_public_ips_from_ec2_client(
ec2_client, node_name_tag)
if len(ip_list) == number_of_instances:
LOGGER.info("Created %d instances" % number_of_instances)
return node_name_tag
retry_count -= 1
LOGGER.info("Can not create %d instances" % number_of_instances)
return None
LOCK_FOR_RUN_ONE_REGION = threading.Lock()
def run_for_one_region_on_demand(config, region_number, number_of_instances, fout, fout2):
tag = 0
number_of_instances = int(number_of_instances)
while number_of_instances > 0:
number_of_creation = min(utils.MAX_INSTANCES_FOR_DEPLOYMENT, number_of_instances)
node_name_tag, ec2_client = run_one_region_on_demand_instances(
config, region_number, number_of_creation, tag)
if node_name_tag:
LOGGER.info("Managed to create instances for region %s with name_name_tag %s" %
(region_number, node_name_tag))
instance_ids = utils.get_instance_ids2(ec2_client, node_name_tag)
LOCK_FOR_RUN_ONE_REGION.acquire()
try:
fout.write("%s %s\n" % (node_name_tag, region_number))
for instance_id in instance_ids:
fout2.write(instance_id + " " + node_name_tag + " " + region_number +
" " + config[region_number][utils.REGION_NAME] + "\n")
finally:
LOCK_FOR_RUN_ONE_REGION.release()
else:
LOGGER.info("Failed to create instances for region %s" % region_number)
number_of_instances -= number_of_creation
tag += 1
def read_region_config(region_config='configuration.txt'):
global CONFIG
config = {}
with open(region_config, 'r') as f:
for myline in f:
mylist = [item.strip() for item in myline.strip().split(',')]
region_num = mylist[0]
config[region_num] = {}
config[region_num][REGION_NAME] = mylist[1]
config[region_num][REGION_KEY] = mylist[2]
config[region_num][REGION_SECURITY_GROUP] = mylist[3]
config[region_num][REGION_HUMAN_NAME] = mylist[4]
config[region_num][REGION_AMI] = mylist[5]
config[region_num][REGION_SECURITY_GROUP_ID] = mylist[6]
CONFIG = config
return config
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='This script helps you start instances across multiple regions')
parser.add_argument('--regions', type=str, dest='regions',
default='3', help="Supply a csv list of all regions")
parser.add_argument('--instances', type=str, dest='num_instance_list',
default='1', help='number of instances per region, in the same order as --regions')
parser.add_argument('--region_config', type=str,
dest='region_config', default='configuration.txt')
parser.add_argument('--instance_output', type=str, dest='instance_output',
default='instance_output.txt', help='the file to append or write')
parser.add_argument('--instance_ids_output', type=str, dest='instance_ids_output',
default='instance_ids_output.txt', help='the file to append or write')
parser.add_argument('--instance_resource', dest='instance_resource', type=InstanceResource,
default=InstanceResource.ON_DEMAND, choices=list(InstanceResource))
parser.add_argument('--append', dest='append', action='store_true',
help='append to the current instance_output')
args = parser.parse_args()
config = read_region_config(args.region_config)
region_list = args.regions.split(',')
num_instance_list = args.num_instance_list.split(',')
instance_resource = args.instance_resource
assert len(region_list) == len(num_instance_list), "number of regions: %d != number of instances per region: %d" % (
len(region_list), len(num_instance_list))
write_mode = "a" if args.append else "w"
with open(args.instance_output, write_mode) as fout, open(args.instance_ids_output, write_mode) as fout2:
thread_pool = []
for i in range(len(region_list)):
region_number = region_list[i]
number_of_instances = num_instance_list[i]
if instance_resource == InstanceResource.ON_DEMAND:
t = threading.Thread(target=run_for_one_region_on_demand, args=(
config, region_number, number_of_instances, fout, fout2))
elif instance_resource == InstanceResource.SPOT_FLEET:
t = threading.Thread(target=spot_fleet.run_one_region, args=(
region_number, number_of_instances, fout, fout2))
LOGGER.info("creating thread for region %s" % region_number)
t.start()
thread_pool.append(t)
for t in thread_pool:
t.join()
LOGGER.info("done.")

@@ -1,37 +0,0 @@
import argparse
import logging
import sys
from utils import utils
logging.basicConfig(level=logging.INFO, format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s')
LOGGER = logging.getLogger(__file__)
LOGGER.setLevel(logging.INFO)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='This script helps you to generate distribution config')
parser.add_argument('--ip_list_file', type=str, dest='ip_list_file',
default='raw_ip.txt', help="the file containing available raw ips")
# If the ip_list_file is None we need to use the region, node_name_tag and region_config to collect raw_ip
parser.add_argument('--region', type=str, dest='region_number',
default="4", help="region number")
parser.add_argument('--node_name_tag', type=str,
dest='node_name_tag', default='4-NODE-23-36-01-2018-07-05')
parser.add_argument('--region_config', type=str,
dest='region_config', default='configuration.txt')
parser.add_argument('--shard_number', type=int, dest='shard_number', default=1)
parser.add_argument('--client_number', type=int, dest='client_number', default=1)
parser.add_argument('--distribution_config', type=str,
dest='distribution_config', default='distribution_config.txt')
args = parser.parse_args()
if args.ip_list_file is None:
utils.generate_distribution_config2(
args.region_number, args.node_name_tag, args.region_config,
args.shard_number, args.client_number, args.distribution_config)
else:
utils.generate_distribution_config3(args.shard_number, args.client_number,
args.ip_list_file, args.distribution_config)
LOGGER.info("Done writing %s" % args.distribution_config)

@@ -1,105 +0,0 @@
import argparse
import logging
import os
import random
import sys
import threading
import time
import boto3
logging.basicConfig(level=logging.INFO, format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s')
LOGGER = logging.getLogger(__file__)
LOGGER.setLevel(logging.INFO)
REGION_NAME = 'region_name'
REGION_KEY = 'region_key'
REGION_SECURITY_GROUP = 'region_security_group'
REGION_SECURITY_GROUP_ID = 'region_security_group_id'
REGION_HUMAN_NAME = 'region_human_name'
INSTANCE_TYPE = 't2.micro'
REGION_AMI = 'region_ami'
def read_configuration_file(filename):
config = {}
with open(filename, 'r') as f:
for myline in f:
mylist = myline.strip().split(',')
region_num = mylist[0]
config[region_num] = {}
config[region_num][REGION_NAME] = mylist[1]
config[region_num][REGION_KEY] = mylist[2]
config[region_num][REGION_SECURITY_GROUP] = mylist[3]
config[region_num][REGION_HUMAN_NAME] = mylist[4]
config[region_num][REGION_AMI] = mylist[5]
config[region_num][REGION_SECURITY_GROUP_ID] = mylist[6]
return config
def get_instance_ids(describe_instances_response):
instance_ids = []
if describe_instances_response["Reservations"]:
for reservation in describe_instances_response["Reservations"]:
instance_ids.extend(instance["InstanceId"] for instance in reservation["Instances"] if instance.get("InstanceId"))
return instance_ids
def get_instance_ids2(ec2_client, node_name_tag):
time.sleep(5)
filters = [{'Name': 'tag:Name','Values': [node_name_tag]}]
return get_instance_ids(ec2_client.describe_instances(Filters=filters))
def create_ec2_client(region_number, region_config):
config = read_configuration_file(region_config)
region_name = config[region_number][REGION_NAME]
session = boto3.Session(region_name=region_name)
return session.client('ec2'), session
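# Terminate every instance in the region that carries the given Name tag and wait until termination completes.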
def terminate_instances_by_region(region_number, region_config, node_name_tag):
ec2_client, _ = create_ec2_client(region_number, region_config)
filters = [{'Name': 'tag:Name','Values': [node_name_tag]}]
instance_ids = get_instance_ids(ec2_client.describe_instances(Filters=filters))
if instance_ids:
ec2_client.terminate_instances(InstanceIds=instance_ids)
LOGGER.info("waiting until instances with tag %s died." % node_name_tag)
waiter = ec2_client.get_waiter('instance_terminated')
waiter.wait(InstanceIds=instance_ids)
LOGGER.info("instances with node name tag %s terminated." % node_name_tag)
else:
LOGGER.warn("there are no instances to terminate")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='This script helps you to terminate instances')
parser.add_argument('--instance_output', type=str, dest='instance_output',
default='instance_output.txt',
help='the file that contains the node_name_tag and region number of created instances.')
parser.add_argument('--node_name_tag', type=str, dest='node_name_tag')
parser.add_argument('--region_config', type=str,
dest='region_config', default='configuration.txt')
args = parser.parse_args()
if args.node_name_tag:
node_name_tag_items = args.node_name_tag.split(",")
region_number_items = [item[:1] for item in node_name_tag_items]
thread_pool = []
for i in range(len(region_number_items)):
region_number = region_number_items[i]
node_name_tag = node_name_tag_items[i]
t = threading.Thread(target=terminate_instances_by_region, args=(region_number, args.region_config, node_name_tag))
t.start()
thread_pool.append(t)
for t in thread_pool:
t.join()
LOGGER.info("done.")
elif args.instance_output:
with open(args.instance_output, "r") as fin:
thread_pool = []
for line in fin.readlines():
items = line.split(" ")
region_number = items[1].strip()
node_name_tag = items[0].strip()
t = threading.Thread(target=terminate_instances_by_region, args=(region_number, args.region_config, node_name_tag))
t.start()
thread_pool.append(t)
for t in thread_pool:
t.join()
LOGGER.info("done.")

@@ -1,15 +0,0 @@
import boto3
import os
GOHOME ='/Users/alok/Documents/goworkspace/'
s3 = boto3.client('s3')
bucket_name = 'unique-bucket-bin'
#response = s3.create_bucket(Bucket=bucket_name,ACL='public-read-write', CreateBucketConfiguration={'LocationConstraint': 'us-west-2'})
response = s3.list_buckets()
buckets = [bucket['Name'] for bucket in response['Buckets']]
print("Bucket List: %s" % buckets)
dirname = GOHOME + 'src/harmony-benchmark/bin/'
for myfile in os.listdir(dirname):
with open(os.path.join(dirname, myfile), 'rb') as f:
response = s3.put_object(ACL='public-read-write', Body=f.read(), Bucket=bucket_name, Key=myfile)
print(response)

@@ -1,10 +0,0 @@
import boto3
import os
#from boto3.session import Session
s3 = boto3.client('s3')
bucket_name = 'unique-bucket-bin'
#response = s3.create_bucket(Bucket=bucket_name,ACL='public-read-write', CreateBucketConfiguration={'LocationConstraint': 'us-west-2'})
myfile = 'distribution_config.txt'
with open('distribution_config.txt','r') as f:
response = s3.put_object(ACL='public-read-write',Body=f.read(),Bucket=bucket_name,Key=myfile)
print(response)

@@ -1,9 +0,0 @@
import boto3
s3 = boto3.client('s3')
bucket_name = 'first-try'
s3.create_bucket(Bucket=bucket_name,ACL='public-read-write')
response = s3.list_buckets()
buckets = [bucket['Name'] for bucket in response['Buckets']]
print("Bucket List: %s" % buckets)
filename='myfirst.txt'
s3.upload_file(filename, bucket_name, filename)

@@ -1,11 +0,0 @@
#!/bin/bash -x
REGION=$(curl 169.254.169.254/latest/meta-data/placement/availability-zone/ | sed 's/[a-z]$//')
#yum update -y #This breaking codedeploy right now
yum install ruby wget -y
cd /home/ec2-user
touch yum-not-updated.txt
wget https://aws-codedeploy-$REGION.s3.amazonaws.com/latest/install
chmod +x ./install
./install auto
mkdir projects
mkdir projects/src

@@ -1,23 +0,0 @@
#!/bin/bash
yum install ruby -y
cd /home/ec2-user/
curl http://unique-bucket-bin.s3.amazonaws.com/txgen -o txgen
curl http://unique-bucket-bin.s3.amazonaws.com/soldier -o soldier
curl http://unique-bucket-bin.s3.amazonaws.com/commander -o commander
curl http://unique-bucket-bin.s3.amazonaws.com/benchmark -o benchmark
curl http://unique-bucket-bin.s3.amazonaws.com/kill_node.sh -o kill_node.sh
chmod +x ./soldier
chmod +x ./txgen
chmod +x ./benchmark
chmod +x ./commander
chmod +x ./kill_node.sh
# Get My IP
ip=`curl http://169.254.169.254/latest/meta-data/public-ipv4`
SOLDIER_PORT=9000
# Kill existing soldier
fuser -k -n tcp $SOLDIER_PORT
# Run soldier
./soldier -ip $ip -port $SOLDIER_PORT > soldier_log 2>&1 &

@@ -1,8 +0,0 @@
1,us-east-1,virginia-key-benchmark,virginia-security-group,virginia,ami-b70554c8,sg-04d0b62ee08ce8800
2,us-east-2,ohio-key-benchmark,ohio-security-group,ohio,ami-8c122be9,sg-0789078f1c76defbe
3,us-west-1,california-key-benchmark,california-security-group,california,ami-e0ba5c83,sg-0a66ccb6ab9161a14
4,us-west-2,oregon-key-benchmark,oregon-security-group,oregon,ami-a9d09ed1,sg-020cb5729fa212d43
5,ap-northeast-1,tokyo-key-benchmark,tokyo-security-group,tokyo,ami-e99f4896,sg-009aeb97f675c1ad5
6,ap-southeast-1,singapore-key-benchmark,singapore-security-group,singapore,ami-05868579,sg-05f9b60044a19dfb2
7,eu-central-1,frankfurt-key-benchmark,frankfurt-security-group,frankfurt,ami-7c4f7097,sg-0bb06fcd8b25b5910
8,eu-west-1,ireland-key-benchmark,ireland-security-group,ireland,ami-466768ac,sg-0aa8954acb79fdb58

@@ -1,46 +0,0 @@
import utils
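# Launch templates are named per region so spot fleet requests can reference them by name (see spot_fleet.get_launch_template).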
def get_launch_template_name(region_number):
return 'benchmark-' + utils.CONFIG[region_number][utils.REGION_NAME]
def create(ec2_client, region_number):
return ec2_client.create_launch_template(
# DryRun=True,
LaunchTemplateName=get_launch_template_name(region_number),
LaunchTemplateData={
'IamInstanceProfile': {
'Name': utils.IAM_INSTANCE_PROFILE
},
'ImageId': utils.CONFIG[region_number][utils.REGION_AMI],
# 'InstanceType': instance_type,
'KeyName': utils.CONFIG[region_number][utils.REGION_KEY],
'UserData': utils.USER_DATA_BASE64,
'SecurityGroupIds': [
utils.CONFIG[region_number][utils.REGION_SECURITY_GROUP_ID]
],
# 'InstanceInitiatedShutdownBehavior': 'stop',
'TagSpecifications': [
{
'ResourceType': 'instance',
'Tags': [
{
'Key': 'LaunchTemplate',
'Value': 'Yes'
}
]
}
],
# 'InstanceMarketOptions': {
# 'MarketType': 'spot',
# 'SpotOptions': {
# 'MaxPrice': 'string',
# 'SpotInstanceType': 'one-time'|'persistent',
# 'BlockDurationMinutes': 123,
# 'InstanceInterruptionBehavior': 'hibernate'|'stop'|'terminate'
# }
# },
}
)

@@ -1,9 +0,0 @@
import logging
logging.basicConfig(level=logging.INFO,
format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s')
def getLogger(file):
LOGGER = logging.getLogger(file)
LOGGER.setLevel(logging.INFO)
return LOGGER

@@ -1,119 +0,0 @@
import utils
import logger
import launch_template
LOGGER = logger.getLogger(__file__)
def create_launch_specification(region_number, instanceType):
return {
# Region irrelevant fields
'IamInstanceProfile': {
'Name': utils.IAM_INSTANCE_PROFILE
},
'InstanceType': instanceType,
'UserData': utils.USER_DATA_BASE64,
# Region relevant fields
'SecurityGroups': [
{
# In certain scenarios, we have to use group id instead of group name
# https://github.com/boto/boto/issues/350#issuecomment-27359492
'GroupId': utils.CONFIG[region_number][utils.REGION_SECURITY_GROUP_ID]
}
],
'ImageId': utils.CONFIG[region_number][utils.REGION_AMI],
'KeyName': utils.CONFIG[region_number][utils.REGION_KEY],
'TagSpecifications': [
{
'ResourceType': 'instance',
'Tags': [
{
'Key': 'Name',
'Value': utils.get_node_name_tag(region_number)
}
]
}
],
# 'WeightedCapacity': 123.0,
# 'Placement': {
# # 'AvailabilityZone': get_one_availability_zone(ec2_client)
# }
}
def create_launch_specification_list(region_number, instance_type_list):
return list(map(lambda type: create_launch_specification(region_number, type), instance_type_list))
def get_launch_template(region_number, instance_type):
return {
'LaunchTemplateSpecification': {
'LaunchTemplateName': launch_template.get_launch_template_name(region_number),
'Version': '1'
},
'Overrides': [
{
'InstanceType': instance_type
}
]
}
def get_launch_template_list(region_number, instance_type_list):
return list(map(lambda type: get_launch_template(region_number, type), instance_type_list))
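# Request a diversified spot fleet across the given instance types; returns the SpotFleetRequestId.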
def request_spot_fleet(ec2_client, region_number, number_of_instances, instance_type_list):
LOGGER.info("Requesting spot fleet")
LOGGER.info("Creating node_name_tag: %s" %
utils.get_node_name_tag(region_number))
# https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.request_spot_fleet
response = ec2_client.request_spot_fleet(
# DryRun=True,
SpotFleetRequestConfig={
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet.html#spot-fleet-allocation-strategy
'AllocationStrategy': 'diversified',
'IamFleetRole': 'arn:aws:iam::656503231766:role/RichardFleetRole',
'LaunchSpecifications': create_launch_specification_list(region_number, instance_type_list),
# 'SpotPrice': 'string', # The maximum price per unit hour that you are willing to pay for a Spot Instance. The default is the On-Demand price.
'TargetCapacity': number_of_instances,
'Type': 'maintain'
}
)
return response["SpotFleetRequestId"]
def request_spot_fleet_with_on_demand(ec2_client, region_number, number_of_instances, number_of_on_demand, instance_type_list):
LOGGER.info("Requesting spot fleet")
LOGGER.info("Creating node_name_tag: %s" %
utils.get_node_name_tag(region_number))
# https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.request_spot_fleet
response = ec2_client.request_spot_fleet(
# DryRun=True,
SpotFleetRequestConfig={
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet.html#spot-fleet-allocation-strategy
'AllocationStrategy': 'diversified',
'IamFleetRole': 'arn:aws:iam::656503231766:role/RichardFleetRole',
'LaunchTemplateConfigs': get_launch_template_list(region_number, instance_type_list),
# 'SpotPrice': 'string', # The maximum price per unit hour that you are willing to pay for a Spot Instance. The default is the On-Demand price.
'TargetCapacity': number_of_instances,
'OnDemandTargetCapacity': number_of_on_demand,
'Type': 'maintain'
}
)
return response
def get_instance_ids(client, request_id):
res = client.describe_spot_fleet_instances(
SpotFleetRequestId=request_id
)
return [ inst["InstanceId"] for inst in res["ActiveInstances"] ]
def run_one_region(region_number, number_of_instances, fout, fout2):
client = utils.create_client(utils.CONFIG, region_number)
instance_type_list = ['t2.micro', 't2.small', 'm3.medium']
# node_name_tag = request_spot_fleet_with_on_demand(
# client, region_number, int(number_of_instances), 1, instance_type_list)
request_id = request_spot_fleet(
client, region_number, int(number_of_instances), instance_type_list)
instance_ids = get_instance_ids(client, request_id)
print(instance_ids) # TODO@ricl, no data here since the request is not fulfilled.

@@ -1,210 +0,0 @@
import boto3
import datetime
import json
import sys
import time
import base64
import threading
MAX_INTANCES_FOR_WAITER = 100
MAX_INSTANCES_FOR_DEPLOYMENT = 500
REGION_NAME = 'region_name'
REGION_KEY = 'region_key'
REGION_SECURITY_GROUP = 'region_security_group'
REGION_SECURITY_GROUP_ID = 'region_security_group_id'
REGION_HUMAN_NAME = 'region_human_name'
INSTANCE_TYPE = 't2.micro'
REGION_AMI = 'region_ami'
IAM_INSTANCE_PROFILE = 'BenchMarkCodeDeployInstanceProfile'
time_stamp = time.time()
CURRENT_SESSION = datetime.datetime.fromtimestamp(
time_stamp).strftime('%H-%M-%S-%Y-%m-%d')
NODE_NAME_SUFFIX = "NODE-" + CURRENT_SESSION
def get_node_name_tag(region_number):
return region_number + "-" + NODE_NAME_SUFFIX
def get_node_name_tag2(region_number, tag):
return region_number + "-" + NODE_NAME_SUFFIX + "-" + str(tag)
with open("userdata-soldier.sh", "r") as userdata_file:
USER_DATA = userdata_file.read()
# UserData must be base64 encoded for spot instances.
USER_DATA_BASE64 = base64.b64encode(USER_DATA)
def create_client(config, region_number):
region_name = config[region_number][REGION_NAME]
# Create session.
session = boto3.Session(region_name=region_name)
# Create a client.
return session.client('ec2')
def read_region_config(region_config='configuration.txt'):
global CONFIG
config = {}
with open(region_config, 'r') as f:
for myline in f:
mylist = [item.strip() for item in myline.strip().split(',')]
region_num = mylist[0]
config[region_num] = {}
config[region_num][REGION_NAME] = mylist[1]
config[region_num][REGION_KEY] = mylist[2]
config[region_num][REGION_SECURITY_GROUP] = mylist[3]
config[region_num][REGION_HUMAN_NAME] = mylist[4]
config[region_num][REGION_AMI] = mylist[5]
config[region_num][REGION_SECURITY_GROUP_ID] = mylist[6]
CONFIG = config
return config
def get_ip_list(response):
if response.get('Instances', None):
return [instance.get('PublicIpAddress', None) for instance in response['Instances']]
else:
return []
def create_ec2_client(region_number, region_config):
config = read_region_config(region_config)
region_name = config[region_number][REGION_NAME]
session = boto3.Session(region_name=region_name)
return session.client('ec2'), session
def collect_public_ips_from_ec2_client(ec2_client, node_name_tag):
filters = [{'Name': 'tag:Name','Values': [node_name_tag]}]
response = ec2_client.describe_instances(Filters=filters)
ip_list = []
if response.get('Reservations'):
for reservation in response[u'Reservations']:
ip_list.extend(instance['PublicIpAddress'] for instance in reservation['Instances'] if instance.get('PublicIpAddress'))
return ip_list
def collect_public_ips(region_number, node_name_tag, region_config):
ec2_client, _ = create_ec2_client(region_number, region_config)
ip_list = collect_public_ips_from_ec2_client(ec2_client, node_name_tag)
return ip_list
def get_application(codedeploy, application_name):
response = codedeploy.list_applications()
if application_name in response['applications']:
return response
else:
response = codedeploy.create_application(
applicationName=application_name,
computePlatform='Server'
)
return response
def create_deployment_group(codedeploy, region_number,application_name, deployment_group_name, node_name_tag):
response = codedeploy.list_deployment_groups(applicationName=application_name)
if response.get('deploymentGroups') and (deployment_group_name in response['deploymentGroups']):
return None
else:
response = codedeploy.create_deployment_group(
applicationName=application_name,
deploymentGroupName=deployment_group_name,
deploymentConfigName='CodeDeployDefault.AllAtOnce',
serviceRoleArn='arn:aws:iam::656503231766:role/BenchMarkCodeDeployServiceRole',
deploymentStyle={
'deploymentType': 'IN_PLACE',
'deploymentOption': 'WITHOUT_TRAFFIC_CONTROL'
},
ec2TagFilters = [
{
'Key': 'Name',
'Value': node_name_tag,
'Type': 'KEY_AND_VALUE'
}
]
)
return response['deploymentGroupId']
def generate_distribution_config2(region_number, node_name_tag, region_config,
shard_number, client_number, distribution_config):
ip_list = collect_public_ips(region_number, node_name_tag, region_config)
generate_distribution_config(shard_number, client_number, ip_list, distribution_config)
def generate_distribution_config3(shard_number, client_number, ip_list_file, distribution_config):
with open(ip_list_file, "r") as fin:
lines = fin.readlines()
ip_list = [line.strip() for line in lines]
generate_distribution_config(shard_number, client_number, ip_list, distribution_config)
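# Each entry of ip_list is expected to be "<ip> <node_name_tag>"; roles are assigned in order:
# one commander, shard_number leaders, then validators, then clients.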
def generate_distribution_config(shard_number, client_number, ip_list, distribution_config):
if len(ip_list) < shard_number * 2 + client_number + 1:
print("Not enough nodes to generate a config file")
return False
# Create ip for clients.
commander_id, client_id, leader_id, validator_id = 0, 0, 0, 0
validator_number = len(ip_list) - client_number - shard_number - 1
with open(distribution_config, "w") as fout:
for i in range(len(ip_list)):
ip, node_name_tag = ip_list[i].split(" ")
if commander_id < 1:
fout.write("%s 9000 commander %d %s\n" % (ip, commander_id, node_name_tag))
commander_id = commander_id + 1
elif leader_id < shard_number:
fout.write("%s 9000 leader %d %s\n" % (ip, leader_id, node_name_tag))
leader_id = leader_id + 1
elif validator_id < validator_number:
fout.write("%s 9000 validator %d %s\n" % (ip, validator_id % shard_number, node_name_tag))
validator_id = validator_id + 1
else:
fout.write("%s 9000 client %d %s\n" % (ip, client_id % shard_number, node_name_tag))
client_id = client_id + 1
def get_availability_zones(ec2_client):
response = ec2_client.describe_availability_zones()
all_zones = []
if response.get('AvailabilityZones', None):
all_zones = [info['ZoneName'] for info in response.get('AvailabilityZones') if info['State'] == 'available']
return all_zones
def get_one_availability_zone(ec2_client):
time.sleep(1)
all_zones = get_availability_zones(ec2_client)
if len(all_zones) > 0:
return all_zones[0]
else:
return None
def get_instance_ids2(ec2_client, node_name_tag):
time.sleep(5)
filters = [{'Name': 'tag:Name','Values': [node_name_tag]}]
return get_instance_ids(ec2_client.describe_instances(Filters=filters))
# Get instance_ids from describe_instances_response.
def get_instance_ids(describe_instances_response):
instance_ids = []
if describe_instances_response["Reservations"]:
for reservation in describe_instances_response["Reservations"]:
instance_ids.extend(instance["InstanceId"] for instance in reservation["Instances"] if instance.get("InstanceId"))
return instance_ids
WAITER_LOCK = threading.Lock()
def run_waiter_100_instances_for_status(ec2_client, status, instance_ids):
time.sleep(10)
WAITER_LOCK.acquire()
waiter = ec2_client.get_waiter('instance_running')
WAITER_LOCK.release()
waiter.wait(InstanceIds=instance_ids)
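# Wait on instances in batches of MAX_INTANCES_FOR_WAITER, one waiter thread per batch.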
def run_waiter_for_status(ec2_client, status, instance_ids):
thread_pool = []
i = 0
while i < len(instance_ids):
j = min(len(instance_ids), i + MAX_INTANCES_FOR_WAITER)
t = threading.Thread(target=run_waiter_100_instances_for_status, args=(
ec2_client, status, instance_ids[i:j]))
t.start()
thread_pool.append(t)
i = i + MAX_INTANCES_FOR_WAITER
for t in thread_pool:
t.join()
# used for testing only.
# if __name__ == "__main__":
# ip_list = collect_public_ips('4', "4-NODE-23-36-01-2018-07-05", "configuration.txt")
# print ip_list
# generate_distribution_config(2, 1, ip_list, "config_test.txt")

@@ -1,31 +0,0 @@
import unittest
from utils import generate_distribution_config
class TestCreateAndDeploy(unittest.TestCase):
def test_generate_config_file(self):
ips = ["102.000.000.1", "102.000.000.2", "102.000.000.3", "102.000.000.4", "102.000.000.5", "102.000.000.6"]
generate_distribution_config(2, 2, ips, "config_test.txt")
with open("config_test.txt", "r") as fin:
lines = fin.readlines()
collection = {}
collection['ip'] = []
collection['client'] = {}
leader_count, validator_count, client_count = 0, 0, 0
for line in lines:
strs = line.split(" ")
assert(not strs[0] in collection['ip'])
collection['ip'].append(strs[0])
if strs[2] == "client":
client_count = client_count + 1
elif strs[2] == "leader":
leader_count = leader_count + 1
elif strs[2] == "validator":
validator_count = validator_count + 1
assert(validator_count == 2)
assert(leader_count == 2)
assert(client_count == 2)
if __name__ == '__main__':
unittest.main()

@@ -1,23 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAl+WnKM7roDvX949vK+27oIHiw885tdFhKkpF7wm0hbqHSXrUc5LUzZJ6x5K4
QE6uslG/EDxbndRP6Gm8P99XarB4zGDWXeYnwdOs+XcNd1d8VGwvw8/CHd+Lndk/lJi0fhuRSRuZ
mtnyVe8T6ffgdIpYBwr81vC75UL918QGZO9VvczgWh3MvAcT2rRhLZByyobgb3YHpSfVw+khkTfh
/xOT1sOPsV1AClAEb2IJr1fPcNVJ/HfMbx+npBJEo8/YUIoOWx64AzaDnGNSL+EG9VSaua5pv/aP
ulZq4hOFF2mSna26ZllUiNfX66wG/28XPT7CH49Du6lJhX67vfoByQIDAQABAoIBAB9ceA3R4w47
LNTdFWvxcoqqJoKdF2I1r6UGnrfCvjXaFDKUnKacCznCY30vCsGkPSdt/0+/1n6YpkLrCcX6O9c4
RkiUOc47RbwcExuSfAJG2tGnYJlCHOcED5RwlVL90E41LOL2QtmY5BLgALGOzxXgtloF57wNCg5E
rZpNy7As86Bql4ZvsJZm4pBAcEeIrJan1pSyJQDmXGnVlD3AXNkFbMhU7o+jL9Jmp8DFMykwmX5D
SYWr6KJfFmdJA5gYBVs9rpw2kgKpvFih2EEWdvyqfpN6yMjUHxn+mmOY6AlvtQcgpsYLReLQULeR
HJSzP5NgMce6L+LEfdvKTgGTzwECgYEA5VLSDmTgU+MedkTJ2ndRdyp5KO6dxkeZRY9z2tcvwq1R
WfgxfB1xA393ennoSuKchSF5ux+wP/+Kd5KR5IPNUfSOcSfqONxUZXC5kiL5T3fbBhlXrBkvAFeo
jf4f13lhuVd7q2nfsODwhkXLKPTYD0UOXv5T9ZwzM6BwGHNVASECgYEAqZEZNLx6PbjZnPCUEqj7
/JCgSCbeZBT5h0CV7hBGPiHK5Rh9GMUZcgkf5oFPhKvuACXovVRckmPlUPGFA1d4YooqKK07h9AH
UD5aGeaF02TZ/Wcx7/c++z5SSP4mWgFmm5mF7j4HyG8L7+SeZnk8Cr0lQnUQjeudWcD0pXNc46kC
gYEA4bu/t5mG1Cxp7ONTivAOye2OLEOOHQKXlkhaM53Ejq5KuPQ9yb21ZEsR0ga0KRqeFdJY4GCV
/rlNlNw7LiSrdDP2Dt7KiUQg82Wd1YMFxDrn5z4E9gwoHv8qlPMEZvc9wdKikp36KkORn6qOC/fP
wmAogEt7W/Zw1C3mibrlfuECgYB/IGeTYBMNnz6XKM76xnJVQmDId0uYIRmKy2McnlrrTg59f8Sa
A3s2QZ0HBb+GKhg/SCybdVoJ7pcH/mrIvJVm20sMQAQ3qEEzaevHK0r918iO8QVBcU5osqqiStiZ
9h7lXU5J9XuES6zZZ79R+GaJbpE2p+mHXTQVDFLekAT+MQKBgQDae3IGvM7xb18U3zP+tBD5JL4K
eWFEd1cgZefoyRlc1d0/p87S4pGESzNYdGwl9kQsAMsxUUyOp2Xv253b+hZszfoZdPcSx3mir/IB
OBXPtsb64WHKqvYoH3BUT6ufZOtJFx6lsR1uPP4ewehF+/2DaH8QL2ZdwrXd678hBI9P4A==
-----END RSA PRIVATE KEY-----