merging conflicts

pull/62/head
ak 6 years ago
commit f431a1c870
  1. 4
      .travis.yml
  2. 1
      aws-experiment-launch/cal_tps.sh
  3. 35
      aws-experiment-launch/collect_public_ips.py
  4. 62
      aws-experiment-launch/commander_prepare.py
  5. 11
      aws-experiment-launch/configs/userdata-commander.sh
  6. 20
      aws-experiment-launch/configs/userdata-soldier.sh
  7. 8
      aws-experiment-launch/configuration-git.txt
  8. 8
      aws-experiment-launch/configuration.txt
  9. 28
      aws-experiment-launch/create_deploy_soldiers.sh
  10. 193
      aws-experiment-launch/create_solider_instances.py
  11. 8
      aws-experiment-launch/download_log_from_commander.sh
  12. 17
      aws-experiment-launch/download_log_from_leaders.sh
  13. 233
      aws-experiment-launch/experiment/commander/main.go
  14. 312
      aws-experiment-launch/experiment/soldier/main.go
  15. 93
      aws-experiment-launch/experiment/soldier/s3/s3.go
  16. 30
      aws-experiment-launch/experiment/utils/utils.go
  17. 37
      aws-experiment-launch/generate_distribution_config.py
  18. 1
      aws-experiment-launch/logs_download.sh
  19. 96
      aws-experiment-launch/report_extractor.py
  20. 18
      aws-experiment-launch/spot-instance/request-spot.sh
  21. 10
      aws-experiment-launch/spot-instance/run-instances.sh
  22. 105
      aws-experiment-launch/terminate_instances.py
  23. 15
      aws-experiment-launch/upload_binaries.py
  24. 10
      aws-experiment-launch/upload_config.py
  25. 9
      aws-experiment-launch/upload_s3.py
  26. 11
      aws-experiment-launch/userdata-commander.sh
  27. 31
      aws-experiment-launch/userdata-soldier.sh
  28. 0
      aws-experiment-launch/utils/__init__.py
  29. 8
      aws-experiment-launch/utils/configuration.txt
  30. 46
      aws-experiment-launch/utils/launch_template.py
  31. 9
      aws-experiment-launch/utils/logger.py
  32. 119
      aws-experiment-launch/utils/spot_fleet.py
  33. 210
      aws-experiment-launch/utils/utils.py
  34. 31
      aws-experiment-launch/utils/utils_test.py
  35. 5
      aws-scripts/kill_node.sh
  36. 34
      aws-scripts/parse_json.py
  37. 26
      aws-scripts/preprocess_peerlist.py
  38. 34
      aws-scripts/run_instance.sh
  39. 1
      aws-scripts/say_bye.sh
  40. 1
      aws-scripts/say_hello.sh
  41. 43
      aws-scripts/setup.sh
  42. 1
      aws-scripts/setup_instances.sh
  43. 177
      azure/launchvm.py
  44. 17
      benchmark.go
  45. 43
      blockchain/block.go
  46. 16
      blockchain/blockchain.go
  47. 104
      blockchain/transaction.go
  48. 65
      blockchain/utxopool.go
  49. 12
      client/btctxgen/main.go
  50. 50
      client/client.go
  51. 99
      client/config/config.go
  52. 30
      client/txgen/main.go
  53. 278
      client/wallet/main.go
  54. 159
      configr/main.go
  55. 2
      consensus/consensus.go
  56. 64
      consensus/consensus_leader.go
  57. 14
      crypto/pki/utils.go
  58. 5
      deploy.sh
  59. 5
      go_executable_build.sh
  60. 14
      node/node.go
  61. 144
      node/node_handler.go
  62. 20
      p2p/helper.go
  63. 53
      p2p/helper_test.go
  64. 55
      profiler/main.go
  65. 48
      profiler/profiler.go
  66. 6
      proto/node/node.go
  67. 5
      run_experiment.sh
  68. 11
      utils/bytes.go

@ -10,5 +10,5 @@ install:
- ./.travis.gofmt.sh
- go build -v ./...
notifications:
slack:
secure: RPB3ThYIGuDUidvaWfOA7Hc9x1bDfd5+Y10r7xwY+NGCN3zW86s/GNLpLutI0MWTV9e2CJupHvz5clp8Ktle/tVjLhs6jHQnNV7U8PTWKkL5By6IFVAHN12unMQn/m0RPwqMfdubajXoV51XhbFA/iow/0fqwsd61VdPIuBrlQjy9z7kyVnRLNoGvYjDqKEkJfYVb3qFNFLzD0F7Y2AgxnezIRjsTLgHzR4owLJYqVMhvTYIV9/vSf1w4UUPzhHyZRESl6bri+a1+g7GxE32OtNwq68xxVeeJcrO/MbjAHHW9V6BW1MjJfYzD5T+7JHIfZOjV2WgzJ7uCkVYztfq+02yOCSWsLNxFVojIDhVFEhhJ6Vd2Zf1otolS7j0svK/qNmShID9q9NAasaI105GsQgtaSPAUGd88J/vyX2ndG1nDOvxmgOo10tZFOnPHW7JnWMybk3PLza8o1ujA7X3JFdvDA8BPP9h6MVP4N7doCQ/n4Crts53HvEWlvcv5sBNu61WYlSTBzf1qNwBKMyN2E0rNubsxKmW8B6jLdWYdlx57nyTRPraNKGE1fnUW5nWRZGax3F1tQRwEfpQMk22qgeUK0RYWsPgHFaPciKCA3dJX7t1k/ib9pyR4nc9SZnYw54KMhkAXPIVQ0iy0EpTAH1DNYV6v8zXCwjl+BdkhlY=
slack: harmonyone:gggCd1QQopsQAW8JYgBWiH7M
# secure: RPB3ThYIGuDUidvaWfOA7Hc9x1bDfd5+Y10r7xwY+NGCN3zW86s/GNLpLutI0MWTV9e2CJupHvz5clp8Ktle/tVjLhs6jHQnNV7U8PTWKkL5By6IFVAHN12unMQn/m0RPwqMfdubajXoV51XhbFA/iow/0fqwsd61VdPIuBrlQjy9z7kyVnRLNoGvYjDqKEkJfYVb3qFNFLzD0F7Y2AgxnezIRjsTLgHzR4owLJYqVMhvTYIV9/vSf1w4UUPzhHyZRESl6bri+a1+g7GxE32OtNwq68xxVeeJcrO/MbjAHHW9V6BW1MjJfYzD5T+7JHIfZOjV2WgzJ7uCkVYztfq+02yOCSWsLNxFVojIDhVFEhhJ6Vd2Zf1otolS7j0svK/qNmShID9q9NAasaI105GsQgtaSPAUGd88J/vyX2ndG1nDOvxmgOo10tZFOnPHW7JnWMybk3PLza8o1ujA7X3JFdvDA8BPP9h6MVP4N7doCQ/n4Crts53HvEWlvcv5sBNu61WYlSTBzf1qNwBKMyN2E0rNubsxKmW8B6jLdWYdlx57nyTRPraNKGE1fnUW5nWRZGax3F1tQRwEfpQMk22qgeUK0RYWsPgHFaPciKCA3dJX7t1k/ib9pyR4nc9SZnYw54KMhkAXPIVQ0iy0EpTAH1DNYV6v8zXCwjl+BdkhlY=

@ -1 +0,0 @@
for file in $(ls *leader*); do echo $file; cat $file | grep TPS | head -n 2 | cut -f2 -d ":" | cut -f1 -d "," | awk '{ sum += $1; n++ } END { if (n > 0) print sum / n; }'; done

@ -1,35 +0,0 @@
import argparse
import os
import random
import sys
from utils import utils
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='This script helps you to collect public ips')
parser.add_argument('--instance_output', type=str, dest='instance_output',
default='instance_output.txt',
help='the file contains node_name_tag and region number of created instances.')
parser.add_argument('--region_config', type=str,
dest='region_config', default='configuration.txt')
parser.add_argument('--file_output', type=str,
dest='file_output', default='raw_ip.txt')
args = parser.parse_args()
if not args.instance_output or not os.path.isfile(args.instance_output):
print "%s or %s are not existed" % (args.file_output, args.instance_output)
sys.exit(1)
if args.instance_output:
with open(args.instance_output, "r") as fin, open(args.file_output, "w") as fout:
total_ips = []
node_name_tag_list = []
for line in fin.readlines():
items = line.split(" ")
region_number = items[1].strip()
node_name_tag = items[0].strip()
ip_list = utils.collect_public_ips(region_number, node_name_tag, args.region_config)
total_ips.extend([(ip, node_name_tag) for ip in ip_list])
random.shuffle(total_ips)
for tuple in total_ips:
fout.write(tuple[0] + " " + tuple[1] + "\n")
print "Done collecting public ips %s" % args.file_output

@ -1,62 +0,0 @@
import argparse
import logging
import os
import stat
import sys
from utils import utils

logging.basicConfig(level=logging.INFO, format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s')
LOGGER = logging.getLogger(__file__)
LOGGER.setLevel(logging.INFO)

# Region-ordered key files; indexed by (region number - 1) below.
PEMS = [
    "virginia-key-benchmark.pem",
    "ohio-key-benchmark.pem",
    "california-key-benchmark.pem",
    "oregon-key-benchmark.pem",
    "tokyo-key-benchmark.pem",
    "singapore-key-benchmark.pem",
    "frankfurt-key-benchmark.pem",
    "ireland-key-benchmark.pem",
]

if __name__ == "__main__":
    # Strips the commander's own line out of the distribution config and
    # emits an executable commander_logging.sh that ssh-es into it.
    parser = argparse.ArgumentParser(description='This script helps you to generate distribution config')
    parser.add_argument('--distribution_config', type=str,
                        dest='distribution_config', default='distribution_config.txt')
    parser.add_argument('--commander_logging', type=str,
                        dest='commander_logging', default='commander_logging.sh')
    args = parser.parse_args()
    if not os.path.exists(args.distribution_config):
        # BUG fix: the original exited silently, giving no hint why.
        LOGGER.error("%s does not exist" % args.distribution_config)
        sys.exit(1)
    with open(args.distribution_config, "r") as fin:
        lines = fin.readlines()
    commander_address = None
    commander_region = None
    # Rewrite the config in place, dropping the commander's own entry.
    with open(args.distribution_config, "w") as fout:
        for line in lines:
            if "commander" in line:
                items = [item.strip() for item in line.split(" ")]
                commander_address = items[0]
                # NOTE(review): assumes items[4] begins with a one-digit
                # region number — confirm the config line format.
                commander_region = int(items[4][0])
            else:
                fout.write(line.strip() + "\n")
    LOGGER.info("Generated %s" % args.distribution_config)
    if not commander_address or not commander_region:
        # BUG fix: this failure path was logged at INFO level.
        LOGGER.error("Failed to extract commander address and commander region.")
        sys.exit(1)
    with open(args.commander_logging, "w") as fout:
        fout.write("ssh -i ./keys/%s ec2-user@%s\n" % (PEMS[commander_region - 1], commander_address))
    # Make the generated login script executable.
    st = os.stat(args.commander_logging)
    os.chmod(args.commander_logging, st.st_mode | stat.S_IEXEC)
    LOGGER.info("Generated %s" % args.commander_logging)
    LOGGER.info("DONE.")

@ -1,11 +0,0 @@
#!/bin/bash -x
# Commander EC2 user-data: installs the AWS CodeDeploy agent and prepares
# the project directory layout on first boot.
# Derive the region name from the availability zone (strip trailing letter).
REGION=$(curl 169.254.169.254/latest/meta-data/placement/availability-zone/ | sed 's/[a-z]$//')
#yum update -y #This breaking codedeploy right now
yum install ruby wget -y
cd /home/ec2-user
# Marker recording that the yum update above was intentionally skipped.
touch yum-not-updated.txt
# Fetch and run the region-specific CodeDeploy agent installer.
wget https://aws-codedeploy-$REGION.s3.amazonaws.com/latest/install
chmod +x ./install
./install auto
mkdir projects
mkdir projects/src

@ -1,20 +0,0 @@
#!/bin/bash
# Soldier EC2 user-data: downloads the soldier/benchmark/txgen binaries
# from the commander's file server and starts the soldier daemon.
cd /home/ec2-user
commanderIP= # <- Put the commander IP here.
curl http://$commanderIP:8080/soldier -o soldier
chmod +x ./soldier
curl http://$commanderIP:8080/benchmark -o benchmark
chmod +x ./benchmark
curl http://$commanderIP:8080/txgen -o txgen
chmod +x ./txgen
# Get My IP
ip=`curl http://169.254.169.254/latest/meta-data/public-ipv4`
node_port=9000
# Soldier convention: its port is "1" prepended to the node port (19000).
soldier_port=1$node_port
# Kill existing soldier
fuser -k -n tcp $soldier_port
# Run soldier
./soldier -ip $ip -port $node_port > soldier_log 2>&1 &

@ -1,8 +0,0 @@
1,us-east-1,virginia-key-benchmark,virginia-security-group,virginia,ami-97785bed,sg-04d0b62ee08ce8800
2,us-east-2,ohio-key-benchmark,ohio-security-group,ohio,ami-f63b1193,sg-0789078f1c76defbe
3,us-west-1,california-key-benchmark,california-security-group,california,ami-824c4ee2,sg-0a66ccb6ab9161a14
4,us-west-2,oregon-key-benchmark,oregon-security-group,oregon,ami-f2d3638a,sg-020cb5729fa212d43
5,ap-northeast-1,tokyo-key-benchmark,tokyo-security-group,tokyo,ami-ceafcba8,sg-009aeb97f675c1ad5
6,ap-southeast-1,singapore-key-benchmark,singapore-security-group,singapore,ami-68097514,sg-05f9b60044a19dfb2
7,eu-central-1,frankfurt-key-benchmark,frankfurt-security-group,frankfurt,ami-5652ce39,sg-0bb06fcd8b25b5910
8,eu-west-1,ireland-key-benchmark,ireland-security-group,ireland,ami-d834aba1,sg-0aa8954acb79fdb58

@ -1,8 +0,0 @@
1,us-east-1,virginia-key-benchmark,virginia-security-group,virginia,ami-b70554c8,sg-04d0b62ee08ce8800
2,us-east-2,ohio-key-benchmark,ohio-security-group,ohio,ami-8c122be9,sg-0789078f1c76defbe
3,us-west-1,california-key-benchmark,california-security-group,california,ami-e0ba5c83,sg-0a66ccb6ab9161a14
4,us-west-2,oregon-key-benchmark,oregon-security-group,oregon,ami-a9d09ed1,sg-020cb5729fa212d43
5,ap-northeast-1,tokyo-key-benchmark,tokyo-security-group,tokyo,ami-e99f4896,sg-009aeb97f675c1ad5
6,ap-southeast-1,singapore-key-benchmark,singapore-security-group,singapore,ami-05868579,sg-05f9b60044a19dfb2
7,eu-central-1,frankfurt-key-benchmark,frankfurt-security-group,frankfurt,ami-7c4f7097,sg-0bb06fcd8b25b5910
8,eu-west-1,ireland-key-benchmark,ireland-security-group,ireland,ami-466768ac,sg-0aa8954acb79fdb58

@ -1,28 +0,0 @@
# Orchestrates a full deployment: create instances in all 8 regions,
# collect their public IPs, generate the distribution config, prepare the
# commander, and publish the config to S3.
if [ $# -lt 3 ]; then
  echo "Please provide # of instances, # of shards, # of clients"
  exit 1
fi
INSTANCE_NUM=$1
SHARD_NUM=$2
CLIENT_NUM=$3
SLEEP_TIME=10
echo "Creating $INSTANCE_NUM instances at 8 regions"
python create_solider_instances.py --regions 1,2,3,4,5,6,7,8 --instances $INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM,$INSTANCE_NUM
# Give AWS time to report the new instances before querying their IPs.
echo "Sleep for $SLEEP_TIME seconds"
sleep $SLEEP_TIME
# BUG fix: message previously read "Rung collecting raw ips".
echo "Run collecting raw ips"
python collect_public_ips.py --instance_output instance_output.txt
# sleep 10
echo "Generate distribution_config"
python generate_distribution_config.py --ip_list_file raw_ip.txt --shard_number $SHARD_NUM --client_number $CLIENT_NUM
echo "Run commander prepare"
python commander_prepare.py
aws s3 cp distribution_config.txt s3://unique-bucket-bin/distribution_config.txt --acl public-read-write

@ -1,193 +0,0 @@
import argparse
import base64
import boto3
import datetime
import json
import sys
import threading
import time
import enum
#TODO REMOVE UTILS
from utils import utils, spot_fleet, logger
LOGGER = logger.getLogger(__file__)
REGION_NAME = 'region_name'
REGION_KEY = 'region_key'
REGION_SECURITY_GROUP = 'region_security_group'
REGION_SECURITY_GROUP_ID = 'region_security_group_id'
REGION_HUMAN_NAME = 'region_human_name'
INSTANCE_TYPE = 't2.micro'
REGION_AMI = 'region_ami'
class InstanceResource(enum.Enum):
    """Kinds of EC2 capacity the launcher can request."""
    ON_DEMAND = 'ondemand'
    SPOT_INSTANCE = 'spot'
    SPOT_FLEET = 'fleet'

    def __str__(self):
        # Rendered in --help text and when argparse echoes choices.
        return self.value
def run_one_region_on_demand_instances(config, region_number, number_of_instances, tag):
    """Create one batch of on-demand instances in a single region.

    Returns the node name tag of the batch and the ec2 client used, so the
    caller can query the created instance ids.
    """
    client = utils.create_client(config, region_number)
    name_tag = create_instances(config, client, region_number, number_of_instances, tag)
    LOGGER.info("Created %s in region %s" % (name_tag, region_number))
    return name_tag, client
def create_instances(config, ec2_client, region_number, number_of_instances, tag):
    """Launch `number_of_instances` tagged on-demand instances and poll
    until they are RUNNING with public IPs assigned.

    Returns the node name tag whether or not every instance came up; the
    caller re-queries instance ids by tag.
    """
    node_name_tag = utils.get_node_name_tag2(region_number, tag)
    LOGGER.info("Creating node_name_tag: %s" % node_name_tag)
    available_zone = utils.get_one_availability_zone(ec2_client)
    LOGGER.info("Looking at zone %s to create instances." % available_zone)
    time.sleep(2)
    ec2_client.run_instances(
        MinCount=number_of_instances,
        MaxCount=number_of_instances,
        ImageId=config[region_number][utils.REGION_AMI],
        Placement={
            'AvailabilityZone': available_zone,
        },
        SecurityGroups=[config[region_number][utils.REGION_SECURITY_GROUP]],
        IamInstanceProfile={
            'Name': utils.IAM_INSTANCE_PROFILE
        },
        KeyName=config[region_number][utils.REGION_KEY],
        UserData=utils.USER_DATA,
        InstanceType=utils.INSTANCE_TYPE,
        TagSpecifications=[
            {
                'ResourceType': 'instance',
                'Tags': [
                    {
                        'Key': 'Name',
                        'Value': node_name_tag
                    },
                ]
            },
        ],
    )
    # BUG fix: instance_ids could be unbound below if all retries failed.
    instance_ids = []
    retry_count = 10
    while retry_count > 0:
        try:
            time.sleep(20)
            instance_ids = utils.get_instance_ids2(ec2_client, node_name_tag)
            LOGGER.info("Waiting for all %d instances in region %s with node_name_tag %s to be in RUNNING" % (
                len(instance_ids), region_number, node_name_tag))
            break
        except Exception:
            # BUG fix: bare `except:` also swallowed SystemExit/KeyboardInterrupt.
            retry_count -= 1
            LOGGER.info("Failed to get instance ids. Retry again.")
    retry_count = 10
    while retry_count > 0:
        try:
            time.sleep(20)
            waiter = ec2_client.get_waiter('instance_running')
            waiter.wait(InstanceIds=instance_ids)
            break
        except Exception:
            retry_count -= 1
            LOGGER.info("Failed to wait.")
    retry_count = 10
    while retry_count > 0:
        time.sleep(20)
        LOGGER.info("Waiting ...")
        ip_list = utils.collect_public_ips_from_ec2_client(
            ec2_client, node_name_tag)
        if len(ip_list) == number_of_instances:
            LOGGER.info("Created %d instances" % number_of_instances)
            return node_name_tag
        # BUG fix: decrement was 10, permitting a single attempt instead of
        # the 10 retries used by the two loops above.
        retry_count -= 1
    LOGGER.info("Can not get %d instances" % number_of_instances)
    return node_name_tag
# Serializes writes to the shared output files across per-region threads.
LOCK_FOR_RUN_ONE_REGION = threading.Lock()


def run_for_one_region_on_demand(config, region_number, number_of_instances, fout, fout2):
    """Create all instances for one region in deployment-sized batches,
    appending results to the shared output files (runs on its own thread).
    """
    tag = 0
    number_of_instances = int(number_of_instances)
    while number_of_instances > 0:
        # AWS caps how many instances one run_instances call may create.
        number_of_creation = min(utils.MAX_INSTANCES_FOR_DEPLOYMENT, number_of_instances)
        node_name_tag, ec2_client = run_one_region_on_demand_instances(
            config, region_number, number_of_creation, tag)
        if node_name_tag:
            LOGGER.info("Managed to create instances for region %s with name_name_tag %s" %
                        (region_number, node_name_tag))
            instance_ids = utils.get_instance_ids2(ec2_client, node_name_tag)
            # Idiom: `with` replaces the manual acquire/try/finally/release.
            with LOCK_FOR_RUN_ONE_REGION:
                fout.write("%s %s\n" % (node_name_tag, region_number))
                for instance_id in instance_ids:
                    fout2.write(instance_id + " " + node_name_tag + " " + region_number +
                                " " + config[region_number][utils.REGION_NAME] + "\n")
        else:
            LOGGER.info("Failed to create instances for region %s" % region_number)
        number_of_instances -= number_of_creation
        tag += 1
def read_region_config(region_config='configuration.txt'):
    """Parse the per-region CSV (number, name, key, security group, human
    name, AMI, security group id) into a dict keyed by region number.

    The result is also cached in the module-level CONFIG.
    """
    global CONFIG
    parsed = {}
    with open(region_config, 'r') as f:
        for raw_line in f:
            fields = [field.strip() for field in raw_line.strip().split(',')]
            region_num = fields[0]
            parsed[region_num] = {
                REGION_NAME: fields[1],
                REGION_KEY: fields[2],
                REGION_SECURITY_GROUP: fields[3],
                REGION_HUMAN_NAME: fields[4],
                REGION_AMI: fields[5],
                REGION_SECURITY_GROUP_ID: fields[6],
            }
    CONFIG = parsed
    return parsed
if __name__ == "__main__":
    # Launch instances in several regions concurrently, one thread each.
    parser = argparse.ArgumentParser(
        description='This script helps you start instances across multiple regions')
    parser.add_argument('--regions', type=str, dest='regions',
                        default='3', help="Supply a csv list of all regions")
    parser.add_argument('--instances', type=str, dest='num_instance_list',
                        default='1', help='number of instances in respective of region')
    parser.add_argument('--region_config', type=str,
                        dest='region_config', default='configuration.txt')
    parser.add_argument('--instance_output', type=str, dest='instance_output',
                        default='instance_output.txt', help='the file to append or write')
    parser.add_argument('--instance_ids_output', type=str, dest='instance_ids_output',
                        default='instance_ids_output.txt', help='the file to append or write')
    parser.add_argument('--instance_resource', dest='instance_resource', type=InstanceResource,
                        default=InstanceResource.ON_DEMAND, choices=list(InstanceResource))
    # NOTE(review): type=bool makes any non-empty value (even "False")
    # truthy; consider action='store_true' — kept for CLI compatibility.
    parser.add_argument('--append', dest='append', type=bool, default=False,
                        help='append to the current instance_output')
    args = parser.parse_args()
    config = read_region_config(args.region_config)
    region_list = args.regions.split(',')
    num_instance_list = args.num_instance_list.split(',')
    instance_resource = args.instance_resource
    assert len(region_list) == len(num_instance_list), "number of regions: %d != number of instances per region: %d" % (
        len(region_list), len(num_instance_list))
    write_mode = "a" if args.append else "w"
    with open(args.instance_output, write_mode) as fout, open(args.instance_ids_output, write_mode) as fout2:
        thread_pool = []
        for i in range(len(region_list)):
            region_number = region_list[i]
            number_of_instances = num_instance_list[i]
            if instance_resource == InstanceResource.ON_DEMAND:
                t = threading.Thread(target=run_for_one_region_on_demand, args=(
                    config, region_number, number_of_instances, fout, fout2))
            elif instance_resource == InstanceResource.SPOT_FLEET:
                t = threading.Thread(target=spot_fleet.run_one_region, args=(
                    region_number, number_of_instances, fout, fout2))
            else:
                # BUG fix: SPOT_INSTANCE previously fell through and then
                # referenced an unbound `t`, raising NameError.
                LOGGER.info("Unsupported instance resource %s for region %s" %
                            (instance_resource, region_number))
                continue
            LOGGER.info("creating thread for region %s" % region_number)
            t.start()
            thread_pool.append(t)
        for t in thread_pool:
            t.join()
        LOGGER.info("done.")

@ -1,8 +0,0 @@
# Download the aggregated soldier-log upload folder from the commander.
# Change the commander address
if [ $# -eq 0 ]; then
echo "Please provide ip address of the commander"
exit 1
fi
ADDRESS=$1
mkdir -p ./tmp
# NOTE(review): hard-codes the california key; fails for commanders in
# other regions — confirm which key the commander host uses.
scp -r -i "california-key-benchmark.pem" ec2-user@$ADDRESS:~/projects/src/harmony-benchmark/bin/upload ./tmp/

@ -1,17 +0,0 @@
# Pull /home/tmp_log from every leader in leader_addresses.txt, trying each
# region's key in turn (we do not know which region an address lives in).
# Make sure to have all keys with mode 600 at harmony-benchmark directory.
IFS=$'\n'
rm -rf ./tmp
mkdir tmp
REGIONS="california frankfurt ireland ohio oregon singapore tokyo virginia"
for address in $(cat ./leader_addresses.txt)
do
  echo "trying to download from address $address"
  mkdir -p tmp/$address
  for region in $REGIONS
  do
    scp -r -o "StrictHostKeyChecking no" -i ../keys/$region-key-benchmark.pem ec2-user@$address:/home/tmp_log/* ./tmp/$address/
  done
done

@ -1,233 +0,0 @@
/*
Commander has two modes to setup configuration: Local and S3.
Local Config Mode
The Default Mode.
Add `-mode local` or omit `-mode` to enter local config mode. In this mode, the `commander` will host the config file `config.txt` on the commander machine and `solider`s will download the config file from `http://{commander_ip}:{commander_port}/distribution_config.txt`.
Remote Config Mode
Add `-mode remote` to enter remote config mode. In this mode, the `soldier`s will download the config file from a remote URL (use `-config_url {url}` to set the URL).
*/
package main
import (
"bufio"
"flag"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"strings"
"time"
"github.com/simple-rules/harmony-benchmark/aws-experiment-launch/experiment/utils"
"github.com/simple-rules/harmony-benchmark/configr"
)
// commanderSetting holds the commander's runtime configuration, populated
// from the command-line flags in main via config().
type commanderSetting struct {
ip string
port string
mode string
// Options in s3 mode
configURL string
configr *configr.Configr
}
// sessionInfo tracks the experiment session started by the "init" command.
type sessionInfo struct {
id string
// uploadFolder is where soldiers' log uploads for this session land.
uploadFolder string
}
var (
setting commanderSetting
session sessionInfo
)
const (
// DistributionFileName is the local file name of the node distribution config.
DistributionFileName = "distribution_config.txt"
// DefaultConfigUrl is the S3 location used in remote ("s3") config mode.
DefaultConfigUrl = "https://s3-us-west-2.amazonaws.com/unique-bucket-bin/distribution_config.txt"
)
// handleCommand executes one operator console command.
// "config" (re)loads the distribution config (downloading it first in s3
// mode), "init" starts a new session and broadcasts init to all soldiers,
// and ping/kill/log/log2 are relayed to every soldier verbatim.
func handleCommand(command string) {
args := strings.Split(command, " ")
if len(args) <= 0 {
return
}
switch cmd := args[0]; cmd {
case "config":
if setting.mode == "s3" {
// In s3 mode, download the config file from configURL first.
if err := utils.DownloadFile(DistributionFileName, setting.configURL); err != nil {
panic(err)
}
}
err := setting.configr.ReadConfigFile(DistributionFileName)
if err == nil {
log.Printf("The loaded config has %v nodes\n", len(setting.configr.GetConfigEntries()))
} else {
log.Println("Failed to read config file")
}
case "init":
// Session id is time-of-day then date: HHMMSS-YYYYMMDD.
session.id = time.Now().Format("150405-20060102")
// create upload folder
session.uploadFolder = fmt.Sprintf("upload/%s", session.id)
err := os.MkdirAll(session.uploadFolder, os.ModePerm)
if err != nil {
log.Println("Failed to create upload folder", session.uploadFolder)
return
}
log.Println("New session", session.id)
dictateNodes(fmt.Sprintf("init %v %v %v %v", setting.ip, setting.port, setting.configURL, session.id))
case "ping", "kill", "log", "log2":
dictateNodes(command)
default:
log.Println("Unknown command")
}
}
// config populates the global commander settings from the CLI flags.
// In local mode the config URL is rewritten to point at the file server
// hosted by this commander; otherwise the supplied URL is used as-is.
func config(ip string, port string, mode string, configURL string) {
	setting.ip = ip
	setting.port = port
	setting.mode = mode
	setting.configURL = configURL
	if mode == "local" {
		// Soldiers fetch the copy served by serve()'s file server.
		setting.configURL = fmt.Sprintf("http://%s:%s/%s", ip, port, DistributionFileName)
	}
	setting.configr = configr.NewConfigr()
}
// dictateNodes broadcasts command to every soldier in the loaded config,
// one goroutine per soldier, then logs how many replied successfully.
func dictateNodes(command string) {
resultChan := make(chan int)
configs := setting.configr.GetConfigEntries()
for _, entry := range configs {
port := "1" + entry.Port // the port number of solider is "1" + node port
addr := strings.Join([]string{entry.IP, port}, ":")
go func(resultChan chan int) {
resultChan <- dictateNode(addr, command)
}(resultChan)
}
// Collect one result (0 or 1) per soldier; res is the success count.
count := len(configs)
res := 0
for ; count > 0; count-- {
res += <-resultChan
}
log.Printf("Finished %s with %v nodes\n", command, res)
}
func dictateNode(addr string, command string) int {
// creates client
conn, err := net.DialTimeout("tcp", addr, 5*time.Second)
if err != nil {
log.Println(err)
return 0
}
defer conn.Close()
// send command
_, err = conn.Write([]byte(command))
if err != nil {
log.Printf("Failed to send command to %s", addr)
return 0
}
// log.Printf("Send \"%s\" to %s", command, addr)
// read response
buff := make([]byte, 1024)
if n, err := conn.Read(buff); err == nil {
received := string(buff[:n])
// log.Printf("Receive from %s: %s", addr, buff[:n])
if strings.Contains(received, "Failed") {
return 0
} else {
return 1
}
}
return 0
}
// handleUploadRequest accepts a multipart POST from a soldier and stores
// each uploaded part as a file in the current session's upload folder.
func handleUploadRequest(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		// reject non-post requests
		jsonResponse(w, http.StatusBadRequest, "Only post request is accepted.")
		return
	}
	reader, err := r.MultipartReader()
	if err != nil {
		jsonResponse(w, http.StatusBadRequest, err.Error())
		return
	}
	for {
		part, err := reader.NextPart()
		if err == io.EOF {
			break
		}
		if err != nil {
			// BUG fix: the original ignored non-EOF errors and went on to
			// dereference a nil part.
			jsonResponse(w, http.StatusBadRequest, err.Error())
			return
		}
		log.Println(part.FileName())
		dst, err := os.Create(fmt.Sprintf("%s/%s", session.uploadFolder, part.FileName()))
		if err != nil {
			jsonResponse(w, http.StatusInternalServerError, err.Error())
			return
		}
		_, err = io.Copy(dst, part)
		// BUG fix: close per iteration; the original defer-in-loop held
		// every file open until the request finished.
		dst.Close()
		if err != nil {
			jsonResponse(w, http.StatusInternalServerError, err.Error())
			return
		}
	}
}
func jsonResponse(w http.ResponseWriter, code int, message string) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
fmt.Fprint(w, message)
log.Println(message)
}
// serve registers the commander's HTTP endpoints (the config file server in
// local mode, plus /upload) and blocks in ListenAndServe.
func serve() {
	if setting.mode == "local" {
		// Only host config file if in local mode
		http.Handle("/", http.FileServer(http.Dir("./")))
	}
	http.HandleFunc("/upload", handleUploadRequest)
	// BUG fix: this log line used to come after ListenAndServe, which only
	// returns on failure, so it was unreachable.
	log.Printf("Start to host upload endpoint at http://%s:%s/upload\n", setting.ip, setting.port)
	err := http.ListenAndServe(":"+setting.port, nil)
	if err != nil {
		log.Fatalf("Failed to setup server! Error: %s", err.Error())
	}
}
// main parses the commander flags, starts the HTTP side in the background,
// and reads operator commands from stdin until EOF.
func main() {
	ip := flag.String("ip", "127.0.0.1", "The ip of commander, i.e. this machine")
	port := flag.String("port", "8080", "The port which the commander uses to communicate with soldiers")
	mode := flag.String("mode", "local", "The config mode, local or s3")
	configURL := flag.String("config_url", DefaultConfigUrl, "The config URL")
	flag.Parse()

	config(*ip, *port, *mode, *configURL)
	go serve()

	scanner := bufio.NewScanner(os.Stdin)
	for {
		log.Printf("Listening to Your Command:")
		if !scanner.Scan() {
			break
		}
		handleCommand(scanner.Text())
	}
}

@ -1,312 +0,0 @@
/*
Soldier is responsible for receiving commands from commander and doing tasks such as starting nodes, uploading logs.
cd harmony-benchmark/bin
go build -o soldier ../aws-experiment-launch/experiment/soldier/main.go
./soldier -ip={node_ip} -port={node_port}
*/
package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"mime/multipart"
"net"
"net/http"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/simple-rules/harmony-benchmark/aws-experiment-launch/experiment/soldier/s3"
"github.com/simple-rules/harmony-benchmark/aws-experiment-launch/experiment/utils"
"github.com/simple-rules/harmony-benchmark/configr"
globalUtils "github.com/simple-rules/harmony-benchmark/utils"
)
// soliderSetting holds this soldier's node identity, set from CLI flags.
type soliderSetting struct {
ip string
port string
}
// sessionInfo tracks the current experiment session; it is populated by
// the commander's "init" command.
type sessionInfo struct {
id string
commanderIP string
commanderPort string
localConfigFileName string
logFolder string
configr *configr.Configr
myConfig configr.ConfigEntry
}
const (
// bucketName is the S3 bucket used by the "log2" upload path.
bucketName = "richard-bucket-test"
// logFolderPrefix is where per-session log folders are created.
logFolderPrefix = "../tmp_log/"
)
var (
setting soliderSetting
globalSession sessionInfo
)
// socketServer listens on the soldier port ("1" + node port) and spawns a
// handler goroutine per commander connection. It blocks forever.
func socketServer() {
	soldierPort := "1" + setting.port // the soldier port is "1" + node port
	listen, err := net.Listen("tcp4", ":"+soldierPort)
	if err != nil {
		// Fatal: without the command socket the soldier is useless.
		// (Fatalf already exits; the original's trailing os.Exit(1) was
		// unreachable.)
		log.Fatalf("Socket listen port %s failed,%s", soldierPort, err)
	}
	defer listen.Close()
	log.Printf("Begin listen for command on port: %s", soldierPort)
	for {
		conn, err := listen.Accept()
		if err != nil {
			// BUG fix: log.Fatalln exited the whole process, making the
			// `continue` unreachable; one failed accept should not kill
			// the soldier.
			log.Println(err)
			continue
		}
		go handler(conn)
	}
}
// handler reads commands from one commander connection and dispatches each
// to handleCommand until EOF or a read error.
func handler(conn net.Conn) {
	defer conn.Close()
	var (
		buf = make([]byte, 1024)
		r   = bufio.NewReader(conn)
		w   = bufio.NewWriter(conn)
	)
	for {
		n, err := r.Read(buf)
		data := string(buf[:n])
		switch err {
		case io.EOF:
			return
		case nil:
			log.Println("Received command", data)
			handleCommand(data, w)
			log.Println("Waiting for new command...")
		default:
			// BUG fix: log.Fatalf terminated the entire soldier on a read
			// error from a single connection; drop just this connection.
			log.Printf("Receive data failed:%s", err)
			return
		}
	}
}
// handleCommand dispatches one commander instruction to its handler.
// Unrecognized commands are silently ignored.
func handleCommand(command string, w *bufio.Writer) {
	args := strings.Split(command, " ")
	if len(args) <= 0 {
		return
	}
	switch args[0] {
	case "ping":
		handlePingCommand(w)
	case "init":
		handleInitCommand(args[1:], w)
	case "kill":
		handleKillCommand(w)
	case "log":
		handleLogCommand(w)
	case "log2":
		handleLog2Command(w)
	}
}
// handleInitCommand processes "init ip port config_url session_id": it
// downloads the distribution config, locates this node's entry, and starts
// the node or client process, replying "Done init." or "Failed.".
func handleInitCommand(args []string, w *bufio.Writer) {
	// init ip port config_file sessionID
	log.Println("Init command", args)
	// read arguments
	globalSession.commanderIP = args[0]
	globalSession.commanderPort = args[1]
	configURL := args[2]
	globalSession.id = args[3]
	globalSession.logFolder = fmt.Sprintf("%slog-%v", logFolderPrefix, globalSession.id)
	// create local config file
	globalSession.localConfigFileName = fmt.Sprintf("node_config_%v_%v.txt", setting.port, globalSession.id)
	// BUG fix: the download error was ignored, so a failed fetch still
	// logged success and then ran against a stale or missing config.
	if err := utils.DownloadFile(globalSession.localConfigFileName, configURL); err != nil {
		log.Println("Failed to download config:", err)
		logAndReply(w, "Failed.")
		return
	}
	log.Println("Successfully downloaded config")
	globalSession.configr.ReadConfigFile(globalSession.localConfigFileName)
	// NOTE(review): assumes GetMyConfigEntry returns a non-nil entry for
	// this ip/port; a missing entry would panic here — confirm in configr.
	globalSession.myConfig = *globalSession.configr.GetMyConfigEntry(setting.ip, setting.port)
	if err := runInstance(); err == nil {
		logAndReply(w, "Done init.")
	} else {
		logAndReply(w, "Failed.")
	}
}
// handleKillCommand kills whatever listens on this soldier's node port and
// reports the outcome to the commander.
func handleKillCommand(w *bufio.Writer) {
	log.Println("Kill command")
	if err := killPort(setting.port); err == nil {
		logAndReply(w, "Done kill.")
	} else {
		logAndReply(w, "Failed.")
	}
}

// killPort force-kills the process listening on the given TCP port.
func killPort(port string) error {
	if runtime.GOOS == "windows" {
		// BUG fix (hedged): the original passed the PowerShell expression
		// as arguments to "Stop-Process" itself, which cannot execute; run
		// it through powershell instead. TODO confirm on a Windows host.
		command := fmt.Sprintf("Stop-Process -Id (Get-NetTCPConnection -LocalPort %s).OwningProcess -Force", port)
		return globalUtils.RunCmd("powershell", "-Command", command)
	}
	command := fmt.Sprintf("lsof -i tcp:%s | grep LISTEN | awk '{print $2}' | xargs kill -9", port)
	return globalUtils.RunCmd("bash", "-c", command)
}

// handlePingCommand answers a liveness probe from the commander.
func handlePingCommand(w *bufio.Writer) {
	log.Println("Ping command")
	logAndReply(w, "I'm alive")
}
// handleLogCommand uploads every file in the session log folder to the
// commander's /upload endpoint and reports the outcome.
func handleLogCommand(w *bufio.Writer) {
	log.Println("Log command")
	files, err := ioutil.ReadDir(globalSession.logFolder)
	if err != nil {
		logAndReply(w, fmt.Sprintf("Failed to read log folder. Error: %s", err.Error()))
		return
	}
	filePaths := make([]string, len(files))
	for i, f := range files {
		filePaths[i] = fmt.Sprintf("%s/%s", globalSession.logFolder, f.Name())
	}
	req, err := newUploadFileRequest(
		fmt.Sprintf("http://%s:%s/upload", globalSession.commanderIP, globalSession.commanderPort),
		"file",
		filePaths,
		nil)
	if err != nil {
		logAndReply(w, fmt.Sprintf("Failed to create upload request. Error: %s", err.Error()))
		return
	}
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		logAndReply(w, fmt.Sprintf("Failed to upload log. Error: %s", err.Error()))
		return
	}
	// BUG fix: the original discarded the response, leaking the body and
	// its underlying connection.
	resp.Body.Close()
	logAndReply(w, "Upload log done!")
}
// newUploadFileRequest builds a multipart POST request that uploads the
// files at paths under form field paramName, plus optional extra params.
func newUploadFileRequest(uri string, paramName string, paths []string, params map[string]string) (*http.Request, error) {
	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	for _, path := range paths {
		file, err := os.Open(path)
		if err != nil {
			return nil, err
		}
		part, err := writer.CreateFormFile(paramName, filepath.Base(path))
		if err != nil {
			file.Close()
			return nil, err
		}
		_, err = io.Copy(part, file)
		// BUG fix: close per file (the defer-in-loop held every file open)
		// and stop silently ignoring a failed copy.
		file.Close()
		if err != nil {
			return nil, err
		}
		// BUG fix: was log.Printf(path) — path was interpreted as a
		// format string.
		log.Println(path)
	}
	for key, val := range params {
		_ = writer.WriteField(key, val)
	}
	if err := writer.Close(); err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", uri, body)
	if err != nil {
		// BUG fix: the original set a header on the nil request returned
		// by a failed NewRequest, which would panic.
		return nil, err
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
	return req, nil
}
func logAndReply(w *bufio.Writer, message string) {
log.Println(message)
w.Write([]byte(message))
w.Flush()
}
// handleLog2Command uploads the first file in the session log folder to S3
// and reports the outcome to the commander.
func handleLog2Command(w *bufio.Writer) {
	log.Println("Log command")
	files, err := ioutil.ReadDir(globalSession.logFolder)
	if err != nil {
		logAndReply(w, fmt.Sprintf("Failed to create log folder. Error: %s", err.Error()))
		return
	}
	// BUG fix: the original indexed filePaths[0] unconditionally and
	// panicked when the log folder was empty.
	if len(files) == 0 {
		logAndReply(w, "Failed. No log files to upload.")
		return
	}
	filePaths := make([]string, len(files))
	for i, f := range files {
		filePaths[i] = fmt.Sprintf("%s/%s", globalSession.logFolder, f.Name())
	}
	// TODO: currently only upload the first file.
	_, err = s3.UploadFile(bucketName, filePaths[0], strings.Replace(filePaths[0], logFolderPrefix, "", 1))
	if err != nil {
		logAndReply(w, fmt.Sprintf("Failed to create upload request. Error: %s", err.Error()))
		return
	}
	logAndReply(w, "Upload log done!")
}
// runInstance creates the session log folder, then launches either the
// transaction-generator client or a benchmark node depending on this
// node's configured role.
func runInstance() error {
os.MkdirAll(globalSession.logFolder, os.ModePerm)
if globalSession.myConfig.Role == "client" {
return runClient()
}
return runNode()
}
// runNode starts the benchmark node binary with this soldier's ip/port.
func runNode() error {
log.Println("running instance")
return globalUtils.RunCmd("./benchmark", "-ip", setting.ip, "-port", setting.port, "-config_file", globalSession.localConfigFileName, "-log_folder", globalSession.logFolder)
}
// runClient starts the transaction generator binary.
func runClient() error {
log.Println("running client")
return globalUtils.RunCmd("./txgen", "-config_file", globalSession.localConfigFileName, "-log_folder", globalSession.logFolder)
}
// main parses this soldier's ip/port flags into the global setting and then
// serves commander commands over the socket server.
func main() {
	var (
		ip   = flag.String("ip", "127.0.0.1", "IP of the node.")
		port = flag.String("port", "9000", "port of the node.")
	)
	flag.Parse()
	setting.ip = *ip
	setting.port = *port
	socketServer()
}

@ -1,93 +0,0 @@
package s3
import (
"fmt"
"log"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
// createSession builds an AWS session pinned to us-west-2 with up to three
// retries per request; Must panics on misconfiguration.
func createSession() *session.Session {
	cfg := aws.Config{
		Region:     aws.String(endpoints.UsWest2RegionID),
		MaxRetries: aws.Int(3),
	}
	return session.Must(session.NewSession(&cfg))
}
// CreateBucket creates bucketName in the given AWS region. Failures are
// printed to stdout rather than returned; "already exists" and "already
// owned by you" are reported with their error codes.
func CreateBucket(bucketName string, region string) {
	sess := createSession()
	svc := s3.New(sess)
	input := &s3.CreateBucketInput{
		Bucket: aws.String(bucketName),
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			LocationConstraint: aws.String(region),
		},
	}
	result, err := svc.CreateBucket(input)
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case s3.ErrCodeBucketAlreadyExists:
				fmt.Println(s3.ErrCodeBucketAlreadyExists, aerr.Error())
			case s3.ErrCodeBucketAlreadyOwnedByYou:
				fmt.Println(s3.ErrCodeBucketAlreadyOwnedByYou, aerr.Error())
			default:
				fmt.Println(aerr.Error())
			}
		} else {
			// Print the error, cast err to awserr.Error to get the Code and
			// Message from an error.
			fmt.Println(err.Error())
		}
		return
	}
	fmt.Println(result)
}
// UploadFile uploads the local file fileName to bucketName under key and
// returns the s3manager upload result.
func UploadFile(bucketName string, fileName string, key string) (result *s3manager.UploadOutput, err error) {
	sess := createSession()
	uploader := s3manager.NewUploader(sess)
	f, err := os.Open(fileName)
	if err != nil {
		log.Println("Failed to open file", err)
		return nil, err
	}
	// BUG FIX: the original never closed f, leaking a descriptor per upload.
	defer f.Close()
	// Upload the file to S3.
	result, err = uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String(key),
		Body:   f,
	})
	if err != nil {
		log.Println("failed to upload file", err)
		return nil, err
	}
	fmt.Printf("file uploaded to, %s\n", result.Location)
	return result, nil
}
// DownloadFile downloads key from bucketName into local file fileName and
// returns the number of bytes written.
func DownloadFile(bucketName string, fileName string, key string) (n int64, err error) {
	sess := createSession()
	downloader := s3manager.NewDownloader(sess)
	f, err := os.Create(fileName)
	if err != nil {
		return
	}
	// BUG FIX: the original never closed f. Surface a close error only when
	// the download itself succeeded, so the first failure wins.
	defer func() {
		if cerr := f.Close(); cerr != nil && err == nil {
			err = cerr
		}
	}()
	n, err = downloader.Download(f, &s3.GetObjectInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String(key),
	})
	return
}

@ -1,30 +0,0 @@
package utils
import (
	"fmt"
	"io"
	"net/http"
	"os"
)
func DownloadFile(filepath string, url string) error {
// Create the file
out, err := os.Create(filepath)
if err != nil {
return err
}
defer out.Close()
// Get the data
resp, err := http.Get(url)
if err != nil {
return err
}
defer resp.Body.Close()
// Write the body to file
_, err = io.Copy(out, resp.Body)
if err != nil {
return err
}
return nil
}

@ -1,37 +0,0 @@
import argparse
import logging
import sys
from utils import utils
logging.basicConfig(level=logging.INFO, format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s')
LOGGER = logging.getLogger(__file__)
LOGGER.setLevel(logging.INFO)
if __name__ == "__main__":
    # Builds the node-distribution config either from a raw IP list file or,
    # when no file is given, by querying EC2 for tagged instances.
    parser = argparse.ArgumentParser(
        description='This script helps you to generate distribution config')  # BUG FIX: typo "genereate"
    parser.add_argument('--ip_list_file', type=str, dest='ip_list_file',
                        default='raw_ip.txt', help="the file containing available raw ips")
    # If the ip_list_file is None we need to use the region, node_name_tag and
    # region_config to collect raw_ip
    parser.add_argument('--region', type=str, dest='region_number',
                        default="4", help="region number")
    parser.add_argument('--node_name_tag', type=str,
                        dest='node_name_tag', default='4-NODE-23-36-01-2018-07-05')
    parser.add_argument('--region_config', type=str,
                        dest='region_config', default='configuration.txt')
    parser.add_argument('--shard_number', type=int, dest='shard_number', default=1)
    parser.add_argument('--client_number', type=int, dest='client_number', default=1)
    parser.add_argument('--distribution_config', type=str,
                        dest='distribution_config', default='distribution_config.txt')
    args = parser.parse_args()
    # BUG FIX: compare to None with `is`, not `==` (PEP 8).
    if args.ip_list_file is None:
        utils.generate_distribution_config2(
            args.region_number, args.node_name_tag, args.region_config,
            args.shard_number, args.client_number, args.distribution_config)
    else:
        utils.generate_distribution_config3(args.shard_number, args.client_number,
                                            args.ip_list_file, args.distribution_config)
    LOGGER.info("Done writing %s" % args.distribution_config)

@ -1 +0,0 @@
scp -i ../keys/ohio-key-benchmark.pem ec2-user@18.219.217.193:~/projects/src/harmony-benchmark/bin/upload tmp/

@ -1,96 +0,0 @@
import json
import sys
import os
import argparse
def formatFloat(v):
    """Render a number with two decimal places."""
    return "%.2f" % v


def formatPercent(v):
    """Render a number as a two-decimal percentage, e.g. '12.34%'."""
    return formatFloat(v) + "%"


def formatMem(v):
    """Render a byte count as megabytes (decimal, 10^6), e.g. '1.50MB'."""
    megabytes = float(v) / 10**6
    return formatFloat(megabytes) + "MB"
class Profiler:
    """Aggregates TPS, CPU, and memory report records from a benchmark log."""

    def __init__(self):
        # Throughput accumulators.
        self.tps = 0
        self.tps_max = 0
        self.tps_min = sys.maxsize
        self.tps_count = 0
        # CPU accumulators.
        self.cpu_percent = 0
        self.cpu_usr = 0
        self.cpu_sys = 0
        self.cpu_count = 0
        # Memory accumulators.
        self.mem_rss = 0
        self.mem_rss_max = 0
        self.mem_count = 0

    def handleTPS(self, obj):
        """Fold one TPS record into the running sum/min/max."""
        sample = obj["TPS"]
        self.tps += sample
        if sample > self.tps_max:
            self.tps_max = sample
        if sample < self.tps_min:
            self.tps_min = sample
        self.tps_count += 1

    def handleCPU(self, obj):
        """Fold one CPU record in; usr/sys times are overwritten with the latest sample."""
        # http://psutil.readthedocs.io/en/latest/#psutil.Process.cpu_times
        # https://stackoverflow.com/questions/556405/what-do-real-user-and-sys-mean-in-the-output-of-time1
        # http://psutil.readthedocs.io/en/latest/#psutil.Process.cpu_percent
        self.cpu_percent += obj["percent"]
        times = json.loads(obj["times"])
        self.cpu_usr = times["user"]
        self.cpu_sys = times["system"]
        self.cpu_count += 1

    def handleMem(self, obj):
        """Fold one memory record (JSON-encoded psutil memory_info) in."""
        # http://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info
        info = json.loads(obj["info"])
        rss = info["rss"]
        self.mem_rss += rss
        if rss > self.mem_rss_max:
            self.mem_rss_max = rss
        self.mem_count += 1

    def report(self):
        """Print averaged/min/max statistics over everything seen so far."""
        print("TPS",
              "Avg", formatFloat(self.tps / self.tps_count),
              "Min", formatFloat(self.tps_min),
              "Max", formatFloat(self.tps_max))
        print("CPU",
              "Percent (Avg)", formatPercent(self.cpu_percent / self.cpu_count),
              "Time (Usr)", str(self.cpu_usr) + "s",
              "Time (Sys)", str(self.cpu_sys) + "s")
        print("Mem",
              "RSS (Max)", formatMem(self.mem_rss_max),
              "RSS (Avg)", formatMem(self.mem_rss / self.mem_count))
def profileFile(path):
    """Parse one log file (one JSON record per line) and print its report."""
    print(path)
    profiler = Profiler()
    # Map log messages to the Profiler method that consumes them.
    handlers = {
        "TPS Report": Profiler.handleTPS,
        "CPU Report": Profiler.handleCPU,
        "Mem Report": Profiler.handleMem,
    }
    with open(path) as f:
        for line in f:
            record = json.loads(line)
            if record["lvl"] != "info":
                continue
            handler = handlers.get(record["msg"])
            if handler is not None:
                handler(profiler, record)
    profiler.report()
# Example: python report_extractor.py --folder ../tmp_log/log-20180713-205431
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="This script extracts reports from log files")
    parser.add_argument("--folder", type=str, dest="folder",
                        default="",
                        help="the path to the log folder")
    args = parser.parse_args()
    # Only files whose name contains "leader" are profiled.
    for filename in os.listdir(args.folder):
        if "leader" in filename:
            profileFile(os.path.join(args.folder, filename))

@ -1,18 +0,0 @@
# Request one 60-minute spot instance for the commander role.
# The launch specification is inline JSON; UserData must be base64-encoded.
aws ec2 request-spot-instances \
    --instance-count 1 \
    --block-duration-minutes 60 \
    --launch-specification "{ \
        \"ImageId\": \"ami-f2d3638a\", \
        \"InstanceType\": \"m3.medium\", \
        \"SecurityGroups\": [ \
            \"richard-spot-instance SSH\" \
        ], \
        \"KeyName\": \"richard-spot-instance\", \
        \"IamInstanceProfile\": { \
            \"Name\": \"BenchMarkCodeDeployInstanceProfile\" \
        }, \
        \"UserData\": \"`base64 ../configs/userdata-commander.sh`\" \
    }" \
    #--dry-run # uncomment to validate the request without actually sending it.
    # Note: on windows, you need to add `-w 0` to the base64 command"

@ -1,10 +0,0 @@
# Launch one on-demand t2.micro with the CodeDeploy instance profile; the
# commander user-data script bootstraps the host on first boot.
aws ec2 run-instances \
    --count 1 \
    --image-id "ami-f2d3638a" \
    --instance-type "t2.micro" \
    --key-name "richard-spot-instance" \
    --security-groups "richard-spot-instance SSH" \
    --iam-instance-profile "{ \
        \"Name\": \"BenchMarkCodeDeployInstanceProfile\" \
    }" \
    --user-data "`base64 ../configs/userdata-commander.sh`"

@ -1,105 +0,0 @@
import argparse
import logging
import os
import random
import sys
import threading
import boto3
logging.basicConfig(level=logging.INFO, format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s')
LOGGER = logging.getLogger(__file__)
LOGGER.setLevel(logging.INFO)
REGION_NAME = 'region_name'
REGION_KEY = 'region_key'
REGION_SECURITY_GROUP = 'region_security_group'
REGION_SECURITY_GROUP_ID = 'region_security_group_id'
REGION_HUMAN_NAME = 'region_human_name'
INSTANCE_TYPE = 't2.micro'
REGION_AMI = 'region_ami'
def read_configuration_file(filename):
    """Parse the per-region CSV file into {region_number: {field: value}}.

    Each line is: region_num,name,key,security_group,human_name,ami,sg_id.
    """
    config = {}
    with open(filename, 'r') as f:
        for raw_line in f:
            columns = raw_line.strip().split(',')
            region_num = columns[0]
            config[region_num] = {
                REGION_NAME: columns[1],
                REGION_KEY: columns[2],
                REGION_SECURITY_GROUP: columns[3],
                REGION_HUMAN_NAME: columns[4],
                REGION_AMI: columns[5],
                REGION_SECURITY_GROUP_ID: columns[6],
            }
    return config
def get_instance_ids(describe_instances_response):
    """Collect every InstanceId present in a describe_instances response."""
    ids = []
    if describe_instances_response["Reservations"]:
        for reservation in describe_instances_response["Reservations"]:
            for instance in reservation["Instances"]:
                if instance.get("InstanceId"):
                    ids.append(instance["InstanceId"])
    return ids
def get_instance_ids2(ec2_client, node_name_tag):
    """Return ids of instances whose Name tag equals node_name_tag."""
    # BUG FIX: `time` is not imported at the top of this script, so the
    # original sleep raised NameError; import locally to keep the fix
    # self-contained.
    import time
    # Short fixed delay before querying -- presumably to let newly created
    # instances/tags settle; confirm whether it is still needed.
    time.sleep(5)
    filters = [{'Name': 'tag:Name', 'Values': [node_name_tag]}]
    return get_instance_ids(ec2_client.describe_instances(Filters=filters))
def create_ec2_client(region_number, region_config):
    """Return (ec2_client, session) for the region mapped to region_number."""
    region_name = read_configuration_file(region_config)[region_number][REGION_NAME]
    session = boto3.Session(region_name=region_name)
    return session.client('ec2'), session
def terminate_instances_by_region(region_number, region_config, node_name_tag):
    """Terminate every instance tagged Name=node_name_tag in one region and
    block until EC2 reports them terminated."""
    ec2_client, _ = create_ec2_client(region_number, region_config)
    filters = [{'Name': 'tag:Name', 'Values': [node_name_tag]}]
    instance_ids = get_instance_ids(ec2_client.describe_instances(Filters=filters))
    # BUG FIX: the original ended with a dangling `else: pass` followed by the
    # warning; restructured as a guard clause. logging's warn() is deprecated
    # in favor of warning().
    if not instance_ids:
        LOGGER.warning("there is no instances to terminate")
        return
    ec2_client.terminate_instances(InstanceIds=instance_ids)
    LOGGER.info("waiting until instances with tag %s died." % node_name_tag)
    waiter = ec2_client.get_waiter('instance_terminated')
    waiter.wait(InstanceIds=instance_ids)
    LOGGER.info("instances with node name tag %s terminated." % node_name_tag)
if __name__ == "__main__":
    # NOTE(review): the help text says "collect public ips" but this script
    # terminates instances -- confirm and fix the description.
    parser = argparse.ArgumentParser(description='This script helps you to collect public ips')
    parser.add_argument('--instance_output', type=str, dest='instance_output',
                        default='instance_output.txt',
                        help='the file contains node_name_tag and region number of created instances.')
    parser.add_argument('--node_name_tag', type=str, dest='node_name_tag')
    parser.add_argument('--region_config', type=str,
                        dest='region_config', default='configuration.txt')
    args = parser.parse_args()
    if args.node_name_tag:
        # Comma-separated tags; each tag's first character is its region number.
        node_name_tag_items = args.node_name_tag.split(",")
        region_number_items = [item[:1] for item in node_name_tag_items]
        # One termination thread per (region, tag) pair.
        thread_pool = []
        for i in range(len(region_number_items)):
            region_number = region_number_items[i]
            node_name_tag = node_name_tag_items[i]
            t = threading.Thread(target=terminate_instances_by_region, args=(region_number, args.region_config, node_name_tag))
            t.start()
            thread_pool.append(t)
        for t in thread_pool:
            t.join()
        LOGGER.info("done.")
    elif args.instance_output:
        # Fallback: read "<node_name_tag> <region_number>" pairs from the file.
        with open(args.instance_output, "r") as fin:
            thread_pool = []
            for line in fin.readlines():
                items = line.split(" ")
                region_number = items[1].strip()
                node_name_tag = items[0].strip()
                t = threading.Thread(target=terminate_instances_by_region, args=(region_number, args.region_config, node_name_tag))
                t.start()
                thread_pool.append(t)
            for t in thread_pool:
                t.join()
            LOGGER.info("done.")

@ -1,15 +0,0 @@
import boto3
import os

# Upload every benchmark binary from the local Go workspace to the shared
# public S3 bucket. Credentials come from the default boto3 chain.
GOHOME = '/Users/alok/Documents/goworkspace/'
s3 = boto3.client('s3')
bucket_name = 'unique-bucket-bin'
#response = s3.create_bucket(Bucket=bucket_name,ACL='public-read-write', CreateBucketConfiguration={'LocationConstraint': 'us-west-2'})
response = s3.list_buckets()
buckets = [bucket['Name'] for bucket in response['Buckets']]
print("Bucket List: %s" % buckets)
dirname = GOHOME + 'src/harmony-benchmark/bin/'
for myfile in os.listdir(dirname):
    # BUG FIX: the original opened distribution_config.txt and then
    # immediately rebound `f` to the binary, leaking both handles and never
    # closing the binary. Open the binary directly, in binary mode so
    # non-UTF-8 executables upload intact.
    with open(os.path.join(dirname, myfile), 'rb') as f:
        response = s3.put_object(ACL='public-read-write', Body=f.read(), Bucket=bucket_name, Key=myfile)
    print(response)

@ -1,10 +0,0 @@
import boto3
import os
#from boto3.session import Session

# Upload the generated distribution config to the shared public bucket so
# soldier hosts can fetch it. Credentials come from the default boto3 chain.
s3 = boto3.client('s3')
bucket_name = 'unique-bucket-bin'
#response = s3.create_bucket(Bucket=bucket_name,ACL='public-read-write', CreateBucketConfiguration={'LocationConstraint': 'us-west-2'})
myfile = 'distribution_config.txt'
with open('distribution_config.txt','r') as f:
    response = s3.put_object(ACL='public-read-write',Body=f.read(),Bucket=bucket_name,Key=myfile)
print(response)

@ -1,9 +0,0 @@
import boto3

# Presumably a one-off S3 smoke test: create a public bucket, list all
# buckets, and upload a single local file.
s3 = boto3.client('s3')
bucket_name = 'first-try'
s3.create_bucket(Bucket=bucket_name,ACL='public-read-write')
response = s3.list_buckets()
buckets = [bucket['Name'] for bucket in response['Buckets']]
print("Bucket List: %s" % buckets)
filename='myfirst.txt'
s3.upload_file(filename, bucket_name, filename)

@ -1,11 +0,0 @@
#!/bin/bash -x
# EC2 user-data for the commander host: installs the region-local CodeDeploy
# agent and prepares the Go workspace directories.
REGION=$(curl 169.254.169.254/latest/meta-data/placement/availability-zone/ | sed 's/[a-z]$//')
#yum update -y #This breaking codedeploy right now
yum install ruby wget -y
cd /home/ec2-user
touch yum-not-updated.txt
# Install the AWS CodeDeploy agent published for this region.
wget https://aws-codedeploy-$REGION.s3.amazonaws.com/latest/install
chmod +x ./install
./install auto
mkdir projects
mkdir projects/src

@ -1,31 +0,0 @@
#!/bin/bash
# EC2 user-data for soldier hosts: fetch prebuilt binaries, raise resource
# limits, then start the soldier agent on port 1<NODE_PORT>.
yum install ruby -y
cd /home/ec2-user/
# Fetch prebuilt binaries from the public bucket.
curl http://unique-bucket-bin.s3.amazonaws.com/txgen -o txgen
curl http://unique-bucket-bin.s3.amazonaws.com/soldier -o soldier
curl http://unique-bucket-bin.s3.amazonaws.com/benchmark -o benchmark
chmod +x ./soldier
chmod +x ./txgen
# NOTE(review): commander and kill_node.sh are chmod'ed but never downloaded
# above -- confirm they are baked into the AMI or fetched elsewhere.
chmod +x ./commander
chmod +x ./kill_node.sh
# Raise process/file-descriptor limits for the benchmark workload.
echo "* soft nproc 65535" | sudo tee -a /etc/security/limits.conf
echo "* hard nproc 65535" | sudo tee -a /etc/security/limits.conf
echo "* soft nofile 65535" | sudo tee -a /etc/security/limits.conf
echo "* hard nofile 65535" | sudo tee -a /etc/security/limits.conf
echo "root soft nproc 65535" | sudo tee -a /etc/security/limits.conf
echo "root hard nproc 65535" | sudo tee -a /etc/security/limits.conf
echo "root soft nofile 65535" | sudo tee -a /etc/security/limits.conf
echo "root hard nofile 65535" | sudo tee -a /etc/security/limits.conf
echo "session required pam_limits.so" | sudo tee -a /etc/pam.d/common-session
# Get My IP
ip=`curl http://169.254.169.254/latest/meta-data/public-ipv4`
NODE_PORT=9000
SOLDIER_PORT=1$NODE_PORT
# Kill existing soldier/node
fuser -k -n tcp $SOLDIER_PORT
fuser -k -n tcp $NODE_PORT
# Run soldier
./soldier -ip $ip -port $NODE_PORT > soldier_log 2>&1 &

@ -1,8 +0,0 @@
1,us-east-1,virginia-key-benchmark,virginia-security-group,virginia,ami-b70554c8,sg-04d0b62ee08ce8800
2,us-east-2,ohio-key-benchmark,ohio-security-group,ohio,ami-8c122be9,sg-0789078f1c76defbe
3,us-west-1,california-key-benchmark,california-security-group,california,ami-e0ba5c83,sg-0a66ccb6ab9161a14
4,us-west-2,oregon-key-benchmark,oregon-security-group,oregon,ami-a9d09ed1,sg-020cb5729fa212d43
5,ap-northeast-1,tokyo-key-benchmark,tokyo-security-group,tokyo,ami-e99f4896,sg-009aeb97f675c1ad5
6,ap-southeast-1,singapore-key-benchmark,singapore-security-group,singapore,ami-05868579,sg-05f9b60044a19dfb2
7,eu-central-1,frankfurt-key-benchmark,frankfurt-security-group,frankfurt,ami-7c4f7097,sg-0bb06fcd8b25b5910
8,eu-west-1,ireland-key-benchmark,ireland-security-group,ireland,ami-466768ac,sg-0aa8954acb79fdb58

@ -1,46 +0,0 @@
import utils
def get_launch_template_name(region_number):
    """Launch template name is 'benchmark-<region name>'."""
    region_name = utils.CONFIG[region_number][utils.REGION_NAME]
    return 'benchmark-' + region_name
def create(ec2_client, region_number):
    """Create the EC2 launch template for region_number.

    The template carries the CodeDeploy instance profile, region AMI/key/
    security group from utils.CONFIG, and the base64 user-data; instance
    type is left for the caller (e.g. spot-fleet overrides) to choose.
    """
    return ec2_client.create_launch_template(
        # DryRun=True,
        LaunchTemplateName=get_launch_template_name(region_number),
        LaunchTemplateData={
            'IamInstanceProfile': {
                'Name': utils.IAM_INSTANCE_PROFILE
            },
            'ImageId': utils.CONFIG[region_number][utils.REGION_AMI],
            # 'InstanceType': instance_type,
            'KeyName': utils.CONFIG[region_number][utils.REGION_KEY],
            'UserData': utils.USER_DATA_BASE64,
            'SecurityGroupIds': [
                utils.CONFIG[region_number][utils.REGION_SECURITY_GROUP_ID]
            ],
            # 'InstanceInitiatedShutdownBehavior': 'stop',
            'TagSpecifications': [
                {
                    'ResourceType': 'instance',
                    'Tags': [
                        {
                            'Key': 'LaunchTemplate',
                            'Value': 'Yes'
                        }
                    ]
                }
            ],
            # 'InstanceMarketOptions': {
            #     'MarketType': 'spot',
            #     'SpotOptions': {
            #         'MaxPrice': 'string',
            #         'SpotInstanceType': 'one-time'|'persistent',
            #         'BlockDurationMinutes': 123,
            #         'InstanceInterruptionBehavior': 'hibernate'|'stop'|'terminate'
            #     }
            # },
        }
    )

@ -1,9 +0,0 @@
import logging
logging.basicConfig(level=logging.INFO,
format='%(threadName)s %(asctime)s - %(name)s - %(levelname)s - %(message)s')
def getLogger(file):
    """Return the named logger with its level forced to INFO."""
    named_logger = logging.getLogger(file)
    named_logger.setLevel(logging.INFO)
    return named_logger

@ -1,119 +0,0 @@
import utils
import logger
import launch_template
LOGGER = logger.getLogger(__file__)
def create_launch_specification(region_number, instanceType):
    """Build one spot-fleet launch specification for a region/instance type.

    Combines the shared instance profile and user-data with the region's
    security group id, AMI, and key pair from utils.CONFIG, and tags the
    instance with this session's node name tag.
    """
    return {
        # Region irrelevant fields
        'IamInstanceProfile': {
            'Name': utils.IAM_INSTANCE_PROFILE
        },
        'InstanceType': instanceType,
        'UserData': utils.USER_DATA_BASE64,
        # Region relevant fields
        'SecurityGroups': [
            {
                # In certain scenarios, we have to use group id instead of group name
                # https://github.com/boto/boto/issues/350#issuecomment-27359492
                'GroupId': utils.CONFIG[region_number][utils.REGION_SECURITY_GROUP_ID]
            }
        ],
        'ImageId': utils.CONFIG[region_number][utils.REGION_AMI],
        'KeyName': utils.CONFIG[region_number][utils.REGION_KEY],
        'TagSpecifications': [
            {
                'ResourceType': 'instance',
                'Tags': [
                    {
                        'Key': 'Name',
                        'Value': utils.get_node_name_tag(region_number)
                    }
                ]
            }
        ],
        # 'WeightedCapacity': 123.0,
        # 'Placement': {
        #     # 'AvailabilityZone': get_one_availability_zone(ec2_client)
        # }
    }
def create_launch_specification_list(region_number, instance_type_list):
    """One launch specification per requested instance type."""
    return [create_launch_specification(region_number, t) for t in instance_type_list]


def get_launch_template(region_number, instance_type):
    """Reference version 1 of the region's launch template, overriding only the instance type."""
    return {
        'LaunchTemplateSpecification': {
            'LaunchTemplateName': launch_template.get_launch_template_name(region_number),
            'Version': '1'
        },
        'Overrides': [
            {
                'InstanceType': instance_type
            }
        ]
    }


def get_launch_template_list(region_number, instance_type_list):
    """One launch-template reference per requested instance type."""
    return [get_launch_template(region_number, t) for t in instance_type_list]
def request_spot_fleet(ec2_client, region_number, number_of_instances, instance_type_list):
    """Submit a diversified spot-fleet request using inline launch
    specifications and return its SpotFleetRequestId."""
    LOGGER.info("Requesting spot fleet")
    LOGGER.info("Creating node_name_tag: %s" %
                utils.get_node_name_tag(region_number))
    # https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.request_spot_fleet
    response = ec2_client.request_spot_fleet(
        # DryRun=True,
        SpotFleetRequestConfig={
            # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet.html#spot-fleet-allocation-strategy
            'AllocationStrategy': 'diversified',
            'IamFleetRole': 'arn:aws:iam::656503231766:role/RichardFleetRole',
            'LaunchSpecifications': create_launch_specification_list(region_number, instance_type_list),
            # 'SpotPrice': 'string', # The maximum price per unit hour that you are willing to pay for a Spot Instance. The default is the On-Demand price.
            'TargetCapacity': number_of_instances,
            'Type': 'maintain'
        }
    )
    return response["SpotFleetRequestId"]


def request_spot_fleet_with_on_demand(ec2_client, region_number, number_of_instances, number_of_on_demand, instance_type_list):
    """Submit a spot-fleet request via launch templates with an on-demand
    floor of number_of_on_demand instances.

    NOTE(review): unlike request_spot_fleet above, this returns the full
    response, not just the request id -- confirm callers expect that.
    """
    LOGGER.info("Requesting spot fleet")
    LOGGER.info("Creating node_name_tag: %s" %
                utils.get_node_name_tag(region_number))
    # https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.request_spot_fleet
    response = ec2_client.request_spot_fleet(
        # DryRun=True,
        SpotFleetRequestConfig={
            # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet.html#spot-fleet-allocation-strategy
            'AllocationStrategy': 'diversified',
            'IamFleetRole': 'arn:aws:iam::656503231766:role/RichardFleetRole',
            'LaunchTemplateConfigs': get_launch_template_list(region_number, instance_type_list),
            # 'SpotPrice': 'string', # The maximum price per unit hour that you are willing to pay for a Spot Instance. The default is the On-Demand price.
            'TargetCapacity': number_of_instances,
            'OnDemandTargetCapacity': number_of_on_demand,
            'Type': 'maintain'
        }
    )
    return response
def get_instance_ids(client, request_id):
    """Ids of instances currently active in the given spot-fleet request."""
    response = client.describe_spot_fleet_instances(
        SpotFleetRequestId=request_id
    )
    ids = []
    for active in response["ActiveInstances"]:
        ids.append(active["InstanceId"])
    return ids
def run_one_region(region_number, number_of_instances, fout, fout2):
    """Request a spot fleet in one region and print the (initially empty)
    instance id list.

    NOTE(review): fout and fout2 are accepted but never used here.
    """
    client = utils.create_client(utils.CONFIG, region_number)
    instance_type_list = ['t2.micro', 't2.small', 'm3.medium']
    # node_name_tag = request_spot_fleet_with_on_demand(
    #     client, region_number, int(number_of_instances), 1, instance_type_list)
    request_id = request_spot_fleet(
        client, region_number, int(number_of_instances), instance_type_list)
    instance_ids = get_instance_ids(client, request_id)
    print(instance_ids) # TODO@ricl, no data here since the request is not fulfilled.

@ -1,210 +0,0 @@
import boto3
import datetime
import json
import sys
import time
import base64
import threading
MAX_INTANCES_FOR_WAITER = 100
MAX_INSTANCES_FOR_DEPLOYMENT = 500
REGION_NAME = 'region_name'
REGION_KEY = 'region_key'
REGION_SECURITY_GROUP = 'region_security_group'
REGION_SECURITY_GROUP_ID = 'region_security_group_id'
REGION_HUMAN_NAME = 'region_human_name'
INSTANCE_TYPE = 't2.micro'
REGION_AMI = 'region_ami'
IAM_INSTANCE_PROFILE = 'BenchMarkCodeDeployInstanceProfile'
time_stamp = time.time()
CURRENT_SESSION = datetime.datetime.fromtimestamp(
time_stamp).strftime('%H-%M-%S-%Y-%m-%d')
NODE_NAME_SUFFIX = "NODE-" + CURRENT_SESSION
def get_node_name_tag(region_number):
    """Name tag for this session's nodes in a region: '<region>-NODE-<timestamp>'."""
    return "%s-%s" % (region_number, NODE_NAME_SUFFIX)


def get_node_name_tag2(region_number, tag):
    """Like get_node_name_tag, with an extra suffix to distinguish sub-groups."""
    return "%s-%s-%s" % (region_number, NODE_NAME_SUFFIX, str(tag))
# Load the soldier bootstrap script once at import time.
with open("userdata-soldier.sh", "r") as userdata_file:
    USER_DATA = userdata_file.read()
# UserData must be base64 encoded for spot instances.
# BUG FIX (py3): b64encode requires bytes; encode the script and decode the
# result back so the constant stays a str for the EC2 API calls.
USER_DATA_BASE64 = base64.b64encode(USER_DATA.encode()).decode()
def create_client(config, region_number):
    """Create an EC2 client bound to the region configured for region_number."""
    region_name = config[region_number][REGION_NAME]
    # A fresh per-region session keeps clients independent.
    return boto3.Session(region_name=region_name).client('ec2')
def read_region_config(region_config='configuration.txt'):
    """Load the region CSV into {region_number: {field: value}}.

    Also caches the result in the module-level CONFIG for other helpers.
    """
    global CONFIG
    parsed = {}
    with open(region_config, 'r') as f:
        for raw_line in f:
            columns = [item.strip() for item in raw_line.strip().split(',')]
            region_num = columns[0]
            parsed[region_num] = {
                REGION_NAME: columns[1],
                REGION_KEY: columns[2],
                REGION_SECURITY_GROUP: columns[3],
                REGION_HUMAN_NAME: columns[4],
                REGION_AMI: columns[5],
                REGION_SECURITY_GROUP_ID: columns[6],
            }
    CONFIG = parsed
    return parsed
def get_ip_list(response):
    """Public IPs (None where absent) of instances in a run_instances-style response."""
    instances = response.get('Instances', None)
    if not instances:
        return []
    return [inst.get('PublicIpAddress', None) for inst in instances]
def create_ec2_client(region_number, region_config):
    """Return (ec2_client, session) for the region mapped to region_number."""
    region_name = read_region_config(region_config)[region_number][REGION_NAME]
    session = boto3.Session(region_name=region_name)
    return session.client('ec2'), session
def collect_public_ips_from_ec2_client(ec2_client, node_name_tag):
    """Public IPs of every instance carrying Name=node_name_tag."""
    name_filter = [{'Name': 'tag:Name', 'Values': [node_name_tag]}]
    response = ec2_client.describe_instances(Filters=name_filter)
    ip_list = []
    if response.get('Reservations'):
        for reservation in response[u'Reservations']:
            for instance in reservation['Instances']:
                if instance.get('PublicIpAddress'):
                    ip_list.append(instance['PublicIpAddress'])
    return ip_list
def collect_public_ips(region_number, node_name_tag, region_config):
    """Public IPs of tagged instances in a single region."""
    ec2_client, _ = create_ec2_client(region_number, region_config)
    return collect_public_ips_from_ec2_client(ec2_client, node_name_tag)
def get_application(codedeploy, application_name):
    """Return the application listing if application_name exists, otherwise
    create the application and return the create response."""
    listing = codedeploy.list_applications()
    if application_name in listing['applications']:
        return listing
    return codedeploy.create_application(
        applicationName=application_name,
        computePlatform='Server'
    )
def create_deployment_group(codedeploy, region_number,application_name, deployment_group_name, node_name_tag):
    """Create an in-place, all-at-once deployment group targeting instances
    tagged Name=node_name_tag.

    Returns the new deploymentGroupId, or None if a group with this name
    already exists for the application.

    NOTE(review): region_number is accepted but never used here.
    """
    response = codedeploy.list_deployment_groups(applicationName=application_name)
    if response.get('deploymentGroups') and (deployment_group_name in response['deploymentGroups']):
        return None
    else:
        response = codedeploy.create_deployment_group(
            applicationName=application_name,
            deploymentGroupName=deployment_group_name,
            deploymentConfigName='CodeDeployDefault.AllAtOnce',
            serviceRoleArn='arn:aws:iam::656503231766:role/BenchMarkCodeDeployServiceRole',
            deploymentStyle={
                'deploymentType': 'IN_PLACE',
                'deploymentOption': 'WITHOUT_TRAFFIC_CONTROL'
            },
            ec2TagFilters = [
                {
                    'Key': 'Name',
                    'Value': node_name_tag,
                    'Type': 'KEY_AND_VALUE'
                }
            ]
        )
        return response['deploymentGroupId']
def generate_distribution_config2(region_number, node_name_tag, region_config,
                                  shard_number, client_number, distribution_config):
    """Build the distribution config from IPs discovered live on EC2."""
    ip_list = collect_public_ips(region_number, node_name_tag, region_config)
    generate_distribution_config(shard_number, client_number, ip_list, distribution_config)


def generate_distribution_config3(shard_number, client_number, ip_list_file, distribution_config):
    """Build the distribution config from IPs listed one-per-line in a file."""
    with open(ip_list_file, "r") as fin:
        ip_list = [line.strip() for line in fin.readlines()]
    generate_distribution_config(shard_number, client_number, ip_list, distribution_config)
def generate_distribution_config(shard_number, client_number, ip_list, distribution_config):
    """Assign roles to hosts and write '<ip> 9000 <role> <shard> <tag>' lines.

    Each ip_list entry is '<ip> <node_name_tag>'. Assignment order: one
    commander, then shard_number leaders, then validators (round-robin over
    shards), then clients. Returns False (without writing) when there are
    fewer than shard_number*2 + client_number + 1 hosts.
    """
    if len(ip_list) < shard_number * 2 + client_number + 1:
        print("Not enough nodes to generate a config file")
        return False
    # Create ip for clients.
    commander_id, client_id, leader_id, validator_id = 0, 0, 0, 0
    validator_number = len(ip_list) - client_number - shard_number - 1
    with open(distribution_config, "w") as fout:
        for entry in ip_list:
            ip, node_name_tag = entry.split(" ")
            if commander_id < 1:
                fout.write("%s 9000 commander %d %s\n" % (ip, commander_id, node_name_tag))
                commander_id += 1
            elif leader_id < shard_number:
                fout.write("%s 9000 leader %d %s\n" % (ip, leader_id, node_name_tag))
                leader_id += 1
            elif validator_id < validator_number:
                fout.write("%s 9000 validator %d %s\n" % (ip, validator_id % shard_number, node_name_tag))
                validator_id += 1
            else:
                fout.write("%s 9000 client %d %s\n" % (ip, client_id % shard_number, node_name_tag))
                client_id += 1
def get_availability_zones(ec2_client):
    """Names of zones in the 'available' state for this client's region."""
    response = ec2_client.describe_availability_zones()
    zones = response.get('AvailabilityZones', None)
    if not zones:
        return []
    return [info['ZoneName'] for info in zones if info['State'] == 'available']


def get_one_availability_zone(ec2_client):
    """First available zone, or None when the region reports none.

    Sleeps one second first -- presumably to stay under API rate limits; confirm.
    """
    time.sleep(1)
    zones = get_availability_zones(ec2_client)
    if zones:
        return zones[0]
    return None


def get_instance_ids2(ec2_client, node_name_tag):
    """Ids of instances tagged Name=node_name_tag; waits 5s before querying."""
    time.sleep(5)
    name_filter = [{'Name': 'tag:Name', 'Values': [node_name_tag]}]
    return get_instance_ids(ec2_client.describe_instances(Filters=name_filter))
# Get instance_ids from describe_instances_response.
def get_instance_ids(describe_instances_response):
    """Collect every InstanceId present in a describe_instances response."""
    ids = []
    if describe_instances_response["Reservations"]:
        for reservation in describe_instances_response["Reservations"]:
            for instance in reservation["Instances"]:
                if instance.get("InstanceId"):
                    ids.append(instance["InstanceId"])
    return ids
# Serializes get_waiter() calls across the waiter threads; wait() itself runs
# without the lock.
WAITER_LOCK = threading.Lock()

def run_waiter_100_instances_for_status(ec2_client, status, instance_ids):
    # Fixed pause before polling -- presumably to let new instances register
    # with EC2; confirm whether it is still needed.
    time.sleep(10)
    WAITER_LOCK.acquire()
    # NOTE(review): the `status` argument is ignored -- the waiter is
    # hard-coded to 'instance_running'; confirm whether other statuses were
    # ever intended.
    waiter = ec2_client.get_waiter('instance_running')
    WAITER_LOCK.release()
    waiter.wait(InstanceIds=instance_ids)
def run_waiter_for_status(ec2_client, status, instance_ids):
    """Wait until all instance_ids reach the target status, batching them into
    chunks of MAX_INTANCES_FOR_WAITER waited on by parallel threads."""
    thread_pool = []
    i = 0
    while i < len(instance_ids):
        # BUG FIX: the original computed j = i + min(len(...), i + MAX),
        # overshooting the intended chunk end (harmless only because Python
        # slices clamp out-of-range bounds).
        j = min(len(instance_ids), i + MAX_INTANCES_FOR_WAITER)
        t = threading.Thread(target=run_waiter_100_instances_for_status, args=(
            ec2_client, status, instance_ids[i:j]))
        t.start()
        thread_pool.append(t)
        i = i + MAX_INTANCES_FOR_WAITER
    for t in thread_pool:
        t.join()
# used for testing only.
# if __name__ == "__main__":
# ip_list = collect_public_ips('4', "4-NODE-23-36-01-2018-07-05", "configuration.txt")
# print ip_list
# generate_distribution_config(2, 1, ip_list, "config_test.txt")

@ -1,31 +0,0 @@
import unittest
from utils import generate_distribution_config

class TestCreateAndDeploy(unittest.TestCase):
    """Exercises generate_distribution_config on a fixed six-host list."""

    def test_generate_config_file(self):
        # NOTE(review): this test looks broken as written: with shard=2 and
        # client=2 the generator requires 2*2 + 2 + 1 = 7 hosts but only 6 are
        # supplied (so no file may be written), and the entries lack the
        # "<ip> <tag>" second field the generator splits on -- confirm against
        # utils.generate_distribution_config before relying on it.
        ips = ["102.000.000.1", "102.000.000.2", "102.000.000.3", "102.000.000.4", "102.000.000.5", "102.000.000.6"]
        generate_distribution_config(2, 2, ips, "config_test.txt")
        with open("config_test.txt", "r") as fin:
            lines = fin.readlines()
            # Tally roles and check that no IP is assigned twice.
            collection = {}
            collection['ip'] = []
            collection['client'] = {}
            leader_count, validator_count, client_count = 0, 0, 0
            for line in lines:
                strs = line.split(" ")
                assert(not strs[0] in collection['ip'])
                collection['ip'].append(strs[0])
                if strs[2] == "client":
                    client_count = client_count + 1
                elif strs[2] == "leader":
                    leader_count = leader_count + 1
                elif strs[2] == "validator":
                    validator_count = validator_count + 1
            assert(validator_count == 2)
            assert(leader_count == 2)
            assert(client_count == 2)

if __name__ == '__main__':
    unittest.main()

@ -1,5 +0,0 @@
# Kill every process of the current user whose command line matches one of
# the benchmark binaries (slave, leader, benchmark_node); the grep -v drops
# the grep itself from the candidate list.
for pid in `/bin/ps -fu $USER| grep "slave.go\|slave -port\|leader\|benchmark_node" | grep -v "grep" | awk '{print $2}'`;
do
    echo 'Killed process: '$pid
    kill -9 $pid
done

@ -1,34 +0,0 @@
import json
def get_public_ip(all_reservations):
    """Public IP of the first instance of each running reservation.

    NOTE(review): only Instances[0] of each reservation is examined -- confirm
    reservations here never carry more than one instance.
    """
    running_ips = []
    for reservation in all_reservations:
        first_instance = reservation['Instances'][0]
        if first_instance["State"]["Name"] != "running":
            continue
        running_ips.append(first_instance['PublicIpAddress'])
    return running_ips
def make_peers_list(all_reservations,port="9001",filename="config.txt"):
    # First running host becomes the leader; every other one a validator.
    p = get_public_ip(all_reservations)
    f = open(filename,"w")
    for i in range(len(p)):
        if i == 0:
            f.write(p[i] + " " + port + " " + "leader"+"\n")
        else:
            f.write(p[i] + " " + port + " " + "validator"+"\n")
    f.close()

def is_it_running(f):
    # Placeholder -- never implemented.
    pass

if __name__ == "__main__":
    # aws.json is presumably a raw `aws ec2 describe-instances` dump -- confirm.
    json_data=open("aws.json").read()
    f = json.loads(json_data)
    all_reservations = f['Reservations']
    make_peers_list(all_reservations)

@ -1,26 +0,0 @@
import requests
amazon_ipv4_url = "http://169.254.169.254/latest/meta-data/public-ipv4"
def get_my_ip():
    """Return this host's public IPv4 as reported by the EC2 metadata service.

    BUG FIX: the original `return current_ip = requests.get(...)` was a
    SyntaxError (assignment is not an expression in a return statement).
    """
    return requests.get(amazon_ipv4_url).text
if __name__ == "__main__":
    # Partition global_nodes.txt ("<ip> <port> <role>" lines) into a peer
    # list for this machine; if this host's own entry is a transaction
    # generator, drop a marker file instead.
    current_ip = requests.get(amazon_ipv4_url).text
    f = open("global_nodes.txt","r")
    peerList = []
    for myline in f:
        mylist = myline.split(" ")
        ip = mylist[0]
        node = mylist[2]
        if str(ip) != str(current_ip):
            # Other hosts: keep everything except transaction generators.
            if node != "transaction":
                peerList.append(myline)
        else:
            if node == "transaction":
                h = open("isTransaction.txt","w")
                h.write("I am just a transaction generator node")
                h.close()
    f.close()
    g = open("global_peerlist.txt","w")
    for myline in peerList:
        g.write(myline)
    g.close()

@ -1,34 +0,0 @@
#!/bin/bash -x
# Soldier run script: tune the TCP stack for the benchmark, kill any previous
# node processes, then (re)start the soldier agent from the Go workspace.
echo "Run Instances starts" >> tmplog

echo "Update systcl" >> tmplog
# Raise connection-backlog and buffer limits for high-throughput runs.
sudo sysctl net.core.somaxconn=1024
sudo sysctl net.core.netdev_max_backlog=65536;
sudo sysctl net.ipv4.tcp_tw_reuse=1;
sudo sysctl -w net.ipv4.tcp_rmem='65536 873800 1534217728';
sudo sysctl -w net.ipv4.tcp_wmem='65536 873800 1534217728';
sudo sysctl -w net.ipv4.tcp_mem='65536 873800 1534217728';

echo "Setup path" >> tmplog
# Stop any benchmark processes left over from a previous run.
./kill_node.sh
MyHOME=/home/ec2-user
source ~/.bash_profile
export GOROOT=/usr/lib/golang
export GOPATH=$MyHOME/projects
export PATH=$PATH:$GOROOT/bin

echo "Get ip" >> tmplog
# Get my IP
wget http://169.254.169.254/latest/meta-data/public-ipv4
ip=$(head -n 1 public-ipv4)
echo "Current IP is >>>"
echo $ip
echo ">>>>"

echo "Run soldier" >> tmplog
# Run soldier
cd $GOPATH/src/harmony-benchmark/bin/
node_port=9000
./soldier -ip $ip -port $node_port > soldier_log 2>&1 &
echo "Run Instances done" >> tmplog

@ -1 +0,0 @@
echo "Bye" >> tmplog

@ -1 +0,0 @@
echo "Hello" >> tmplog

@ -1,43 +0,0 @@
#!/bin/bash -x
# One-time instance provisioning: install Go + git, configure GOPATH,
# build the harmony-benchmark binaries, and raise process/file ulimits.
echo "Setup Golang" >> tmplog
# yum update is deliberately skipped (see marker file below) to save boot time.
#sudo yum update -y
sudo yum install -y golang
sudo yum install -y git
MyHOME=/home/ec2-user
echo "now setting up go-lang paths"
# GOROOT is the location where Go package is installed on your system
echo "export GOROOT=/usr/lib/golang" >> $MyHOME/.bash_profile
# GOPATH is the location of your work directory
echo "export GOPATH=$MyHOME/projects" >> $MyHOME/.bash_profile
# PATH in order to access go binary system wide
echo "export PATH=$PATH:$GOROOT/bin" >> $MyHOME/.bash_profile
export GOROOT=/usr/lib/golang
export GOPATH=$MyHOME/projects
export PATH=$PATH:$GOROOT/bin
source $MyHOME/.bash_profile
cd $GOPATH/src/harmony-benchmark
# Marker recording that the yum update step was skipped on this host.
touch 'yum_not_updated.txt'
# go get dependencies
go get ./...
# Record this instance's public IP for later tooling.
curl --silent http://169.254.169.254/latest/meta-data/public-ipv4 >> bin/myip.txt
# build executables
go build -o bin/soldier aws-experiment-launch/experiment/soldier/main.go
go build -o bin/commander aws-experiment-launch/experiment/commander/main.go
go build -o bin/benchmark benchmark.go
go build -o bin/txgen client/txgen/main.go
# Setup ulimit: raise proc/file-descriptor caps for all users and root
# so the benchmark can hold many sockets.
echo "* soft nproc 65535" | sudo tee -a /etc/security/limits.conf
echo "* hard nproc 65535" | sudo tee -a /etc/security/limits.conf
echo "* soft nofile 65535" | sudo tee -a /etc/security/limits.conf
echo "* hard nofile 65535" | sudo tee -a /etc/security/limits.conf
echo "root soft nproc 65535" | sudo tee -a /etc/security/limits.conf
echo "root hard nproc 65535" | sudo tee -a /etc/security/limits.conf
echo "root soft nofile 65535" | sudo tee -a /etc/security/limits.conf
echo "root hard nofile 65535" | sudo tee -a /etc/security/limits.conf
# Ensure PAM applies limits.conf to login sessions.
echo "session required pam_limits.so" | sudo tee -a /etc/pam.d/common-session

@ -1 +0,0 @@
aws ec2 run-instances --image-id ami-e251209a --count 1 --instance-type t2.nano --key-name main --security-group-ids sg-066a8b0ec187c7247

@ -1,177 +0,0 @@
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.compute.models import DiskCreateOption
# Azure subscription every helper below deploys into.
SUBSCRIPTION_ID = '8f969b5c-f8cb-4483-8252-354a929962e0'
# Resource group, region and VM name shared by all create_* helpers.
GROUP_NAME = 'myResourceGroup'
LOCATION = 'westus'
VM_NAME = 'myomyVM'
def get_credentials():
    """Build ServicePrincipalCredentials for the Azure SDK clients.

    SECURITY: the original version hard-coded the client id, secret and
    tenant in source control; those values must be considered leaked and
    rotated. They are now read from the standard environment variables
    AZURE_CLIENT_ID, AZURE_CLIENT_SECRET and AZURE_TENANT_ID.

    Raises RuntimeError with a clear message when a variable is missing.
    """
    import os
    try:
        return ServicePrincipalCredentials(
            client_id=os.environ['AZURE_CLIENT_ID'],
            secret=os.environ['AZURE_CLIENT_SECRET'],
            tenant=os.environ['AZURE_TENANT_ID'],
        )
    except KeyError as missing:
        raise RuntimeError(
            'Missing Azure credential environment variable: %s' % missing)
def create_resource_group(resource_group_client):
    """Create (or update) the resource group GROUP_NAME in LOCATION."""
    resource_group_client.resource_groups.create_or_update(
        GROUP_NAME,
        {'location': LOCATION},
    )
def create_availability_set(compute_client):
    """Create/update the 'myAVSet' availability set (3 fault domains)."""
    params = {
        'location': LOCATION,
        'sku': {'name': 'Aligned'},
        'platform_fault_domain_count': 3,
    }
    compute_client.availability_sets.create_or_update(
        GROUP_NAME, 'myAVSet', params)
def create_public_ip_address(network_client):
    """Create/update the dynamic public IP 'myIPAddress'.

    Blocks on the SDK poller and returns the finished resource.
    """
    poller = network_client.public_ip_addresses.create_or_update(
        GROUP_NAME,
        'myIPAddress',
        {'location': LOCATION, 'public_ip_allocation_method': 'Dynamic'},
    )
    return poller.result()
def create_vnet(network_client):
    """Create/update virtual network 'myVNet' (10.0.0.0/16); return it."""
    poller = network_client.virtual_networks.create_or_update(
        GROUP_NAME,
        'myVNet',
        {
            'location': LOCATION,
            'address_space': {'address_prefixes': ['10.0.0.0/16']},
        },
    )
    return poller.result()
def create_subnet(network_client):
    """Create/update subnet 'mySubnet' (10.0.0.0/24) in 'myVNet'; return it."""
    poller = network_client.subnets.create_or_update(
        GROUP_NAME,
        'myVNet',
        'mySubnet',
        {'address_prefix': '10.0.0.0/24'},
    )
    return poller.result()
def create_nic(network_client):
    """Create/update NIC 'myNic' wired to 'mySubnet' and 'myIPAddress'.

    Looks up the existing subnet and public IP, then provisions the
    network interface and returns the finished resource.
    """
    subnet = network_client.subnets.get(GROUP_NAME, 'myVNet', 'mySubnet')
    public_ip = network_client.public_ip_addresses.get(
        GROUP_NAME, 'myIPAddress')

    ip_config = {
        'name': 'myIPConfig',
        'public_ip_address': public_ip,
        'subnet': {'id': subnet.id},
    }
    poller = network_client.network_interfaces.create_or_update(
        GROUP_NAME,
        'myNic',
        {'location': LOCATION, 'ip_configurations': [ip_config]},
    )
    return poller.result()
def create_vm(network_client, compute_client):
    """Create/update the VM VM_NAME using the previously created NIC and
    availability set, then block until provisioning completes.

    Requires 'myNic' and 'myAVSet' to already exist in GROUP_NAME.
    Returns the finished VirtualMachine resource.
    """
    nic = network_client.network_interfaces.get(
        GROUP_NAME,
        'myNic'
    )
    avset = compute_client.availability_sets.get(
        GROUP_NAME,
        'myAVSet'
    )
    vm_parameters = {
        'location': LOCATION,
        'os_profile': {
            'computer_name': VM_NAME,
            'admin_username': 'azureuser',
            # SECURITY(review): admin password is hard-coded in source
            # control — move to a secret store / env var and rotate.
            'admin_password': 'Azure12345678'
        },
        'hardware_profile': {
            'vm_size': 'Standard_DS1'
        },
        # Latest Windows Server 2012 R2 marketplace image.
        'storage_profile': {
            'image_reference': {
                'publisher': 'MicrosoftWindowsServer',
                'offer': 'WindowsServer',
                'sku': '2012-R2-Datacenter',
                'version': 'latest'
            }
        },
        'network_profile': {
            'network_interfaces': [{
                'id': nic.id
            }]
        },
        'availability_set': {
            'id': avset.id
        }
    }
    creation_result = compute_client.virtual_machines.create_or_update(
        GROUP_NAME,
        VM_NAME,
        vm_parameters
    )
    # .result() blocks until the long-running create finishes.
    return creation_result.result()
if __name__ == '__main__':
    # End-to-end provisioning: credentials -> SDK clients -> resource
    # group -> availability set -> public IP -> vnet -> subnet -> NIC ->
    # VM. Order matters: each step depends on the previous resources.
    credentials = get_credentials()
    resource_group_client = ResourceManagementClient(
        credentials,
        SUBSCRIPTION_ID
    )
    network_client = NetworkManagementClient(
        credentials,
        SUBSCRIPTION_ID
    )
    compute_client = ComputeManagementClient(
        credentials,
        SUBSCRIPTION_ID
    )
    create_resource_group(resource_group_client)
    print('Resource group created....')
    create_availability_set(compute_client)
    print('Availability set created')
    creation_result = create_public_ip_address(network_client)
    print('Public IP created')
    creation_result = create_vnet(network_client)
    print('Virtual Net Created')
    creation_result = create_subnet(network_client)
    print('Subnet created')
    creation_result = create_nic(network_client)
    print('NIC Created')
    creation_result = create_vm(network_client, compute_client)
    print("------------------------------------------------------")
    print("VM Created")
    print(creation_result)

@ -5,7 +5,6 @@ import (
"fmt"
"math/rand"
"os"
"strconv"
"time"
"github.com/simple-rules/harmony-benchmark/attack"
@ -13,6 +12,7 @@ import (
"github.com/simple-rules/harmony-benchmark/db"
"github.com/simple-rules/harmony-benchmark/log"
"github.com/simple-rules/harmony-benchmark/node"
"github.com/simple-rules/harmony-benchmark/profiler"
"github.com/simple-rules/harmony-benchmark/utils"
)
@ -32,13 +32,6 @@ func attackDetermination(attackedMode int) bool {
return false
}
// startProfiler launches the external profiler binary ("./bin/profiler")
// as a separate process, handing it this process's PID so it can sample
// it, plus the shard id and log folder. A launch failure is logged but
// not propagated — the node keeps running unprofiled.
func startProfiler(shardID string, logFolder string) {
	err := utils.RunCmd("./bin/profiler", "-pid", strconv.Itoa(os.Getpid()), "-shard_id", shardID, "-log_folder", logFolder)
	if err != nil {
		log.Error("Failed to start profiler")
	}
}
func InitLDBDatabase(ip string, port string) (*db.LDBDatabase, error) {
// TODO(minhdoan): Refactor this.
dbFileName := "/tmp/harmony_" + ip + port + ".dat"
@ -56,6 +49,7 @@ func main() {
logFolder := flag.String("log_folder", "latest", "the folder collecting the logs of this execution")
attackedMode := flag.Int("attacked_mode", 0, "0 means not attacked, 1 means attacked, 2 means being open to be selected as attacked")
dbSupported := flag.Bool("db_supported", false, "false means not db_supported, true means db_supported")
profile := flag.Bool("profile", false, "Turn on profiling (CPU, Memory).")
flag.Parse()
// Set up randomization seed.
@ -95,9 +89,10 @@ func main() {
// Consensus object.
consensus := consensus.NewConsensus(*ip, *port, shardID, peers, leader)
// Start Profiler for leader
if role == "leader" {
startProfiler(shardID, *logFolder)
// Start Profiler for leader if profile argument is on
if *profile && role == "leader" {
profiler := profiler.NewProfiler(consensus.Log, os.Getpid(), shardID)
profiler.Start()
}
// Set logger to attack model.

@ -23,11 +23,21 @@ type Block struct {
ShardId uint32
Hash [32]byte
MerkleRootData []byte
State *State // If present, this block is state block
// Signature...
Bitmap []byte // Contains which validator signed the block.
Signature [66]byte // Schnorr collective signature
}
// State summarizes chain history folded into a state block: the total
// number of blocks and transactions it replaces.
type State struct {
	NumBlocks       int32 // Total number of blocks
	NumTransactions int32 // Total number of transactions
}

// IsStateBlock reports whether b is a state block: it carries a State
// payload and its previous-block hash is all zeros.
func (b *Block) IsStateBlock() bool {
	return b.State != nil && bytes.Equal(b.PrevBlockHash[:], (&[32]byte{})[:]) // TODO: think of a better indicator to check
}
// Serialize serializes the block
func (b *Block) Serialize() []byte {
var result bytes.Buffer
@ -75,6 +85,10 @@ func (b *Block) Write(db db.Database, key string) error {
return db.Put([]byte(key), b.Serialize())
}
// Delete removes the value stored under key from the given database,
// returning any error from the underlying store.
func Delete(db db.Database, key string) error {
	return db.Delete([]byte(key))
}
// CalculateBlockHash returns a hash of the block
func (b *Block) CalculateBlockHash() []byte {
var hashes [][]byte
@ -112,3 +126,32 @@ func NewBlock(transactions []*Transaction, prevBlockHash [32]byte, shardId uint3
func NewGenesisBlock(coinbase *Transaction, shardId uint32) *Block {
return NewBlock([]*Transaction{coinbase}, [32]byte{}, shardId)
}
// NewStateBlock creates and returns a state Block based on utxo pool.
// It compresses the pool into one synthetic transaction per address:
// each live UTXO of the address becomes both a TxInput (referencing the
// original outpoint) and a TxOutput (re-crediting the same amount), so
// the block encodes every current balance. numBlocks and numTxs record
// the totals of the history being summarized. Returns nil if any stored
// tx-hash string cannot be decoded back into 32 bytes.
// TODO(RJ): take care of dangling cross shard transaction
func NewStateBlock(utxoPool *UTXOPool, numBlocks, numTxs int32) *Block {
	stateTransactions := []*Transaction{}
	// NOTE(review): stateTransactionIds is collected but never used.
	stateTransactionIds := [][32]byte{}
	for address, txHash2Vout2AmountMap := range utxoPool.UtxoMap {
		// One aggregate transaction per address.
		stateTransaction := Transaction{}
		for txHash, vout2AmountMap := range txHash2Vout2AmountMap {
			for index, amount := range vout2AmountMap {
				txHashBytes, err := utils.Get32BytesFromString(txHash)
				if err == nil {
					stateTransaction.TxInput = append(stateTransaction.TxInput, *NewTXInput(NewOutPoint(&txHashBytes, index), address, utxoPool.ShardID))
					stateTransaction.TxOutput = append(stateTransaction.TxOutput, TXOutput{Amount: amount, Address: address, ShardID: utxoPool.ShardID})
				} else {
					// A corrupt hash string aborts the whole state block.
					return nil
				}
			}
		}
		// Addresses with no outputs contribute nothing.
		if len(stateTransaction.TxOutput) != 0 {
			stateTransaction.SetID()
			stateTransactionIds = append(stateTransactionIds, stateTransaction.ID)
			stateTransactions = append(stateTransactions, &stateTransaction)
		}
	}
	// State blocks use an all-zero previous hash (see IsStateBlock).
	newBlock := NewBlock(stateTransactions, [32]byte{}, utxoPool.ShardID)
	newBlock.State = &State{NumBlocks: numBlocks, NumTransactions: numTxs}
	return newBlock
}

@ -195,3 +195,19 @@ func CreateBlockchain(address [20]byte, shardId uint32) *Blockchain {
return &bc
}
// Create state block based on the utxos.
func (bc *Blockchain) CreateStateBlock(utxoPool *UTXOPool) *Block {
var numBlocks int32 = 0
var numTxs int32 = 0
for _, block := range bc.Blocks {
if block.IsStateBlock() {
numBlocks += block.State.NumBlocks
numTxs += block.State.NumTransactions
} else {
numBlocks += 1
numTxs += block.NumTransactions
}
}
return NewStateBlock(utxoPool, numBlocks, numTxs)
}

@ -3,6 +3,7 @@ package blockchain
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"encoding/gob"
"encoding/hex"
"fmt"
@ -32,7 +33,7 @@ type Transaction struct {
// TXOutput is the struct of transaction output in a transaction.
type TXOutput struct {
Amount int
Amount int // TODO: Switch to big int or uint32
Address [20]byte // last 20 bytes of the hash of public key
ShardID uint32 // The Id of the shard where this UTXO belongs
}
@ -109,13 +110,7 @@ func (tx *Transaction) SetID() {
}
func (tx *Transaction) Sign(priKey kyber.Scalar) error {
var encoded bytes.Buffer
enc := gob.NewEncoder(&encoded)
err := enc.Encode(tx)
if err != nil {
log.Panic(err)
}
signature, err := schnorr.Sign(crypto.Ed25519Curve, priKey, encoded.Bytes())
signature, err := schnorr.Sign(crypto.Ed25519Curve, priKey, tx.GetContentToVerify())
if err != nil {
log.Panic(err)
}
@ -124,17 +119,23 @@ func (tx *Transaction) Sign(priKey kyber.Scalar) error {
return err
}
// IsCrossShard reports whether the transaction spans more than one
// shard, i.e. its inputs and outputs do not all carry the same shard ID.
func (tx *Transaction) IsCrossShard() bool {
	seen := make(map[uint32]bool)
	for _, in := range tx.TxInput {
		seen[in.ShardID] = true
	}
	for _, out := range tx.TxOutput {
		seen[out.ShardID] = true
	}
	return len(seen) > 1
}
func (tx *Transaction) GetContentToVerify() []byte {
tempTx := *tx
tempTx.Signature = [64]byte{}
tempTx.Proofs = []CrossShardTxProof{}
var encoded bytes.Buffer
enc := gob.NewEncoder(&encoded)
err := enc.Encode(tempTx)
if err != nil {
log.Panic(err)
}
return encoded.Bytes()
return tempTx.Serialize()
}
// NewCoinbaseTX creates a new coinbase transaction
@ -156,7 +157,7 @@ func (txInput *TXInput) String() string {
res := fmt.Sprintf("TxID: %v, ", hex.EncodeToString(txInput.PreviousOutPoint.TxID[:]))
res += fmt.Sprintf("TxOutputIndex: %v, ", txInput.PreviousOutPoint.Index)
res += fmt.Sprintf("Address: %v, ", txInput.Address)
res += fmt.Sprintf("Shard Id: %v", txInput.ShardID)
res += fmt.Sprintf("ShardId: %v", txInput.ShardID)
return res
}
@ -164,12 +165,19 @@ func (txInput *TXInput) String() string {
func (txOutput *TXOutput) String() string {
res := fmt.Sprintf("Amount: %v, ", txOutput.Amount)
res += fmt.Sprintf("Address: %v", txOutput.Address)
res += fmt.Sprintf("ShardId: %v", txOutput.ShardID)
return res
}
// Used for debuging.
func (proof *CrossShardTxProof) String() string {
res := fmt.Sprintf("Accept: %v, ", proof.Accept)
res += fmt.Sprintf("TxId: %v, ", hex.EncodeToString(proof.TxID[:]))
res += fmt.Sprintf("BlockHash: %v, ", hex.EncodeToString(proof.BlockHash[:]))
res += fmt.Sprintf("TxInput:\n")
for id, value := range proof.TxInput {
res += fmt.Sprintf("%v: %v\n", id, value.String())
}
return res
}
@ -185,7 +193,71 @@ func (tx *Transaction) String() string {
res += fmt.Sprintf("%v: %v\n", id, value.String())
}
for id, value := range tx.Proofs {
res += fmt.Sprintf("Proof:\n")
res += fmt.Sprintf("%v: %v\n", id, value.String())
}
res += fmt.Sprintf("PublicKey: %v\n", hex.EncodeToString(tx.PublicKey[:]))
res += fmt.Sprintf("Sig: %v\n", hex.EncodeToString(tx.Signature[:]))
return res
}
// Serialize flattens the transaction into a byte stream: ID, then each
// input, each output and each proof in order, then the public key and
// signature. NOTE(review): the variable-length slices carry no explicit
// length framing here — confirm the decoding counterpart agrees on the
// layout.
func (tx *Transaction) Serialize() []byte {
	buffer := bytes.NewBuffer([]byte{})
	buffer.Write(tx.ID[:])
	for _, value := range tx.TxInput {
		buffer.Write(value.Serialize())
	}
	for _, value := range tx.TxOutput {
		buffer.Write(value.Serialize())
	}
	for _, value := range tx.Proofs {
		buffer.Write(value.Serialize())
	}
	buffer.Write(tx.PublicKey[:])
	buffer.Write(tx.Signature[:])
	return buffer.Bytes()
}
// Serialize encodes the input as: address bytes, 4-byte big-endian
// shard ID, 4-byte big-endian previous-outpoint index, then the 32-byte
// previous transaction ID.
func (txInput *TXInput) Serialize() []byte {
	buffer := bytes.NewBuffer([]byte{})
	buffer.Write(txInput.Address[:])
	fourBytes := make([]byte, 4)
	binary.BigEndian.PutUint32(fourBytes, txInput.ShardID)
	buffer.Write(fourBytes)
	binary.BigEndian.PutUint32(fourBytes, txInput.PreviousOutPoint.Index)
	buffer.Write(fourBytes)
	buffer.Write(txInput.PreviousOutPoint.TxID[:])
	return buffer.Bytes()
}
// Serialize encodes the output as: address bytes, 4-byte big-endian
// shard ID, then the amount truncated to 4 big-endian bytes.
func (txOutput *TXOutput) Serialize() []byte {
	buffer := bytes.NewBuffer([]byte{})
	buffer.Write(txOutput.Address[:])
	fourBytes := make([]byte, 4)
	binary.BigEndian.PutUint32(fourBytes, txOutput.ShardID)
	buffer.Write(fourBytes)
	// Amounts above uint32 range would silently truncate here.
	binary.BigEndian.PutUint32(fourBytes, uint32(txOutput.Amount)) // TODO(RJ): make amount a bigInt
	buffer.Write(fourBytes)
	return buffer.Bytes()
}
// Serialize encodes the proof as: tx ID, block hash, each input's
// serialization in order, then a single trailing byte for the verdict —
// 1 for accept, 0 for reject.
func (crossProof *CrossShardTxProof) Serialize() []byte {
	buffer := bytes.NewBuffer([]byte{})
	buffer.Write(crossProof.TxID[:])
	buffer.Write(crossProof.BlockHash[:])
	for _, value := range crossProof.TxInput {
		buffer.Write(value.Serialize())
	}
	if crossProof.Accept {
		buffer.WriteByte(byte(1))
	} else {
		buffer.WriteByte(byte(0))
	}
	return buffer.Bytes()
}

@ -38,6 +38,28 @@ type UTXOPool struct {
mutex sync.Mutex
}
// Merges the utxoMap into that of the UtxoPool
func (utxoPool *UTXOPool) MergeUtxoMap(utxoMap UtxoMap) {
for address, txHash2Vout2AmountMap := range utxoMap {
clientTxHashMap, ok := utxoPool.UtxoMap[address]
if ok {
for txHash, vout2AmountMap := range txHash2Vout2AmountMap {
clientVout2AmountMap, ok := clientTxHashMap[txHash]
if ok {
for vout, amount := range vout2AmountMap {
clientVout2AmountMap[vout] = amount
}
} else {
clientTxHashMap[txHash] = vout2AmountMap
}
}
} else {
utxoPool.UtxoMap[address] = txHash2Vout2AmountMap
}
}
}
// Gets the Utxo map for specific addresses
func (utxoPool *UTXOPool) GetUtxoMapByAddresses(addresses [][20]byte) UtxoMap {
result := make(UtxoMap)
@ -63,6 +85,34 @@ func (utxoPool *UTXOPool) VerifyTransactions(transactions []*Transaction) bool {
return true
}
// VerifyStateBlock verifies if the given state block matches the current utxo pool.
func (utxoPool *UTXOPool) VerifyStateBlock(stateBlock *Block) bool {
accountBalanceInUtxoPool := make(map[[20]byte]int)
for address, txHash2Vout2AmountMap := range utxoPool.UtxoMap {
for _, vout2AmountMap := range txHash2Vout2AmountMap {
for _, amount := range vout2AmountMap {
accountBalanceInUtxoPool[address] = accountBalanceInUtxoPool[address] + amount
}
}
}
for _, transaction := range stateBlock.Transactions {
for _, txOutput := range transaction.TxOutput {
if txOutput.ShardID != utxoPool.ShardID {
return false
}
accountBalanceInUtxoPool[txOutput.Address] = accountBalanceInUtxoPool[txOutput.Address] - txOutput.Amount
}
}
for _, amount := range accountBalanceInUtxoPool {
if amount != 0 {
return false
}
}
return true
}
// VerifyOneTransaction verifies if a list of transactions valid.
func (utxoPool *UTXOPool) VerifyOneTransaction(tx *Transaction, spentTXOs *map[[20]byte]map[string]map[uint32]bool) (valid, crossShard bool) {
if len(tx.Proofs) != 0 {
@ -113,9 +163,14 @@ func (utxoPool *UTXOPool) VerifyOneTransaction(tx *Transaction, spentTXOs *map[[
// Calculate the sum of TxOutput
for _, out := range tx.TxOutput {
outTotal += out.Amount
if out.ShardID != utxoPool.ShardID {
crossShard = true
}
}
if (crossShard && inTotal >= outTotal) || (!crossShard && inTotal != outTotal) {
// TODO: improve this checking logic
if (crossShard && inTotal > outTotal) || (!crossShard && inTotal != outTotal) {
return false, crossShard
}
@ -185,10 +240,16 @@ func (utxoPool *UTXOPool) UpdateOneTransaction(tx *Transaction) {
break
}
}
for _, out := range tx.TxOutput {
if out.ShardID != utxoPool.ShardID {
isCrossShard = true
break
}
}
isValidCrossShard := true
if isCrossShard {
// Check whether for this shard this cross transaction is valid or not.
// Check whether for this cross shard transaction is valid or not.
for _, in := range tx.TxInput {
// Only check the input for my own shard.
if in.ShardID != utxoPool.ShardID {

@ -28,7 +28,7 @@ import (
"github.com/simple-rules/harmony-benchmark/blockchain"
"github.com/simple-rules/harmony-benchmark/client"
"github.com/simple-rules/harmony-benchmark/client/btctxiter"
"github.com/simple-rules/harmony-benchmark/configr"
client_config "github.com/simple-rules/harmony-benchmark/client/config"
"github.com/simple-rules/harmony-benchmark/consensus"
"github.com/simple-rules/harmony-benchmark/log"
"github.com/simple-rules/harmony-benchmark/node"
@ -176,9 +176,9 @@ func main() {
flag.Parse()
// Read the configs
configr := configr.NewConfigr()
configr.ReadConfigFile(*configFile)
leaders, shardIDs := configr.GetLeadersAndShardIds()
config := client_config.NewConfig()
config.ReadConfigFile(*configFile)
leaders, shardIDs := config.GetLeadersAndShardIds()
// Do cross shard tx if there are more than one shard
setting.crossShard = len(shardIDs) > 1
@ -204,7 +204,7 @@ func main() {
}
// Client/txgenerator server node setup
clientPort := configr.GetClientPort()
clientPort := config.GetClientPort()
consensusObj := consensus.NewConsensus("0", clientPort, "0", nil, p2p.Peer{})
clientNode := node.New(consensusObj, nil)
@ -254,6 +254,6 @@ func main() {
// Send a stop message to stop the nodes at the end
msg := proto_node.ConstructStopMessage()
peers := append(configr.GetValidators(), leaders...)
peers := append(config.GetValidators(), leaders...)
p2p.BroadcastMessage(peers, msg)
}

@ -3,7 +3,6 @@ package client
import (
"bytes"
"encoding/gob"
"fmt"
"github.com/simple-rules/harmony-benchmark/proto/node"
"sync"
@ -17,26 +16,11 @@ import (
type Client struct {
PendingCrossTxs map[[32]byte]*blockchain.Transaction // Map of TxId to pending cross shard txs. Pending means the proof-of-accept/rejects are not complete
PendingCrossTxsMutex sync.Mutex // Mutex for the pending txs list
leaders *[]p2p.Peer // All the leaders for each shard
Leaders *[]p2p.Peer // All the leaders for each shard
UpdateBlocks func([]*blockchain.Block) // Closure function used to sync new block with the leader. Once the leader finishes the consensus on a new block, it will send it to the clients. Clients use this method to update their blockchain
UtxoMap blockchain.UtxoMap
ShardResponseTracker map[uint32]bool // A map containing the shard id of responded shard.
log log.Logger // Log utility
}
func (client *Client) PrintUtxoBalance() {
for address, txHash2Vout2AmountMap := range client.UtxoMap {
balance := 0
for _, vout2AmountMap := range txHash2Vout2AmountMap {
for _, amount := range vout2AmountMap {
balance += amount
}
}
fmt.Printf("Address: {%x}\n", address)
fmt.Printf("Balance: %d\n", balance)
}
ShardUtxoMap map[uint32]blockchain.UtxoMap
log log.Logger // Log utility
}
// The message handler for CLIENT/TRANSACTION messages.
@ -54,7 +38,6 @@ func (client *Client) TransactionMessageHandler(msgPayload []byte) {
}
client.handleProofOfLockMessage(proofs)
case client_proto.UTXO_RESPONSE:
fmt.Print("Received utxo resposne")
txDecoder := gob.NewDecoder(bytes.NewReader(msgPayload[1:])) // skip the PROOF_OF_LOCK messge type
fetchUtxoResponse := new(client_proto.FetchUtxoResponseMessage)
err := txDecoder.Decode(fetchUtxoResponse)
@ -120,41 +103,22 @@ func (client *Client) handleProofOfLockMessage(proofs *[]blockchain.CrossShardTx
}
func (client *Client) handleFetchUtxoResponseMessage(utxoResponse client_proto.FetchUtxoResponseMessage) {
_, ok := client.ShardResponseTracker[utxoResponse.ShardId]
_, ok := client.ShardUtxoMap[utxoResponse.ShardId]
if ok {
return
}
client.ShardResponseTracker[utxoResponse.ShardId] = true
// Merge utxo response into client utxo map.
for address, txHash2Vout2AmountMap := range utxoResponse.UtxoMap {
clientTxHashMap, ok := client.UtxoMap[address]
if ok {
for txHash, vout2AmountMap := range txHash2Vout2AmountMap {
clientVout2AmountMap, ok := clientTxHashMap[txHash]
if ok {
for vout, amount := range vout2AmountMap {
clientVout2AmountMap[vout] = amount
}
} else {
clientTxHashMap[txHash] = vout2AmountMap
}
}
} else {
client.UtxoMap[address] = txHash2Vout2AmountMap
}
}
client.ShardUtxoMap[utxoResponse.ShardId] = utxoResponse.UtxoMap
}
func (client *Client) broadcastCrossShardTxUnlockMessage(txsToSend *[]blockchain.Transaction) {
p2p.BroadcastMessage(*client.leaders, node.ConstructUnlockToCommitOrAbortMessage(*txsToSend))
p2p.BroadcastMessage(*client.Leaders, node.ConstructUnlockToCommitOrAbortMessage(*txsToSend))
}
// Create a new Client
func NewClient(leaders *[]p2p.Peer) *Client {
client := Client{}
client.PendingCrossTxs = make(map[[32]byte]*blockchain.Transaction)
client.leaders = leaders
client.Leaders = leaders
// Logger
client.log = log.New()

@ -0,0 +1,99 @@
package config
import (
"bufio"
"log"
"os"
"strconv"
"strings"
"github.com/simple-rules/harmony-benchmark/p2p"
)
// ConfigEntry is one parsed line of the node config file, in the file's
// column order: "ip port role shardID". Role is one of "leader",
// "validator" or "client" as used by the getters below.
type ConfigEntry struct {
	IP      string
	Port    string
	Role    string
	ShardID string
}

// Config holds the parsed config file; populate via ReadConfigFile.
type Config struct {
	config []ConfigEntry
}
// NewConfig returns an empty Config, ready to be filled in by
// ReadConfigFile.
func NewConfig() *Config {
	return &Config{}
}
// Gets all the validator peers
func (config *Config) GetValidators() []p2p.Peer {
var peerList []p2p.Peer
for _, entry := range config.config {
if entry.Role != "validator" {
continue
}
peer := p2p.Peer{Port: entry.Port, Ip: entry.IP}
peerList = append(peerList, peer)
}
return peerList
}
// Gets all the leader peers and corresponding shard Ids
func (config *Config) GetLeadersAndShardIds() ([]p2p.Peer, []uint32) {
var peerList []p2p.Peer
var shardIDs []uint32
for _, entry := range config.config {
if entry.Role == "leader" {
peerList = append(peerList, p2p.Peer{Ip: entry.IP, Port: entry.Port})
val, err := strconv.Atoi(entry.ShardID)
if err == nil {
shardIDs = append(shardIDs, uint32(val))
} else {
log.Print("[Generator] Error parsing the shard Id ", entry.ShardID)
}
}
}
return peerList, shardIDs
}
// GetClientPeer returns the first peer whose role is "client", or nil
// when the config contains no client entry.
func (config *Config) GetClientPeer() *p2p.Peer {
	for _, entry := range config.config {
		if entry.Role == "client" {
			return &p2p.Peer{Port: entry.Port, Ip: entry.IP}
		}
	}
	return nil
}
// Gets the port of the client node in the config
func (config *Config) GetClientPort() string {
for _, entry := range config.config {
if entry.Role == "client" {
return entry.Port
}
}
return ""
}
// Parse the config file and return a 2d array containing the file data
func (config *Config) ReadConfigFile(filename string) error {
file, err := os.Open(filename)
defer file.Close()
if err != nil {
log.Fatal("Failed to read config file ", filename)
return err
}
fscanner := bufio.NewScanner(file)
result := []ConfigEntry{}
for fscanner.Scan() {
p := strings.Split(fscanner.Text(), " ")
entry := ConfigEntry{p[0], p[1], p[2], p[3]}
result = append(result, entry)
}
config.config = result
return nil
}

@ -10,7 +10,7 @@ import (
"github.com/simple-rules/harmony-benchmark/blockchain"
"github.com/simple-rules/harmony-benchmark/client"
"github.com/simple-rules/harmony-benchmark/configr"
client_config "github.com/simple-rules/harmony-benchmark/client/config"
"github.com/simple-rules/harmony-benchmark/consensus"
"github.com/simple-rules/harmony-benchmark/crypto/pki"
"github.com/simple-rules/harmony-benchmark/log"
@ -179,13 +179,8 @@ func generateCrossShardTx(txInfo *TxInfo) {
priKeyInt, ok := client.LookUpIntPriKey(txInfo.address)
if ok {
bytes, err := pki.GetPublicKeyFromScalar(pki.GetPrivateKeyScalarFromInt(priKeyInt)).MarshalBinary()
if err == nil {
copy(tx.PublicKey[:], bytes)
} else {
log.Error("Failed to serialized public key", "error", err)
return
}
tx.PublicKey = pki.GetBytesFromPublicKey(pki.GetPublicKeyFromScalar(pki.GetPrivateKeyScalarFromInt(priKeyInt)))
tx.SetID() // TODO(RJ): figure out the correct way to set Tx ID.
tx.Sign(pki.GetPrivateKeyScalarFromInt(priKeyInt))
} else {
@ -208,13 +203,7 @@ func generateSingleShardTx(txInfo *TxInfo) {
priKeyInt, ok := client.LookUpIntPriKey(txInfo.address)
if ok {
bytes, err := pki.GetPublicKeyFromScalar(pki.GetPrivateKeyScalarFromInt(priKeyInt)).MarshalBinary()
if err == nil {
copy(tx.PublicKey[:], bytes)
} else {
log.Error("Failed to serialized public key", "error", err)
return
}
tx.PublicKey = pki.GetBytesFromPublicKey(pki.GetPublicKeyFromScalar(pki.GetPrivateKeyScalarFromInt(priKeyInt)))
tx.SetID() // TODO(RJ): figure out the correct way to set Tx ID.
tx.Sign(pki.GetPrivateKeyScalarFromInt(priKeyInt))
} else {
@ -255,9 +244,9 @@ func main() {
flag.Parse()
// Read the configs
configr := configr.NewConfigr()
configr.ReadConfigFile(*configFile)
leaders, shardIds := configr.GetLeadersAndShardIds()
config := client_config.NewConfig()
config.ReadConfigFile(*configFile)
leaders, shardIds := config.GetLeadersAndShardIds()
setting.numOfAddress = 10000
// Do cross shard tx if there are more than one shard
@ -270,7 +259,6 @@ func main() {
h := log.MultiHandler(
log.StdoutHandler,
log.Must.FileHandler(logFileName, log.LogfmtFormat()), // Log to file
// log.Must.NetHandler("tcp", ":3000", log.JSONFormat()) // Log to remote
)
log.Root().SetHandler(h)
@ -284,7 +272,7 @@ func main() {
}
// Client/txgenerator server node setup
clientPort := configr.GetClientPort()
clientPort := config.GetClientPort()
consensusObj := consensus.NewConsensus("0", clientPort, "0", nil, p2p.Peer{})
clientNode := node.New(consensusObj, nil)
@ -361,6 +349,6 @@ func main() {
// Send a stop message to stop the nodes at the end
msg := proto_node.ConstructStopMessage()
peers := append(configr.GetValidators(), leaders...)
peers := append(config.GetValidators(), leaders...)
p2p.BroadcastMessage(peers, msg)
}

@ -3,57 +3,52 @@ package main
import (
"crypto/rand"
"encoding/hex"
"errors"
"flag"
"fmt"
"github.com/dedis/kyber"
"github.com/simple-rules/harmony-benchmark/blockchain"
"github.com/simple-rules/harmony-benchmark/client"
"github.com/simple-rules/harmony-benchmark/configr"
client_config "github.com/simple-rules/harmony-benchmark/client/config"
"github.com/simple-rules/harmony-benchmark/crypto"
"github.com/simple-rules/harmony-benchmark/crypto/pki"
"github.com/simple-rules/harmony-benchmark/node"
"github.com/simple-rules/harmony-benchmark/p2p"
proto_node "github.com/simple-rules/harmony-benchmark/proto/node"
"github.com/simple-rules/harmony-benchmark/utils"
"io"
"io/ioutil"
math_rand "math/rand"
"os"
"strconv"
"time"
)
func main() {
// Account subcommands
accountImportCommand := flag.NewFlagSet("import", flag.ExitOnError)
//accountListCommand := flag.NewFlagSet("list", flag.ExitOnError)
//
//// Transaction subcommands
//transactionNewCommand := flag.NewFlagSet("new", flag.ExitOnError)
//
//// Account subcommand flag pointers
//// Adding a new choice for --metric of 'substring' and a new --substring flag
accountImportPtr := accountImportCommand.String("privateKey", "", "Specify the private key to import")
//accountListPtr := accountNewCommand.Bool("new", false, "N/A")
//
//// Transaction subcommand flag pointers
//transactionNewPtr := transactionNewCommand.String("text", "", "Text to parse. (Required)")
// Transfer subcommands
transferCommand := flag.NewFlagSet("transfer", flag.ExitOnError)
transferSenderPtr := transferCommand.String("sender", "0", "Specify the sender account address or index")
transferReceiverPtr := transferCommand.String("receiver", "", "Specify the receiver account")
transferAmountPtr := transferCommand.Int("amount", 0, "Specify the amount to transfer")
// Verify that a subcommand has been provided
// os.Arg[0] is the main command
// os.Arg[1] will be the subcommand
if len(os.Args) < 2 {
fmt.Println("account or transaction subcommand is required")
fmt.Println("account or transfer subcommand is required")
os.Exit(1)
}
// Switch on the subcommand
// Parse the flags for appropriate FlagSet
// FlagSet.Parse() requires a set of arguments to parse as input
// os.Args[2:] will be all arguments starting after the subcommand at os.Args[1]
switch os.Args[1] {
case "account":
switch os.Args[2] {
case "new":
fmt.Println("Creating new account...")
randomBytes := [32]byte{}
_, err := io.ReadFull(rand.Reader, randomBytes[:])
@ -64,7 +59,7 @@ func main() {
priKey := crypto.Ed25519Curve.Scalar().SetBytes(randomBytes[:])
priKeyBytes, err := priKey.MarshalBinary()
if err != nil {
panic("Failed to generate private key")
panic("Failed to serialize the private key")
}
pubKey := pki.GetPublicKeyFromScalar(priKey)
address := pki.GetAddressFromPublicKey(pubKey)
@ -72,18 +67,15 @@ func main() {
fmt.Printf("New account created:\nAddress: {%x}\n", address)
case "list":
for i, address := range ReadAddresses() {
fmt.Printf("Account %d:\n {%x}\n", i+1, address)
fmt.Printf("Account %d:\n {%x}\n", i+1, address)
}
case "clearAll":
fmt.Println("Deleting existing accounts...")
DeletePrivateKey()
ClearKeystore()
fmt.Println("All existing accounts deleted...")
case "import":
fmt.Println("Importing private key...")
accountImportCommand.Parse(os.Args[3:])
priKey := *accountImportPtr
if accountImportCommand.Parsed() {
fmt.Println(priKey)
} else {
if !accountImportCommand.Parsed() {
fmt.Println("Failed to parse flags")
}
priKeyBytes, err := hex.DecodeString(priKey)
@ -91,31 +83,19 @@ func main() {
panic("Failed to parse the private key into bytes")
}
StorePrivateKey(priKeyBytes)
fmt.Println("Private key imported...")
case "showBalance":
configr := configr.NewConfigr()
configr.ReadConfigFile("local_config_shards.txt")
leaders, _ := configr.GetLeadersAndShardIds()
clientPeer := configr.GetClientPeer()
walletNode := node.New(nil, nil)
walletNode.Client = client.NewClient(&leaders)
go walletNode.StartServer(clientPeer.Port)
fmt.Println("Fetching account balance...")
walletNode.Client.ShardResponseTracker = make(map[uint32]bool)
walletNode.Client.UtxoMap = make(blockchain.UtxoMap)
p2p.BroadcastMessage(leaders, proto_node.ConstructFetchUtxoMessage(*clientPeer, ReadAddresses()))
go func() {
for true {
if len(walletNode.Client.ShardResponseTracker) == len(leaders) {
fmt.Println("All response received")
walletNode.Client.PrintUtxoBalance()
break
}
}
}()
time.Sleep(3 * time.Second) // Wait 3 seconds for the response. Exit afterward.
walletNode := CreateWalletServerNode()
go walletNode.StartServer(walletNode.ClientPeer.Port)
shardUtxoMap, err := FetchUtxos(ReadAddresses(), walletNode)
if err != nil {
fmt.Println(err)
}
PrintUtxoBalance(shardUtxoMap)
case "test":
priKey := pki.GetPrivateKeyScalarFromInt(33)
// Testing code
priKey := pki.GetPrivateKeyScalarFromInt(444)
address := pki.GetAddressFromPrivateKey(priKey)
priKeyBytes, err := priKey.MarshalBinary()
if err != nil {
@ -124,10 +104,109 @@ func main() {
fmt.Printf("Private Key :\n {%x}\n", priKeyBytes)
fmt.Printf("Address :\n {%x}\n", address)
}
case "transaction":
switch os.Args[2] {
case "new":
fmt.Println("Creating new transaction...")
case "transfer":
transferCommand.Parse(os.Args[2:])
if !transferCommand.Parsed() {
fmt.Println("Failed to parse flags")
}
sender := *transferSenderPtr
receiver := *transferReceiverPtr
amount := *transferAmountPtr
if amount <= 0 {
fmt.Println("Please specify positive amount to transfer")
}
priKeys := ReadPrivateKeys()
if len(priKeys) == 0 {
fmt.Println("No imported account to use.")
return
}
senderIndex, err := strconv.Atoi(sender)
senderAddress := ""
addresses := ReadAddresses()
if err != nil {
senderIndex = -1
for i, address := range addresses {
if fmt.Sprintf("%x", address) == senderAddress {
senderIndex = i
break
}
}
if senderIndex == -1 {
fmt.Println("The specified sender account is not imported yet.")
break
}
}
if senderIndex >= len(priKeys) {
fmt.Println("Sender account index out of bounds.")
return
}
receiverAddress, err := hex.DecodeString(receiver)
if err != nil || len(receiverAddress) != 20 {
fmt.Println("The receiver address is not a valid.")
return
}
// Generate transaction
trimmedReceiverAddress := [20]byte{}
copy(trimmedReceiverAddress[:], receiverAddress[:20])
senderPriKey := priKeys[senderIndex]
senderAddressBytes := pki.GetAddressFromPrivateKey(senderPriKey)
// Start client server
walletNode := CreateWalletServerNode()
go walletNode.StartServer(walletNode.ClientPeer.Port)
shardUtxoMap, err := FetchUtxos([][20]byte{senderAddressBytes}, walletNode)
if err != nil {
fmt.Printf("Failed to fetch utxos: %s\n", err)
}
cummulativeBalance := 0
txInputs := []blockchain.TXInput{}
LOOP:
for shardId, utxoMap := range shardUtxoMap {
for txId, vout2AmountMap := range utxoMap[senderAddressBytes] {
txIdBytes, err := utils.Get32BytesFromString(txId)
if err != nil {
fmt.Println("Failed to parse txId")
continue
}
for voutIndex, utxoAmount := range vout2AmountMap {
cummulativeBalance += utxoAmount
txIn := blockchain.NewTXInput(blockchain.NewOutPoint(&txIdBytes, voutIndex), senderAddressBytes, shardId)
txInputs = append(txInputs, *txIn)
if cummulativeBalance >= amount {
break LOOP
}
}
}
}
txout := blockchain.TXOutput{Amount: amount, Address: trimmedReceiverAddress, ShardID: uint32(math_rand.Intn(len(shardUtxoMap)))}
txOutputs := []blockchain.TXOutput{txout}
if cummulativeBalance > amount {
changeTxOut := blockchain.TXOutput{Amount: cummulativeBalance - amount, Address: senderAddressBytes, ShardID: uint32(math_rand.Intn(len(shardUtxoMap)))}
txOutputs = append(txOutputs, changeTxOut)
}
tx := blockchain.Transaction{ID: [32]byte{}, PublicKey: pki.GetBytesFromPublicKey(pki.GetPublicKeyFromScalar(senderPriKey)), TxInput: txInputs, TxOutput: txOutputs, Proofs: nil}
tx.SetID() // TODO(RJ): figure out the correct way to set Tx ID.
tx.Sign(senderPriKey)
pubKey := crypto.Ed25519Curve.Point()
err = pubKey.UnmarshalBinary(tx.PublicKey[:])
if err != nil {
fmt.Println("Failed to deserialize public key", "error", err)
}
err = ExecuteTransaction(tx, walletNode)
if err != nil {
fmt.Println(err)
} else {
fmt.Println("Transaction submitted successfully")
}
default:
flag.PrintDefaults()
@ -135,6 +214,94 @@ func main() {
}
}
// CreateWalletServerNode builds a wallet client node wired to the shard
// leaders and the client peer listed in local_config_shards.txt.
func CreateWalletServerNode() *node.Node {
	config := client_config.NewConfig()
	config.ReadConfigFile("local_config_shards.txt")
	leaders, _ := config.GetLeadersAndShardIds()

	walletNode := node.New(nil, nil)
	walletNode.Client = client.NewClient(&leaders)
	walletNode.ClientPeer = config.GetClientPeer()
	return walletNode
}
// ExecuteTransaction issues the transaction to the Harmony network and, for
// cross-shard transactions, waits (up to 5 seconds) for the pending
// cross-shard bookkeeping to drain before returning.
func ExecuteTransaction(tx blockchain.Transaction, walletNode *node.Node) error {
	if tx.IsCrossShard() {
		walletNode.Client.PendingCrossTxsMutex.Lock()
		walletNode.Client.PendingCrossTxs[tx.ID] = &tx
		walletNode.Client.PendingCrossTxsMutex.Unlock()
	}

	msg := proto_node.ConstructTransactionListMessage([]*blockchain.Transaction{&tx})
	p2p.BroadcastMessage(*walletNode.Client.Leaders, msg)

	// Buffered so the poller can always deliver its signal and exit, even if
	// we have already timed out (the original unbuffered send leaked the
	// goroutine forever on the timeout path).
	doneSignal := make(chan int, 1)
	go func() {
		for {
			// Read the pending-map size under its mutex; the original
			// busy-polled the map without locking (data race) and without
			// sleeping (100% CPU spin).
			walletNode.Client.PendingCrossTxsMutex.Lock()
			pending := len(walletNode.Client.PendingCrossTxs)
			walletNode.Client.PendingCrossTxsMutex.Unlock()
			if pending == 0 {
				doneSignal <- 0
				return
			}
			time.Sleep(10 * time.Millisecond)
		}
	}()
	select {
	case <-doneSignal:
		time.Sleep(100 * time.Millisecond)
		return nil
	case <-time.After(5 * time.Second):
		return errors.New("Cross-shard Transaction processing timed out")
	}
}
// FetchUtxos broadcasts a utxo-fetch request for the given addresses to all
// shard leaders and waits (up to 3 seconds) for every shard to respond,
// returning the per-shard utxo maps.
func FetchUtxos(addresses [][20]byte, walletNode *node.Node) (map[uint32]blockchain.UtxoMap, error) {
	fmt.Println("Fetching account balance...")
	walletNode.Client.ShardUtxoMap = make(map[uint32]blockchain.UtxoMap)
	p2p.BroadcastMessage(*walletNode.Client.Leaders, proto_node.ConstructFetchUtxoMessage(*walletNode.ClientPeer, addresses))

	// Buffered so the poller can deliver its signal and exit even after a
	// timeout (the original unbuffered send left the goroutine blocked forever).
	doneSignal := make(chan int, 1)
	go func() {
		for {
			// NOTE(review): ShardUtxoMap is filled in by the network handler;
			// this unsynchronized read mirrors the original code — confirm the
			// client guards the map, or add locking there.
			if len(walletNode.Client.ShardUtxoMap) == len(*walletNode.Client.Leaders) {
				doneSignal <- 0
				return
			}
			time.Sleep(10 * time.Millisecond) // avoid a 100%-CPU busy wait
		}
	}()
	select {
	case <-doneSignal:
		return walletNode.Client.ShardUtxoMap, nil
	case <-time.After(3 * time.Second):
		return nil, errors.New("Utxo fetch timed out")
	}
}
// PrintUtxoBalance sums the utxo amounts of every address across all shards
// and prints one address/balance pair per account.
// Note: map iteration order is random, so the print order varies run to run.
func PrintUtxoBalance(shardUtxoMap map[uint32]blockchain.UtxoMap) {
	addressBalance := make(map[[20]byte]int)
	for _, utxoMap := range shardUtxoMap {
		for address, txHash2Vout2AmountMap := range utxoMap {
			for _, vout2AmountMap := range txHash2Vout2AmountMap {
				for _, amount := range vout2AmountMap {
					// The map's zero value makes the explicit existence
					// check of the original unnecessary.
					addressBalance[address] += amount
				}
			}
		}
	}
	for address, balance := range addressBalance {
		fmt.Printf("Address: {%x}\n", address)
		fmt.Printf("Balance: %d\n", balance)
	}
}
// Read the addresses stored in local keystore
func ReadAddresses() [][20]byte {
priKeys := ReadPrivateKeys()
addresses := [][20]byte{}
@ -144,6 +311,7 @@ func ReadAddresses() [][20]byte {
return addresses
}
// Store the specified private key in local keystore
func StorePrivateKey(priKey []byte) {
for _, address := range ReadAddresses() {
if address == pki.GetAddressFromPrivateKey(crypto.Ed25519Curve.Scalar().SetBytes(priKey)) {
@ -164,10 +332,12 @@ func StorePrivateKey(priKey []byte) {
f.Close()
}
func DeletePrivateKey() {
// Delete all data in the local keystore
func ClearKeystore() {
ioutil.WriteFile("keystore", []byte{}, 0644)
}
// Read all the private key stored in local keystore
func ReadPrivateKeys() []kyber.Scalar {
keys, err := ioutil.ReadFile("keystore")
if err != nil {

@ -1,159 +0,0 @@
package configr
import (
"bufio"
"log"
"os"
"strconv"
"strings"
"github.com/simple-rules/harmony-benchmark/crypto"
"github.com/simple-rules/harmony-benchmark/crypto/pki"
"github.com/simple-rules/harmony-benchmark/p2p"
"github.com/simple-rules/harmony-benchmark/utils"
)
// ConfigEntry is one parsed line of the distribution config file:
// the network location and role of a single node.
type ConfigEntry struct {
	IP      string // node IP address
	Port    string // node listening port
	Role    string // "leader", "validator", or "client"
	ShardID string // shard id the node belongs to (decimal string)
}
// Configr holds the parsed node configuration and answers
// role/shard lookup queries against it.
type Configr struct {
	config []ConfigEntry // all entries, in file order
}
// NewConfigr returns an empty Configr; call ReadConfigFile to populate it.
func NewConfigr() *Configr {
	return &Configr{}
}
// GetValidators returns a peer for every entry whose role is "validator".
func (configr *Configr) GetValidators() []p2p.Peer {
	var validators []p2p.Peer
	for _, entry := range configr.config {
		if entry.Role == "validator" {
			validators = append(validators, p2p.Peer{Ip: entry.IP, Port: entry.Port})
		}
	}
	return validators
}
// GetLeadersAndShardIds returns every leader peer together with its numeric
// shard id, in file order. An entry whose shard id fails to parse is logged
// and contributes no shard id (so the two slices can differ in length).
func (configr *Configr) GetLeadersAndShardIds() ([]p2p.Peer, []uint32) {
	var leaders []p2p.Peer
	var shardIDs []uint32
	for _, entry := range configr.config {
		if entry.Role != "leader" {
			continue
		}
		leaders = append(leaders, p2p.Peer{Ip: entry.IP, Port: entry.Port})
		if val, err := strconv.Atoi(entry.ShardID); err == nil {
			shardIDs = append(shardIDs, uint32(val))
		} else {
			log.Print("[Generator] Error parsing the shard Id ", entry.ShardID)
		}
	}
	return leaders, shardIDs
}
// GetClientPeer returns the first entry with role "client" as a peer,
// or nil when the config has no client node.
func (configr *Configr) GetClientPeer() *p2p.Peer {
	for _, entry := range configr.config {
		if entry.Role == "client" {
			return &p2p.Peer{Ip: entry.IP, Port: entry.Port}
		}
	}
	return nil
}
// GetClientPort returns the port of the client node, or "" when the config
// has no client entry.
func (configr *Configr) GetClientPort() string {
	for _, entry := range configr.config {
		if entry.Role != "client" {
			continue
		}
		return entry.Port
	}
	return ""
}
// ReadConfigFile parses the config file (one "ip port role shardID" record
// per space-separated line) into configr.config.
// On open failure it logs fatally (process exits), preserving the original
// behavior; scanner errors are returned to the caller.
func (configr *Configr) ReadConfigFile(filename string) error {
	file, err := os.Open(filename)
	if err != nil {
		// log.Fatal exits the process; the return is kept for interface
		// compatibility (it was unreachable in the original too).
		log.Fatal("Failed to read config file ", filename)
		return err
	}
	// Close only after a successful Open; the original deferred Close on a
	// possibly-nil handle before checking the error.
	defer file.Close()

	fscanner := bufio.NewScanner(file)
	result := []ConfigEntry{}
	for fscanner.Scan() {
		p := strings.Split(fscanner.Text(), " ")
		if len(p) < 4 {
			// Skip blank/malformed lines instead of panicking on a
			// short split result.
			continue
		}
		entry := ConfigEntry{p[0], p[1], p[2], p[3]}
		result = append(result, entry)
	}
	// The original never checked the scanner for read errors.
	if err := fscanner.Err(); err != nil {
		return err
	}
	configr.config = result
	return nil
}
// GetShardID returns the shard id of the node at the given ip and port,
// or "N/A" when no entry matches.
func (configr *Configr) GetShardID(ip, port string) string {
	for _, entry := range configr.config {
		if entry.IP != ip || entry.Port != port {
			continue
		}
		return entry.ShardID
	}
	return "N/A"
}
// GetPeers returns the validator peers of the given shard, each with a
// public key derived deterministically from its ip and port.
// NOTE(review): the ip and port parameters are unused, matching the original.
func (configr *Configr) GetPeers(ip, port, shardID string) []p2p.Peer {
	var peers []p2p.Peer
	for _, entry := range configr.config {
		if entry.Role == "validator" && entry.ShardID == shardID {
			peer := p2p.Peer{Ip: entry.IP, Port: entry.Port}
			setKey(&peer)
			peers = append(peers, peer)
		}
	}
	return peers
}
// GetLeader returns the leader peer of the given shard. If several entries
// match, the last one in the config wins (original semantics); when none
// match, a zero-valued Peer is returned.
func (configr *Configr) GetLeader(shardID string) p2p.Peer {
	var leader p2p.Peer
	for _, entry := range configr.config {
		if entry.Role != "leader" || entry.ShardID != shardID {
			continue
		}
		leader = p2p.Peer{Ip: entry.IP, Port: entry.Port}
		setKey(&leader)
	}
	return leader
}
// GetConfigEntries returns all parsed config entries in file order.
// NOTE(review): this exposes the internal slice directly; callers can
// mutate the Configr's state through it.
func (configr *Configr) GetConfigEntries() []ConfigEntry {
	return configr.config
}
// GetMyConfigEntry returns a pointer to a copy of the entry matching the
// given ip and port, or nil when no entry matches.
func (configr *Configr) GetMyConfigEntry(ip string, port string) *ConfigEntry {
	for i := range configr.config {
		entry := configr.config[i]
		if entry.IP == ip && entry.Port == port {
			return &entry
		}
	}
	return nil
}
// setKey derives the peer's public key deterministically from its ip and
// port (via the peer's unique id) and stores it on the peer.
func setKey(peer *p2p.Peer) {
	// TODO: figure out why using a random hash value doesn't work for private key (schnorr)
	uniqueID := int64(utils.GetUniqueIdFromPeer(*peer))
	priKey := crypto.Ed25519Curve.Scalar().SetInt64(uniqueID)
	peer.PubKey = pki.GetPublicKeyFromScalar(priKey)
}

@ -199,5 +199,5 @@ func (consensus *Consensus) String() string {
duty = "VLD" // validator
}
return fmt.Sprintf("[duty:%s, priKey:%s, ShardID:%v, nodeId:%v, state:%s]",
duty, fmt.Sprintf("%x", consensus.priKey), consensus.ShardID, consensus.nodeId, consensus.state)
duty, consensus.priKey.String(), consensus.ShardID, consensus.nodeId, consensus.state)
}

@ -4,6 +4,7 @@ import (
"bytes"
"encoding/binary"
"encoding/gob"
"encoding/hex"
"errors"
"net/http"
"net/url"
@ -366,12 +367,11 @@ func (consensus *Consensus) processResponseMessage(payload []byte, targetState C
}
// Sign the block
// TODO(RJ): populate bitmap
copy(blockHeaderObj.Signature[:], collectiveSig[:])
copy(blockHeaderObj.Bitmap[:], bitmap)
consensus.OnConsensusDone(&blockHeaderObj)
consensus.reportTPS(blockHeaderObj.NumTransactions)
consensus.reportMetrics(blockHeaderObj)
// Send signal to Node so the new block can be added and new round of consensus can be triggered
consensus.ReadySignal <- 1
}
@ -403,30 +403,42 @@ func (consensus *Consensus) verifyResponse(commitments *map[uint16]kyber.Point,
return nil
}
// reportTPS logs a transactions-per-second report for the round that just
// finished and forwards the computed TPS to the metrics reporter.
// NOTE(review): startTime is a package-level variable set elsewhere, so
// timeElapsed measures from that external start point — confirm it is reset
// per consensus round.
func (consensus *Consensus) reportTPS(numOfTxs int32) {
	endTime := time.Now()
	timeElapsed := endTime.Sub(startTime)
	tps := float64(numOfTxs) / timeElapsed.Seconds()
	consensus.Log.Info("TPS Report",
		"numOfTXs", numOfTxs,
		"startTime", startTime,
		"endTime", endTime,
		"timeElapsed", timeElapsed,
		"TPS", tps,
		"consensus", consensus)
	reportMetrics(tps)
}
func reportMetrics(tps float64) {
URL := "http://localhost:3000/report"
form := url.Values{
"tps": {strconv.FormatFloat(tps, 'f', 2, 64)},
}
func (consensus *Consensus) reportMetrics(block blockchain.Block) {
if !block.IsStateBlock() { // Skip state block stats
endTime := time.Now()
timeElapsed := endTime.Sub(startTime)
numOfTxs := block.NumTransactions
tps := float64(numOfTxs) / timeElapsed.Seconds()
consensus.Log.Info("TPS Report",
"numOfTXs", numOfTxs,
"startTime", startTime,
"endTime", endTime,
"timeElapsed", timeElapsed,
"TPS", tps,
"consensus", consensus)
// Post metrics
URL := "http://localhost:3000/report"
txHashes := []string{}
for i := 1; i <= 3; i++ {
if len(block.TransactionIds)-i >= 0 {
txHashes = append(txHashes, hex.EncodeToString(block.TransactionIds[len(block.TransactionIds)-i][:]))
}
}
form := url.Values{
"key": {consensus.pubKey.String()},
"tps": {strconv.FormatFloat(tps, 'f', 2, 64)},
"txCount": {strconv.Itoa(int(numOfTxs))},
"nodeCount": {strconv.Itoa(len(consensus.validators) + 1)},
"latestBlockHash": {hex.EncodeToString(consensus.blockHash[:])},
"latestTxHashes": txHashes,
"blockLatency": {strconv.Itoa(int(timeElapsed / time.Millisecond))},
}
body := bytes.NewBufferString(form.Encode())
rsp, err := http.Post(URL, "application/x-www-form-urlencoded", body)
if err != nil {
return
body := bytes.NewBufferString(form.Encode())
rsp, err := http.Post(URL, "application/x-www-form-urlencoded", body)
if err == nil {
defer rsp.Body.Close()
}
}
defer rsp.Body.Close()
}

@ -22,6 +22,10 @@ func GetAddressFromPrivateKey(priKey kyber.Scalar) [20]byte {
return GetAddressFromPublicKey(GetPublicKeyFromScalar(priKey))
}
func GetAddressFromPrivateKeyBytes(priKey [32]byte) [20]byte {
return GetAddressFromPublicKey(GetPublicKeyFromScalar(crypto.Ed25519Curve.Scalar().SetBytes(priKey[:])))
}
// Temporary helper function for benchmark use
func GetAddressFromInt(value int) [20]byte {
return GetAddressFromPublicKey(GetPublicKeyFromScalar(GetPrivateKeyScalarFromInt(value)))
@ -51,3 +55,13 @@ func GetPublicKeyFromPrivateKey(priKey [32]byte) kyber.Point {
// GetPublicKeyFromScalar computes the public key point for the given private
// key scalar (scalar multiplication; the nil argument selects the curve's
// standard base point per the kyber API).
func GetPublicKeyFromScalar(priKey kyber.Scalar) kyber.Point {
	return crypto.Ed25519Curve.Point().Mul(priKey, nil)
}
// GetBytesFromPublicKey serializes the public key point into a fixed
// 32-byte array. On marshal failure the zero array is returned (matching
// the original's silent fallback).
func GetBytesFromPublicKey(pubKey kyber.Point) [32]byte {
	var result [32]byte
	if raw, err := pubKey.MarshalBinary(); err == nil {
		copy(result[:], raw)
	}
	return result
}

@ -37,7 +37,6 @@ db_supported=$2
# Also it's recommended to use `go build` for testing the whole exe.
go build -o bin/benchmark
go build -o bin/txgen client/txgen/main.go
go build -o bin/profiler profiler/main.go
# Create a tmp folder for logs
t=`date +"%Y%m%d-%H%M%S"`
@ -58,8 +57,8 @@ while IFS='' read -r line || [[ -n "$line" ]]; do
fi
done < $config
txgen_enabled=$3
txgen_disabled=$3
# Generate transactions
if [ -z "$txgen_enabled" ]; then
if [ -z "$txgen_disabled" ]; then
./bin/txgen -config_file $config -log_folder $log_folder
fi

@ -3,8 +3,6 @@
GOOS=linux
GOARCH=amd64
env GOOS=$GOOS GOARCH=$GOARCH go build -o bin/benchmark benchmark.go
env GOOS=$GOOS GOARCH=$GOARCH go build -o bin/soldier aws-experiment-launch/experiment/soldier/main.go
env GOOS=$GOOS GOARCH=$GOARCH go build -o bin/commander aws-experiment-launch/experiment/commander/main.go
env GOOS=$GOOS GOARCH=$GOARCH go build -o bin/txgen client/txgen/main.go
AWSCLI=aws
@ -13,7 +11,4 @@ if [ "$1" != "" ]; then
fi
$AWSCLI s3 cp bin/benchmark s3://unique-bucket-bin/benchmark --acl public-read-write
$AWSCLI s3 cp bin/soldier s3://unique-bucket-bin/soldier --acl public-read-write
$AWSCLI s3 cp bin/commander s3://unique-bucket-bin/commander --acl public-read-write
$AWSCLI s3 cp bin/txgen s3://unique-bucket-bin/txgen --acl public-read-write
$AWSCLI s3 cp kill_node.sh s3://unique-bucket-bin/kill_node.sh --acl public-read-write

@ -4,7 +4,6 @@ import (
"bytes"
"encoding/gob"
"net"
"os"
"sync"
"github.com/simple-rules/harmony-benchmark/crypto/pki"
@ -70,20 +69,25 @@ func (node *Node) getTransactionsForNewBlock(maxNumTxs int) ([]*blockchain.Trans
// StartServer starts listening on the given TCP port and dispatches each
// accepted connection to the node's handler. It blocks in the accept loop
// (see listenOnPort).
func (node *Node) StartServer(port string) {
	node.log.Debug("Starting server", "node", node, "port", port)
	node.listenOnPort(port)
}
func (node *Node) listenOnPort(port string) {
listen, err := net.Listen("tcp4", ":"+port)
defer listen.Close()
defer func(listen net.Listener) {
if listen != nil {
listen.Close()
}
}(listen)
if err != nil {
node.log.Crit("Socket listen port failed", "port", port, "err", err)
os.Exit(1)
node.log.Error("Socket listen port failed", "port", port, "err", err)
return
}
for {
conn, err := listen.Accept()
if err != nil {
node.log.Crit("Error listening on port. Exiting.", "port", port)
node.log.Error("Error listening on port.", "port", port)
continue
}
go node.NodeHandler(conn)

@ -22,6 +22,8 @@ import (
const (
// The max number of transaction per a block.
MaxNumberOfTransactionsPerBlock = 3000
// The number of blocks allowed before generating state block
NumBlocksBeforeStateBlock = 10
)
// NodeHandler handles a new incoming connection.
@ -90,10 +92,12 @@ func (node *Node) NodeHandler(conn net.Conn) {
decoder := gob.NewDecoder(bytes.NewReader(msgPayload[1:])) // skip the SYNC messge type
blocks := new([]*blockchain.Block)
decoder.Decode(blocks)
if node.Client != nil && blocks != nil {
if node.Client != nil && node.Client.UpdateBlocks != nil && blocks != nil {
node.Client.UpdateBlocks(*blocks)
}
}
case proto_node.BLOCKCHAIN_SYNC:
node.transactionMessageHandler(msgPayload)
case proto_node.CLIENT:
clientMsgType := proto_node.ClientMessageType(msgPayload[0])
switch clientMsgType {
@ -106,7 +110,6 @@ func (node *Node) NodeHandler(conn net.Conn) {
utxoMap := node.UtxoPool.GetUtxoMapByAddresses(fetchUtxoMessage.Addresses)
p2p.SendMessage(fetchUtxoMessage.Sender, client.ConstructFetchUtxoResponseMessage(&utxoMap, node.UtxoPool.ShardID))
log.Info("Utxo Map", "Detail", utxoMap)
}
case proto_node.CONTROL:
controlType := msgPayload[0]
@ -118,25 +121,38 @@ func (node *Node) NodeHandler(conn net.Conn) {
avgBlockSizeInBytes := 0
txCount := 0
blockCount := 0
totalTxCount := 0
totalBlockCount := 0
avgTxSize := 0
for _, block := range node.blockchain.Blocks {
byteBuffer := bytes.NewBuffer([]byte{})
encoder := gob.NewEncoder(byteBuffer)
encoder.Encode(block)
avgBlockSizeInBytes += len(byteBuffer.Bytes())
txCount += len(block.Transactions)
byteBuffer = bytes.NewBuffer([]byte{})
encoder = gob.NewEncoder(byteBuffer)
encoder.Encode(block.Transactions)
avgTxSize += len(byteBuffer.Bytes())
if block.IsStateBlock() {
totalTxCount += int(block.State.NumTransactions)
totalBlockCount += int(block.State.NumBlocks)
} else {
byteBuffer := bytes.NewBuffer([]byte{})
encoder := gob.NewEncoder(byteBuffer)
encoder.Encode(block)
avgBlockSizeInBytes += len(byteBuffer.Bytes())
txCount += len(block.Transactions)
blockCount += 1
totalTxCount += len(block.TransactionIds)
totalBlockCount += 1
byteBuffer = bytes.NewBuffer([]byte{})
encoder = gob.NewEncoder(byteBuffer)
encoder.Encode(block.Transactions)
avgTxSize += len(byteBuffer.Bytes())
}
}
if blockCount != 0 {
avgBlockSizeInBytes = avgBlockSizeInBytes / blockCount
avgTxSize = avgTxSize / txCount
}
avgBlockSizeInBytes = avgBlockSizeInBytes / len(node.blockchain.Blocks)
avgTxSize = avgTxSize / txCount
node.log.Debug("Blockchain Report", "numBlocks", len(node.blockchain.Blocks), "avgBlockSize", avgBlockSizeInBytes, "numTxs", txCount, "avgTxSzie", avgTxSize)
node.log.Debug("Blockchain Report", "totalNumBlocks", totalBlockCount, "avgBlockSizeInCurrentEpoch", avgBlockSizeInBytes, "totalNumTxs", totalTxCount, "avgTxSzieInCurrentEpoch", avgTxSize)
os.Exit(0)
}
@ -158,11 +174,10 @@ func (node *Node) transactionMessageHandler(msgPayload []byte) {
switch txMessageType {
case proto_node.SEND:
txDecoder := gob.NewDecoder(bytes.NewReader(msgPayload[1:])) // skip the SEND messge type
txList := new([]*blockchain.Transaction)
err := txDecoder.Decode(&txList)
err := txDecoder.Decode(txList)
if err != nil {
node.log.Error("Failed deserializing transaction list", "node", node)
node.log.Error("Failed to deserialize transaction list", "error", err)
}
node.addPendingTransactions(*txList)
case proto_node.REQUEST:
@ -221,26 +236,32 @@ func (node *Node) WaitForConsensusReady(readySignal chan int) {
//node.log.Debug("Adding new block", "currentChainSize", len(node.blockchain.Blocks), "numTxs", len(node.blockchain.GetLatestBlock().Transactions), "PrevHash", node.blockchain.GetLatestBlock().PrevBlockHash, "Hash", node.blockchain.GetLatestBlock().Hash)
if !retry {
for {
// Once we have more than 100 transactions pending we will try creating a new block
if len(node.pendingTransactions) >= 100 {
selectedTxs, crossShardTxAndProofs := node.getTransactionsForNewBlock(MaxNumberOfTransactionsPerBlock)
if len(selectedTxs) == 0 {
node.log.Debug("No valid transactions exist", "pendingTx", len(node.pendingTransactions))
} else {
node.log.Debug("Creating new block", "numTxs", len(selectedTxs), "pendingTxs", len(node.pendingTransactions), "currentChainSize", len(node.blockchain.Blocks))
node.transactionInConsensus = selectedTxs
node.log.Debug("CROSS SHARD TX", "num", len(crossShardTxAndProofs))
node.CrossTxsInConsensus = crossShardTxAndProofs
newBlock = blockchain.NewBlock(selectedTxs, node.blockchain.GetLatestBlock().Hash, node.Consensus.ShardID)
break
if len(node.blockchain.Blocks) > NumBlocksBeforeStateBlock {
// Generate state block and run consensus on it
newBlock = node.blockchain.CreateStateBlock(node.UtxoPool)
} else {
// Normal tx block consensus
for {
// Once we have pending transactions we will try creating a new block
if len(node.pendingTransactions) >= 1 {
selectedTxs, crossShardTxAndProofs := node.getTransactionsForNewBlock(MaxNumberOfTransactionsPerBlock)
if len(selectedTxs) == 0 {
node.log.Debug("No valid transactions exist", "pendingTx", len(node.pendingTransactions))
} else {
node.log.Debug("Creating new block", "numTxs", len(selectedTxs), "pendingTxs", len(node.pendingTransactions), "currentChainSize", len(node.blockchain.Blocks))
node.transactionInConsensus = selectedTxs
node.log.Debug("CROSS SHARD TX", "num", len(crossShardTxAndProofs))
node.CrossTxsInConsensus = crossShardTxAndProofs
newBlock = blockchain.NewBlock(selectedTxs, node.blockchain.GetLatestBlock().Hash, node.Consensus.ShardID)
break
}
}
// If not enough transactions to run Consensus,
// periodically check whether we have enough transactions to package into block.
time.Sleep(1 * time.Second)
}
// If not enough transactions to run Consensus,
// periodically check whether we have enough transactions to package into block.
time.Sleep(1 * time.Second)
}
}
@ -278,29 +299,47 @@ func (node *Node) BroadcastNewBlock(newBlock *blockchain.Block) {
// VerifyNewBlock is called by consensus participants to verify the block
// they are running consensus on: state blocks are checked against the utxo
// pool's state verification, regular blocks by verifying their transactions.
// (The unconditional `return` left before the branch by the merge made the
// state-block path unreachable; it has been removed.)
func (node *Node) VerifyNewBlock(newBlock *blockchain.Block) bool {
	if newBlock.IsStateBlock() {
		return node.UtxoPool.VerifyStateBlock(newBlock)
	}
	return node.UtxoPool.VerifyTransactions(newBlock.Transactions)
}
// This is called by consensus participants, after consensus is done, to:
// 1. add the new block to blockchain
// 2. [leader] move cross shard tx and proof to the list where they wait to be sent to the client
func (node *Node) PostConsensusProcessing(newBlock *blockchain.Block) {
node.AddNewBlock(newBlock)
if node.Consensus.IsLeader {
// Move crossTx-in-consensus into the list to be returned to client
for _, crossTxAndProof := range node.CrossTxsInConsensus {
crossTxAndProof.Proof.BlockHash = newBlock.Hash
// TODO: fill in the signature proofs
}
if len(node.CrossTxsInConsensus) != 0 {
node.addCrossTxsToReturn(node.CrossTxsInConsensus)
node.CrossTxsInConsensus = []*blockchain.CrossShardTxAndProof{}
if newBlock.IsStateBlock() {
// Clear out old tx blocks and put state block as genesis
if node.db != nil {
node.log.Info("Deleting old blocks.")
for i := 1; i <= len(node.blockchain.Blocks); i++ {
blockchain.Delete(node.db, strconv.Itoa(i))
}
}
node.blockchain.Blocks = []*blockchain.Block{}
node.AddNewBlock(newBlock)
} else {
node.AddNewBlock(newBlock)
node.UpdateUtxoAndState(newBlock)
if node.Consensus.IsLeader {
// Move crossTx-in-consensus into the list to be returned to client
for _, crossTxAndProof := range node.CrossTxsInConsensus {
crossTxAndProof.Proof.BlockHash = newBlock.Hash
// TODO: fill in the signature proofs
}
if len(node.CrossTxsInConsensus) != 0 {
node.addCrossTxsToReturn(node.CrossTxsInConsensus)
node.CrossTxsInConsensus = []*blockchain.CrossShardTxAndProof{}
}
node.SendBackProofOfAcceptOrReject()
node.BroadcastNewBlock(newBlock)
node.SendBackProofOfAcceptOrReject()
node.BroadcastNewBlock(newBlock)
}
}
}
func (node *Node) AddNewBlock(newBlock *blockchain.Block) {
@ -311,6 +350,9 @@ func (node *Node) AddNewBlock(newBlock *blockchain.Block) {
node.log.Info("Writing new block into disk.")
newBlock.Write(node.db, strconv.Itoa(len(node.blockchain.Blocks)))
}
}
func (node *Node) UpdateUtxoAndState(newBlock *blockchain.Block) {
// Update UTXO pool
node.UtxoPool.Update(newBlock.Transactions)
// Clear transaction-in-Consensus list

@ -94,3 +94,23 @@ ILOOP:
}
return contentBuf.Bytes(), nil
}
// CreateMessage frames data for the wire as: a 1-byte message type, a
// 4-byte big-endian payload length, then the payload itself.
func CreateMessage(msgType byte, data []byte) []byte {
	framed := make([]byte, 0, 1+4+len(data))
	framed = append(framed, msgType)
	var length [4]byte
	binary.BigEndian.PutUint32(length[:], uint32(len(data)))
	framed = append(framed, length[:]...)
	framed = append(framed, data...)
	return framed
}
// SendMessageContent frames data with message type 1 and writes it to the
// connection through a buffered writer, flushing immediately.
// NOTE(review): write/flush errors are discarded, matching the original.
func SendMessageContent(conn net.Conn, data []byte) {
	payload := CreateMessage(byte(1), data)
	writer := bufio.NewWriter(conn)
	writer.Write(payload)
	writer.Flush()
}

@ -0,0 +1,53 @@
package p2p
import (
"bufio"
"net"
"testing"
)
// setUpTestServer accepts a single connection on :8081 and echoes `times`
// framed messages back to the client. It runs in its own goroutine, so it
// must never call t.Fatalf/t.Parallel (both are restricted to the test
// goroutine) — the original did both.
func setUpTestServer(times int, t *testing.T, conCreated chan bool) {
	ln, err := net.Listen("tcp", ":8081")
	if err != nil {
		// Signal the test anyway so it does not deadlock on <-conCreated.
		t.Errorf("failed to listen on :8081: %v", err)
		conCreated <- true
		return
	}
	conCreated <- true
	conn, err := ln.Accept()
	if err != nil {
		t.Errorf("failed to accept connection: %v", err)
		return
	}
	defer conn.Close()
	w := bufio.NewWriter(conn)
	for times > 0 {
		times--
		data, err := ReadMessageContent(conn)
		if err != nil {
			// Errorf + return instead of Fatalf: FailNow must not be
			// called from a non-test goroutine.
			t.Errorf("error when ReadMessageContent %v", err)
			return
		}
		data = CreateMessage(byte(1), data)
		w.Write(data)
		w.Flush()
	}
}
// TestNewNewNode round-trips `times` framed messages through the local echo
// server and verifies each payload survives intact.
func TestNewNewNode(t *testing.T) {
	times := 100
	conCreated := make(chan bool)
	go setUpTestServer(times, t, conCreated)
	<-conCreated
	conn, err := net.Dial("tcp", "127.0.0.1:8081")
	if err != nil {
		// The original ignored this error and would nil-deref below.
		t.Fatalf("failed to dial test server: %v", err)
	}
	// Deferred so the connection is closed on every exit path.
	defer conn.Close()
	for times > 0 {
		times--
		myMsg := "minhdoan"
		SendMessageContent(conn, []byte(myMsg))
		data, err := ReadMessageContent(conn)
		if err != nil {
			t.Error("got an error when trying to receive an expected message from server.")
		}
		if string(data) != myMsg {
			t.Error("did not receive expected message")
		}
	}
}

@ -1,55 +0,0 @@
package main
import (
"flag"
"fmt"
"time"
"github.com/shirou/gopsutil/process"
"github.com/simple-rules/harmony-benchmark/log"
)
type profilerSetting struct {
pid int32
shardID string
}
var (
setting profilerSetting
)
func logPerf() {
p, _ := process.NewProcess(setting.pid)
for {
// log mem usage
info, _ := p.MemoryInfo()
memMap, _ := p.MemoryMaps(false)
log.Info("Mem Report", "info", info, "map", memMap, "shardID", setting.shardID)
// log cpu usage
percent, _ := p.CPUPercent()
times, _ := p.Times()
log.Info("CPU Report", "percent", percent, "times", times, "shardID", setting.shardID)
time.Sleep(3 * time.Second)
}
}
func main() {
pid := flag.Int("pid", 0, "process id of the node")
shardID := flag.String("shard_id", "0", "the shard id of this node")
logFolder := flag.String("log_folder", "latest", "the folder collecting the logs of this execution")
flag.Parse()
setting.pid = int32(*pid)
setting.shardID = *shardID
logFileName := fmt.Sprintf("./%v/profiler-%v.log", *logFolder, *shardID)
h := log.MultiHandler(
log.StdoutHandler,
log.Must.FileHandler(logFileName, log.JSONFormat()), // Log to file
// log.Must.NetHandler("tcp", ":3000", log.JSONFormat()) // Log to remote
)
log.Root().SetHandler(h)
logPerf()
}

@ -0,0 +1,48 @@
package profiler
import (
"time"
"github.com/shirou/gopsutil/process"
"github.com/simple-rules/harmony-benchmark/log"
)
// Profiler periodically samples and logs memory and CPU usage of a target
// process, identified by PID.
type Profiler struct {
	logger log.Logger // destination for the periodic reports
	PID int32 // pid of the profiled process
	ShardID string // shard id tag attached to every report
	proc *process.Process // process handle, opened in Start
}
// NewProfiler builds a Profiler for the given pid; call Start to begin the
// background sampling loops.
func NewProfiler(logger log.Logger, pid int, shardID string) *Profiler {
	return &Profiler{logger: logger, PID: int32(pid), ShardID: shardID, proc: nil}
}
// LogMemory logs the profiled process's memory usage every 3 seconds,
// forever. Sampling errors are deliberately ignored (best-effort reporting);
// a nil info/memMap is simply logged as-is.
func (profiler *Profiler) LogMemory() {
	for {
		// log mem usage
		info, _ := profiler.proc.MemoryInfo()
		memMap, _ := profiler.proc.MemoryMaps(false)
		profiler.logger.Info("Mem Report", "info", info, "map", memMap, "shardID", profiler.ShardID)
		time.Sleep(3 * time.Second)
	}
}
// LogCPU logs the profiled process's CPU usage (percentage and cumulative
// times) every 3 seconds, forever. Sampling errors are deliberately ignored
// (best-effort reporting).
func (profiler *Profiler) LogCPU() {
	for {
		// log cpu usage
		percent, _ := profiler.proc.CPUPercent()
		times, _ := profiler.proc.Times()
		profiler.logger.Info("CPU Report", "percent", percent, "times", times, "shardID", profiler.ShardID)
		time.Sleep(3 * time.Second)
	}
}
// Start opens a handle to the target process and launches the CPU and
// memory sampling loops in background goroutines.
func (profiler *Profiler) Start() {
	var err error
	profiler.proc, err = process.NewProcess(profiler.PID)
	if err != nil {
		// The original ignored this error: an invalid PID left proc nil and
		// the sampling goroutines would dereference it.
		profiler.logger.Error("Failed to open process for profiling", "pid", profiler.PID, "err", err)
		return
	}
	go profiler.LogCPU()
	go profiler.LogMemory()
}

@ -17,6 +17,7 @@ const (
BLOCK
CLIENT
CONTROL
BLOCKCHAIN_SYNC
// TODO: add more types
)
@ -89,7 +90,10 @@ func ConstructTransactionListMessage(transactions []*blockchain.Transaction) []b
for i := range txs {
txs[i] = *transactions[i]
}
encoder.Encode(txs)
err := encoder.Encode(txs)
if err != nil {
return []byte{} // TODO(RJ): better handle of the error
}
return byteBuffer.Bytes()
}

@ -3,9 +3,6 @@
go build -o bin/benchmark
go build -o bin/txgen client/txgen/main.go
go build -o bin/commander aws-experiment-launch/experiment/commander/main.go
go build -o bin/soldier aws-experiment-launch/experiment/soldier/main.go
go build -o bin/profiler profiler/main.go
cd bin
# Create a tmp folder for logs
@ -22,4 +19,4 @@ while IFS='' read -r line || [[ -n "$line" ]]; do
./soldier -ip $ip -port $port&
done < $config
./commander
./commander

@ -110,3 +110,14 @@ func LeftPadBytes(slice []byte, l int) []byte {
return padded
}
// Get32BytesFromString decodes a hex string into a fixed 32-byte array.
// Shorter inputs leave the tail zero-padded; longer inputs are truncated
// to 32 bytes (matching the original's copy semantics).
func Get32BytesFromString(hashString string) ([32]byte, error) {
	var result [32]byte
	decoded, err := hex.DecodeString(hashString)
	if err != nil {
		return result, err
	}
	copy(result[:], decoded)
	return result, nil
}

Loading…
Cancel
Save