Merge pull request #176 from harmony-one/open_source_cleanup
remove static config file support
commit 83521223e4
@@ -1,29 +0,0 @@
# Kill nodes if any
./kill_node.sh

# Since `go run` generates a temporary exe every time, Windows pops up a
# network security dialog for each instance and you won't be able to turn it off.
# With `go build` producing a single exe, the dialog only pops up the very first time.
# It's also recommended to use `go build` when testing the whole exe.
go build -o bin/benchmark
go build -o bin/btctxgen client/btctxgen/main.go

# Create a tmp folder for logs
t=`date +"%Y%m%d-%H%M%S"`
log_folder="tmp_log/log-$t"

mkdir -p $log_folder

# Start nodes
config=$1
while IFS='' read -r line || [[ -n "$line" ]]; do
  IFS=' ' read ip port mode shardID <<< $line
  #echo $ip $port $mode
  if [ "$mode" != "client" ]; then
    ./bin/benchmark -ip $ip -port $port -config_file $config -log_folder $log_folder &
  fi
done < $config

# Generate transactions
./bin/btctxgen -config_file $config -log_folder $log_folder
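For context, the while-read loops in these removed scripts (and the ReadConfigFile helper further down) expect the static config file to list one node per line in `ip port role shardID` order. A purely illustrative example of such a file (addresses, ports, and shard assignments are made up) might be:

127.0.0.1 9000 leader 0
127.0.0.1 9001 validator 0
127.0.0.1 9002 validator 0
127.0.0.1 9003 leader 1
127.0.0.1 9004 validator 1
127.0.0.1 9100 client 0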
@@ -1,22 +0,0 @@
#!/bin/bash -x
## Kernel tuning for Linux
## Reference: https://github.com/Zilliqa/Zilliqa/blob/master/tests/Node/test_node_simple.sh
sudo sysctl net.core.somaxconn=1024
sudo sysctl net.core.netdev_max_backlog=65536;
sudo sysctl net.ipv4.tcp_tw_reuse=1;
sudo sysctl -w net.ipv4.tcp_rmem='65536 873800 1534217728';
sudo sysctl -w net.ipv4.tcp_wmem='65536 873800 1534217728';
sudo sysctl -w net.ipv4.tcp_mem='65536 873800 1534217728';

./kill_node.sh
source ~/.bash_profile
echo "Inside deploy linux"
echo $GOPATH
echo "Inside deploy linux line 2"

config=$1
while read ip port mode; do
  #echo $ip $port $mode $config
  go run ./benchmark.go -ip $ip -port $port -config_file $config &
done < $config
go run ./client/txgen/main.go -config_file $config
@@ -1,70 +0,0 @@
package discovery

import (
	"fmt"

	"github.com/dedis/kyber"
	"github.com/harmony-one/harmony/p2p"
)

// ConfigEntry is the config entry.
type ConfigEntry struct {
	IP          string
	Port        string
	Role        string
	ShardID     string
	ValidatorID int // Validator ID in its shard.

	leader p2p.Peer
	self   p2p.Peer
	peers  []p2p.Peer
	priK   kyber.Scalar
	pubK   kyber.Point
}

func (config ConfigEntry) String() string {
	return fmt.Sprintf("bc: %v:%v", config.IP, config.Port)
}

// New returns a new ConfigEntry.
// TODO: This should be changed because this package is discovery and New here implies New Discovery.
func New(priK kyber.Scalar, pubK kyber.Point) *ConfigEntry {
	var config ConfigEntry
	config.priK = priK
	config.pubK = pubK

	config.peers = make([]p2p.Peer, 0)

	return &config
}

// StartClientMode starts client mode.
func (config *ConfigEntry) StartClientMode(bcIP, bcPort string) error {
	config.IP = "myip"
	config.Port = "myport"

	fmt.Printf("bc ip/port: %v/%v\n", bcIP, bcPort)

	// ...
	// TODO: connect to bc, and wait until acknowledged
	return nil
}

// GetShardID returns the shard ID.
func (config *ConfigEntry) GetShardID() string {
	return config.ShardID
}

// GetPeers returns the peer list.
func (config *ConfigEntry) GetPeers() []p2p.Peer {
	return config.peers
}

// GetLeader returns the leader peer.
func (config *ConfigEntry) GetLeader() p2p.Peer {
	return config.leader
}

// GetSelfPeer returns the self peer.
func (config *ConfigEntry) GetSelfPeer() p2p.Peer {
	return config.self
}
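For orientation only: the removed discovery.ConfigEntry was constructed from a kyber key pair and then switched into client mode. A minimal sketch of how it might have been driven follows; the discovery import path, the bootnode address, and the fixed scalar are assumptions for illustration, not something this PR prescribes.

package main

import (
	"fmt"

	"github.com/harmony-one/harmony/crypto"
	"github.com/harmony-one/harmony/crypto/pki"
	"github.com/harmony-one/harmony/discovery" // assumed import path for the removed package
)

func main() {
	// Fixed scalar for illustration only; real nodes would load or generate their keys.
	priK := crypto.Ed25519Curve.Scalar().SetInt64(42)
	pubK := pki.GetPublicKeyFromScalar(priK)

	cfg := discovery.New(priK, pubK)
	// Bootnode IP/port are placeholders; StartClientMode was still a stub when removed.
	if err := cfg.StartClientMode("127.0.0.1", "8081"); err != nil {
		fmt.Println("client mode failed:", err)
	}
	fmt.Println(cfg, cfg.GetShardID())
}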
@@ -1,3 +0,0 @@
go get -u golang.org/x/lint/golint
golint ./...
@@ -1,10 +0,0 @@
cd ~/projects/src/harmony-benchmark/
# Compile
sudo go build -o bin/commander aws-experiment-launch/experiment/commander/main.go
cd bin
sudo cp /tmp/distribution_config.txt .
sudo cp /tmp/commander_info.txt .
# Get the IP address
IP=`head -n 1 commander_info.txt`
# Run commander
sudo ./commander -ip $IP -port 9000 -config_file distribution_config.txt
@@ -1,22 +0,0 @@
# Kill nodes if any
./kill_node.sh

go build -o bin/benchmark
go build -o bin/txgen client/txgen/main.go
cd bin

# Create a tmp folder for logs
t=`date +"%Y%m%d-%H%M%S"`
log_folder="tmp_log/log-$t"

mkdir -p $log_folder

# For each of the nodes, start soldier
config=distribution_config.txt
while IFS='' read -r line || [[ -n "$line" ]]; do
  IFS=' ' read ip port mode shardID <<< $line
  #echo $ip $port $mode
  ./soldier -ip $ip -port $port &
done < $config

./commander
@@ -1,18 +0,0 @@
# System kernel setup (Mac)
# Reference: http://www.macfreek.nl/memory/Kernel_Configuration
sudo sysctl -w kern.ipc.somaxconn=10240       # Limit on the number of new connections
#sudo sysctl -w kern.ipc.maxsockets=10240     # Initial number of sockets in memory
sudo sysctl -w net.inet.tcp.msl=1000          # TIME_WAIT
#sudo sysctl -w net.inet.tcp.rfc1323=1        # Enable TCP window scaling
sudo sysctl -w kern.ipc.maxsockbuf=4194304    # Maximum TCP window size
sudo sysctl -w net.inet.tcp.sendspace=131072  # Default send buffer
sudo sysctl -w net.inet.tcp.recvspace=358400  # Default receive buffer

# The commented-out lines below are for Linux
# Reference: https://github.com/Zilliqa/Zilliqa/blob/master/tests/Node/test_node_simple.sh
#sudo sysctl net.core.somaxconn=1024
#sudo sysctl net.core.netdev_max_backlog=65536;
#sudo sysctl net.ipv4.tcp_tw_reuse=1;
#sudo sysctl -w net.ipv4.tcp_rmem='65536 873800 1534217728';
#sudo sysctl -w net.ipv4.tcp_wmem='65536 873800 1534217728';
#sudo sysctl -w net.ipv4.tcp_mem='65536 873800 1534217728';
@@ -1,173 +0,0 @@
package utils

import (
	"bufio"
	"log"
	"os"
	"strconv"
	"strings"

	"github.com/harmony-one/harmony/crypto"
	"github.com/harmony-one/harmony/crypto/pki"
	"github.com/harmony-one/harmony/p2p"
)

// ConfigEntry is the config entry.
type ConfigEntry struct {
	IP          string
	Port        string
	Role        string
	ShardID     string
	ValidatorID int // Validator ID in its shard.
}

// DistributionConfig is the distribution config.
type DistributionConfig struct {
	config []ConfigEntry
}

// NewDistributionConfig creates a new DistributionConfig.
func NewDistributionConfig() *DistributionConfig {
	config := DistributionConfig{}
	return &config
}

// GetLeadersAndShardIDs gets all the leader peers and the corresponding shard IDs.
func (config *DistributionConfig) GetLeadersAndShardIDs() ([]p2p.Peer, []uint32) {
	var peerList []p2p.Peer
	var shardIDs []uint32
	for _, entry := range config.config {
		if entry.Role == "leader" {
			peerList = append(peerList, p2p.Peer{IP: entry.IP, Port: entry.Port})
			val, err := strconv.Atoi(entry.ShardID)
			if err == nil {
				shardIDs = append(shardIDs, uint32(val))
			} else {
				log.Print("[Generator] Error parsing the shard ID ", entry.ShardID)
			}
		}
	}
	return peerList, shardIDs
}

// GetClientPeer returns the client peer.
func (config *DistributionConfig) GetClientPeer() *p2p.Peer {
	for _, entry := range config.config {
		if entry.Role != "client" {
			continue
		}
		peer := p2p.Peer{Port: entry.Port, IP: entry.IP}
		return &peer
	}
	return nil
}

// GetClientPort gets the port of the client node in the config.
func (config *DistributionConfig) GetClientPort() string {
	for _, entry := range config.config {
		if entry.Role == "client" {
			return entry.Port
		}
	}
	return ""
}

// ReadConfigFile parses the given config file and populates the config entries.
func (config *DistributionConfig) ReadConfigFile(filename string) error {
	file, err := os.Open(filename)
	if err != nil {
		log.Fatal("Failed to read config file ", filename)
		return err
	}
	defer file.Close()
	fscanner := bufio.NewScanner(file)

	result := []ConfigEntry{}
	validatorMap := map[int]int{}
	for fscanner.Scan() {
		p := strings.Split(fscanner.Text(), " ")
		shardID, _ := strconv.Atoi(p[3])
		validatorID := -1
		if p[2] == "validator" {
			validatorMap[shardID]++
			validatorID = validatorMap[shardID]
		}
		entry := ConfigEntry{p[0], p[1], p[2], p[3], validatorID}
		result = append(result, entry)
	}
	config.config = result
	return nil
}

// GetShardID gets the shard ID of the node with the given IP and port.
func (config *DistributionConfig) GetShardID(ip, port string) string {
	for _, entry := range config.config {
		if entry.IP == ip && entry.Port == port {
			return entry.ShardID
		}
	}
	return "N/A"
}

// GetPeers gets the list of validator peers in the given shard.
func (config *DistributionConfig) GetPeers(ip, port, shardID string) []p2p.Peer {
	var peerList []p2p.Peer
	for _, entry := range config.config {
		if entry.Role != "validator" || entry.ShardID != shardID {
			continue
		}
		// Get public key deterministically based on ip and port
		peer := p2p.Peer{Port: entry.Port, IP: entry.IP, ValidatorID: entry.ValidatorID}
		setKey(&peer)
		peerList = append(peerList, peer)
	}
	return peerList
}

// GetSelfPeer gets the peer entry matching the given IP, port, and shard ID.
func (config *DistributionConfig) GetSelfPeer(ip, port, shardID string) p2p.Peer {
	for _, entry := range config.config {
		if entry.IP == ip && entry.Port == port && entry.ShardID == shardID {
			peer := p2p.Peer{Port: entry.Port, IP: entry.IP, ValidatorID: entry.ValidatorID}
			return peer
		}
	}
	return p2p.Peer{}
}

// GetLeader gets the leader peer of the given shard.
func (config *DistributionConfig) GetLeader(shardID string) p2p.Peer {
	var leaderPeer p2p.Peer
	for _, entry := range config.config {
		if entry.Role == "leader" && entry.ShardID == shardID {
			leaderPeer.IP = entry.IP
			leaderPeer.Port = entry.Port
			setKey(&leaderPeer)
		}
	}
	return leaderPeer
}

// GetConfigEntries returns the list of ConfigEntry.
func (config *DistributionConfig) GetConfigEntries() []ConfigEntry {
	return config.config
}

// GetMyConfigEntry returns the config entry matching the given IP and port, or nil if none matches.
func (config *DistributionConfig) GetMyConfigEntry(ip string, port string) *ConfigEntry {
	if config.config == nil {
		return nil
	}
	for _, entry := range config.config {
		if entry.IP == ip && entry.Port == port {
			return &entry
		}
	}
	return nil
}

func setKey(peer *p2p.Peer) {
	// Get public key deterministically based on ip and port
	priKey := crypto.Ed25519Curve.Scalar().SetInt64(int64(GetUniqueIDFromPeer(*peer))) // TODO: figure out why using a random hash value doesn't work for private key (schnorr)
	peer.PubKey = pki.GetPublicKeyFromScalar(priKey)
}
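Similarly, for readers wondering what the removed static-config path looked like from the caller's side, here is a minimal usage sketch of DistributionConfig. The github.com/harmony-one/harmony/utils import path, the file name, and the shard ID "0" are assumptions for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/harmony-one/harmony/utils" // assumed import path for the removed package
)

func main() {
	// Load the static distribution config ("ip port role shardID", one node per line).
	dc := utils.NewDistributionConfig()
	if err := dc.ReadConfigFile("distribution_config.txt"); err != nil { // illustrative file name
		log.Fatal(err)
	}

	// Look up the leader of shard "0" and the validator peers behind it.
	leader := dc.GetLeader("0")
	peers := dc.GetPeers(leader.IP, leader.Port, "0")
	fmt.Printf("shard 0 leader %v:%v with %d validators\n", leader.IP, leader.Port, len(peers))
}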