Merge branch 'master' into sync

pull/69/head
Minh Doan 6 years ago
commit cc4b491850
3 changed files:
  client/txgen/main.go (57 changes)
  client/utils.go (11 changes)
  local_config_8shards.txt (99 additions)

@@ -89,14 +89,13 @@ func generateSimulatedTransactions(subsetId, numSubset int, shardId int, dataNod
 	]
 	*/
-	utxoPoolMutex.Lock()
 	txInfo := TxInfo{}
 	txInfo.shardID = shardId
 	txInfo.dataNodes = dataNodes
 	txInfo.txCount = 0
 UTXOLOOP:
 	// Loop over all addresses
 	for address, txMap := range dataNodes[shardId].UtxoPool.UtxoMap {
 		if int(binary.BigEndian.Uint32(address[:]))%numSubset == subsetId%numSubset { // Work on one subset of utxo at a time
 			txInfo.address = address
@@ -129,7 +128,6 @@ UTXOLOOP:
 		}
 	}
 	log.Info("UTXO CLIENT", "numUtxo", dataNodes[shardId].UtxoPool.CountNumOfUtxos(), "shardId", shardId)
-	utxoPoolMutex.Unlock()
 	log.Debug("[Generator] generated transations", "single-shard", len(txInfo.txs), "cross-shard", len(txInfo.crossTxs))
 	return txInfo.txs, txInfo.crossTxs
 }
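
The filter in the loop above assigns each address to a subset using its leading four bytes, so successive generator passes walk disjoint slices of the UTXO map. A self-contained sketch of that partitioning check with a toy address (the helper name is illustrative, not from the repository):

package main

import (
	"encoding/binary"
	"fmt"
)

// belongsToSubset mirrors the filter above: an address falls into a
// subset when its leading 4 bytes, read big-endian, are congruent to
// the subset id modulo numSubset.
func belongsToSubset(address [20]byte, subsetId, numSubset int) bool {
	return int(binary.BigEndian.Uint32(address[:]))%numSubset == subsetId%numSubset
}

func main() {
	var addr [20]byte
	addr[3] = 7 // leading uint32 of the address is 7
	for subset := 0; subset < 3; subset++ {
		// With numSubset = 3 only subset 1 matches, since 7 % 3 == 1.
		fmt.Printf("subset %d: %v\n", subset, belongsToSubset(addr, subset, 3))
	}
}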
@@ -319,7 +317,9 @@ func main() {
 	start := time.Now()
 	totalTime := float64(*duration)
+	client.InitLookUpIntPriKeyMap()
 	subsetCounter := 0
 	for true {
 		t := time.Now()
 		if totalTime > 0 && t.Sub(start).Seconds() >= totalTime {
@@ -327,33 +327,50 @@ func main() {
 			break
 		}
 		shardIdTxsMap := make(map[uint32][]*blockchain.Transaction)
-		for shardId, _ := range shardIdLeaderMap { // Generate simulated transactions
-			txs, crossTxs := generateSimulatedTransactions(subsetCounter, *numSubset, int(shardId), nodes)
-
-			// Put cross shard tx into a pending list waiting for proofs from leaders
-			if clientPort != "" {
-				clientNode.Client.PendingCrossTxsMutex.Lock()
-				for _, tx := range crossTxs {
-					clientNode.Client.PendingCrossTxs[tx.ID] = tx
-				}
-				clientNode.Client.PendingCrossTxsMutex.Unlock()
-			}
-
-			// Put txs into corresponding shards
-			shardIdTxsMap[shardId] = append(shardIdTxsMap[shardId], txs...)
-			for _, crossTx := range crossTxs {
-				for curShardId, _ := range client.GetInputShardIdsOfCrossShardTx(crossTx) {
-					shardIdTxsMap[curShardId] = append(shardIdTxsMap[curShardId], crossTx)
-				}
-			}
-		}
+		lock := sync.Mutex{}
+		var wg sync.WaitGroup
+		wg.Add(len(shardIdLeaderMap))
+		utxoPoolMutex.Lock()
+		log.Warn("STARTING TX GEN")
+		for shardId, _ := range shardIdLeaderMap { // Generate simulated transactions
+			go func() {
+				txs, crossTxs := generateSimulatedTransactions(subsetCounter, *numSubset, int(shardId), nodes)
+
+				// Put cross shard tx into a pending list waiting for proofs from leaders
+				if clientPort != "" {
+					clientNode.Client.PendingCrossTxsMutex.Lock()
+					for _, tx := range crossTxs {
+						clientNode.Client.PendingCrossTxs[tx.ID] = tx
+					}
+					clientNode.Client.PendingCrossTxsMutex.Unlock()
+				}
+
+				lock.Lock()
+				// Put txs into corresponding shards
+				shardIdTxsMap[shardId] = append(shardIdTxsMap[shardId], txs...)
+				for _, crossTx := range crossTxs {
+					for curShardId, _ := range client.GetInputShardIdsOfCrossShardTx(crossTx) {
+						shardIdTxsMap[curShardId] = append(shardIdTxsMap[curShardId], crossTx)
+					}
+				}
+				lock.Unlock()
+				wg.Done()
+			}()
+		}
+		utxoPoolMutex.Unlock()
+		wg.Wait()
+		lock.Lock()
 		for shardId, txs := range shardIdTxsMap { // Send the txs to corresponding shards
-			SendTxsToLeader(shardIdLeaderMap[shardId], txs)
+			go func() {
+				SendTxsToLeader(shardIdLeaderMap[shardId], txs)
+			}()
 		}
+		lock.Unlock()
 		subsetCounter++
-		time.Sleep(2000 * time.Millisecond)
+		time.Sleep(5000 * time.Millisecond)
 	}

 	// Send a stop message to stop the nodes at the end
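
The rewritten loop fans the per-shard generation out to goroutines: a sync.WaitGroup tracks completion and a mutex guards the shared shardIdTxsMap. Note that the goroutines in the diff close over the loop variables shardId and txs; before Go 1.22 that capture is shared across iterations, so the conventional safe form passes them as parameters. A minimal sketch of that pattern (placeholder names, not the repository's API):

package main

import (
	"fmt"
	"sync"
)

// generate stands in for the per-shard work; it is a placeholder,
// not the real generateSimulatedTransactions.
func generate(shardId int) []string {
	return []string{fmt.Sprintf("tx-from-shard-%d", shardId)}
}

func main() {
	shardIds := []int{0, 1, 2}
	results := make(map[int][]string)

	var lock sync.Mutex // guards results across goroutines
	var wg sync.WaitGroup
	wg.Add(len(shardIds))

	for _, shardId := range shardIds {
		// Pass the loop variable as a parameter: before Go 1.22 a bare
		// closure would see the same shardId in every goroutine.
		go func(id int) {
			defer wg.Done()
			txs := generate(id)
			lock.Lock()
			results[id] = append(results[id], txs...)
			lock.Unlock()
		}(shardId)
	}
	wg.Wait() // all per-shard work finished; results is safe to read

	for id, txs := range results {
		fmt.Println(id, txs)
	}
}

Passing the id as an argument gives each goroutine its own copy, and wg.Wait() guarantees the map is complete before any reader touches it.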

@@ -2,22 +2,29 @@ package client

 import (
 	"bytes"
-	"github.com/simple-rules/harmony-benchmark/crypto/pki"
 	"io"
 	"log"
 	"net/http"
+	"sync"
+
+	"github.com/simple-rules/harmony-benchmark/crypto/pki"
 )

 var AddressToIntPriKeyMap map[[20]byte]int // For convenience, we use int as the secret seed for generating private key
+var AddressToIntPriKeyMapLock sync.Mutex

-func LookUpIntPriKey(address [20]byte) (int, bool) {
+func InitLookUpIntPriKeyMap() {
 	if AddressToIntPriKeyMap == nil {
+		AddressToIntPriKeyMapLock.Lock()
 		AddressToIntPriKeyMap = make(map[[20]byte]int)
 		for i := 1; i <= 10000; i++ {
 			AddressToIntPriKeyMap[pki.GetAddressFromInt(i)] = i
 		}
+		AddressToIntPriKeyMapLock.Unlock()
 	}
+}
+
+func LookUpIntPriKey(address [20]byte) (int, bool) {
 	value, ok := AddressToIntPriKeyMap[address]
 	return value, ok
 }
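
InitLookUpIntPriKeyMap checks the map for nil before taking the lock, which is only safe because main() now calls it once before any goroutine starts; two concurrent first callers could both observe nil and build the table twice. A sketch of the same lazy table built with sync.Once instead, which stays race-free regardless of call order (the helper names here are illustrative stand-ins, not the pki package):

package main

import (
	"fmt"
	"sync"
)

var (
	addressToSeed map[[20]byte]int
	initOnce      sync.Once
)

// addressFromInt is a toy stand-in for pki.GetAddressFromInt,
// used only to make the sketch self-contained.
func addressFromInt(i int) [20]byte {
	var a [20]byte
	a[19] = byte(i)
	return a
}

// lookUpSeed builds the table exactly once, even under concurrent
// callers; after the Once completes the map is read-only, so the
// lookups need no further locking.
func lookUpSeed(address [20]byte) (int, bool) {
	initOnce.Do(func() {
		addressToSeed = make(map[[20]byte]int)
		for i := 1; i <= 100; i++ {
			addressToSeed[addressFromInt(i)] = i
		}
	})
	v, ok := addressToSeed[address]
	return v, ok
}

func main() {
	fmt.Println(lookUpSeed(addressFromInt(42))) // 42 true
}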

@@ -0,0 +1,99 @@
127.0.0.1 9010 validator 0
127.0.0.1 9011 validator 0
127.0.0.1 9012 validator 0
127.0.0.1 9013 validator 0
127.0.0.1 9014 validator 0
127.0.0.1 9015 validator 0
127.0.0.1 9016 validator 0
127.0.0.1 9017 validator 0
127.0.0.1 9018 validator 0
127.0.0.1 9019 validator 0
127.0.0.1 9020 validator 1
127.0.0.1 9021 validator 1
127.0.0.1 9022 validator 1
127.0.0.1 9023 validator 1
127.0.0.1 9024 validator 1
127.0.0.1 9025 validator 1
127.0.0.1 9026 validator 1
127.0.0.1 9027 validator 1
127.0.0.1 9028 validator 1
127.0.0.1 9029 validator 1
127.0.0.1 9030 validator 2
127.0.0.1 9031 validator 2
127.0.0.1 9032 validator 2
127.0.0.1 9033 validator 2
127.0.0.1 9034 validator 2
127.0.0.1 9035 validator 2
127.0.0.1 9036 validator 2
127.0.0.1 9037 validator 2
127.0.0.1 9038 validator 2
127.0.0.1 9039 validator 2
127.0.0.1 9040 validator 3
127.0.0.1 9041 validator 3
127.0.0.1 9042 validator 3
127.0.0.1 9043 validator 3
127.0.0.1 9044 validator 3
127.0.0.1 9045 validator 3
127.0.0.1 9046 validator 3
127.0.0.1 9047 validator 3
127.0.0.1 9048 validator 3
127.0.0.1 9049 validator 3
127.0.0.1 9050 validator 4
127.0.0.1 9051 validator 4
127.0.0.1 9052 validator 4
127.0.0.1 9053 validator 4
127.0.0.1 9054 validator 4
127.0.0.1 9055 validator 4
127.0.0.1 9056 validator 4
127.0.0.1 9057 validator 4
127.0.0.1 9058 validator 4
127.0.0.1 9059 validator 4
127.0.0.1 9060 validator 5
127.0.0.1 9061 validator 5
127.0.0.1 9062 validator 5
127.0.0.1 9063 validator 5
127.0.0.1 9064 validator 5
127.0.0.1 9065 validator 5
127.0.0.1 9066 validator 5
127.0.0.1 9067 validator 5
127.0.0.1 9068 validator 5
127.0.0.1 9069 validator 5
127.0.0.1 9070 validator 6
127.0.0.1 9071 validator 6
127.0.0.1 9072 validator 6
127.0.0.1 9073 validator 6
127.0.0.1 9074 validator 6
127.0.0.1 9075 validator 6
127.0.0.1 9076 validator 6
127.0.0.1 9077 validator 6
127.0.0.1 9078 validator 6
127.0.0.1 9079 validator 6
127.0.0.1 9080 validator 7
127.0.0.1 9081 validator 7
127.0.0.1 9082 validator 7
127.0.0.1 9083 validator 7
127.0.0.1 9084 validator 7
127.0.0.1 9085 validator 7
127.0.0.1 9086 validator 7
127.0.0.1 9087 validator 7
127.0.0.1 9088 validator 7
127.0.0.1 9089 validator 7
127.0.0.1 9090 validator 8
127.0.0.1 9091 validator 8
127.0.0.1 9092 validator 8
127.0.0.1 9093 validator 8
127.0.0.1 9094 validator 8
127.0.0.1 9095 validator 8
127.0.0.1 9096 validator 8
127.0.0.1 9097 validator 8
127.0.0.1 9098 validator 8
127.0.0.1 9099 validator 8
127.0.0.1 9000 leader 0
127.0.0.1 9001 leader 1
127.0.0.1 9003 leader 3
127.0.0.1 9004 leader 4
127.0.0.1 9005 leader 5
127.0.0.1 9006 leader 6
127.0.0.1 9007 leader 7
127.0.0.1 9008 leader 8
127.0.0.1 9999 client 0
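
Each line of the new 99-line config follows the pattern `ip port role shardId`: ten validators per shard, leader entries for the shards, and a single client on port 9999. A small parser sketch under that assumed format (the struct and function names are hypothetical, not from the repository):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// Peer describes one config line: "ip port role shardId".
type Peer struct {
	IP      string
	Port    string
	Role    string // "validator", "leader", or "client"
	ShardID string
}

func readConfig(path string) ([]Peer, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var peers []Peer
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) != 4 {
			continue // skip blank or malformed lines
		}
		peers = append(peers, Peer{fields[0], fields[1], fields[2], fields[3]})
	}
	return peers, scanner.Err()
}

func main() {
	peers, err := readConfig("local_config_8shards.txt")
	if err != nil {
		fmt.Println(err)
		return
	}
	leaders := 0
	for _, p := range peers {
		if p.Role == "leader" {
			leaders++
		}
	}
	fmt.Printf("%d peers, %d leaders\n", len(peers), leaders)
}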