Merge branch 'master' into sync

pull/69/head
Minh Doan 6 years ago
commit 210f9f7ba9
Files changed (31):
  1. benchmark.go (19)
  2. blockchain/block.go (4)
  3. blockchain/blockchain_test.go (4)
  4. blockchain/merkle_tree.go (20)
  5. blockchain/merkle_tree_test.go (21)
  6. blockchain/utxopool.go (123)
  7. blockchain/utxopool_test.go (6)
  8. client/client.go (1)
  9. client/txgen/main.go (77)
  10. go_executable_build.sh (104)
  11. identitychain/identityblock.go (12)
  12. identitychain/identitychain.go (138)
  13. identitychain/identitychain_handler.go (72)
  14. identitychain/identitychain_test.go (20)
  15. identitymanage/identitymanage.go (43)
  16. node/node.go (107)
  17. node/node_handler.go (66)
  18. pow/LICENSE (21)
  19. pow/README.md (57)
  20. pow/api.go (131)
  21. pow/api_test.go (56)
  22. pow/example_test.go (25)
  23. pow/pow.go (81)
  24. pow/sha2bday.go (78)
  25. proto/identity/identity.go (8)
  26. runid/run_identity.go (11)
  27. runwait/run_wait.go (39)
  28. utils/distribution_config.go (3)
  29. waitnode/pow.txt (82)
  30. waitnode/wait_node.go (82)
  31. waitnode/wait_node_test.go (18)

@ -5,6 +5,7 @@ import (
"fmt"
"math/rand"
"os"
"path"
"time"
"github.com/simple-rules/harmony-benchmark/attack"
@ -16,6 +17,13 @@ import (
"github.com/simple-rules/harmony-benchmark/utils"
)
var (
version string
builtBy string
builtAt string
commit string
)
const (
AttackProbability = 20
)
@ -42,6 +50,11 @@ func InitLDBDatabase(ip string, port string) (*db.LDBDatabase, error) {
return db.NewLDBDatabase(dbFileName, 0, 0)
}
func printVersion(me string) {
fmt.Fprintf(os.Stderr, "Harmony (C) 2018. %v, version %v-%v (%v %v)\n", path.Base(me), version, commit, builtBy, builtAt)
os.Exit(0)
}
func main() {
ip := flag.String("ip", "127.0.0.1", "IP of the node")
port := flag.String("port", "9000", "port of the node.")
@ -51,8 +64,14 @@ func main() {
dbSupported := flag.Bool("db_supported", false, "false means not db_supported, true means db_supported")
profile := flag.Bool("profile", false, "Turn on profiling (CPU, Memory).")
metricsReportURL := flag.String("metrics_profile_url", "", "If set, reports metrics to this URL.")
versionFlag := flag.Bool("version", false, "Output version info")
flag.Parse()
if *versionFlag {
printVersion(os.Args[0])
}
// Set up randomization seed.
rand.Seed(int64(time.Now().Nanosecond()))
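Both benchmark.go and client/txgen/main.go now follow the same pattern: package-level version, commit, builtBy and builtAt strings, a printVersion helper, and a -version flag that prints the info and exits. The strings are left empty in source; go_executable_build.sh injects them at link time through -ldflags (see its build_only function further down). A minimal, self-contained sketch of the pattern, not part of this diff:

```go
// Sketch only: shows how the empty package-level strings are filled in at
// link time, e.g.
//   go build -ldflags "-X main.version=v123 -X main.commit=abc1234 \
//                      -X main.builtBy=$USER@ -X main.builtAt=2018-09-08T12:00:00+0000"
package main

import (
	"flag"
	"fmt"
	"os"
	"path"
)

var (
	version string // injected via -X main.version=...
	commit  string // injected via -X main.commit=...
	builtBy string // injected via -X main.builtBy=...
	builtAt string // injected via -X main.builtAt=...
)

func printVersion(me string) {
	fmt.Fprintf(os.Stderr, "Harmony (C) 2018. %v, version %v-%v (%v %v)\n",
		path.Base(me), version, commit, builtBy, builtAt)
	os.Exit(0)
}

func main() {
	versionFlag := flag.Bool("version", false, "Output version info")
	flag.Parse()
	if *versionFlag {
		printVersion(os.Args[0])
	}
}
```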

@ -133,7 +133,7 @@ func NewStateBlock(utxoPool *UTXOPool, numBlocks, numTxs int32) *Block {
stateTransactions := []*Transaction{}
stateTransactionIds := [][32]byte{}
for address, txHash2Vout2AmountMap := range utxoPool.UtxoMap {
stateTransaction := Transaction{}
stateTransaction := &Transaction{}
for txHash, vout2AmountMap := range txHash2Vout2AmountMap {
for index, amount := range vout2AmountMap {
txHashBytes, err := utils.Get32BytesFromString(txHash)
@ -148,7 +148,7 @@ func NewStateBlock(utxoPool *UTXOPool, numBlocks, numTxs int32) *Block {
if len(stateTransaction.TxOutput) != 0 {
stateTransaction.SetID()
stateTransactionIds = append(stateTransactionIds, stateTransaction.ID)
stateTransactions = append(stateTransactions, &stateTransaction)
stateTransactions = append(stateTransactions, stateTransaction)
}
}
newBlock := NewBlock(stateTransactions, [32]byte{}, utxoPool.ShardID)

@ -58,7 +58,7 @@ func TestFindUTXO(t *testing.T) {
func TestAddNewUserTransfer(t *testing.T) {
bc := CreateBlockchain(TestAddressOne, 0)
utxoPool := CreateUTXOPoolFromGenesisBlockChain(bc)
utxoPool := CreateUTXOPoolFromGenesisBlock(bc.Blocks[0])
if !bc.AddNewUserTransfer(utxoPool, PriKeyOne, TestAddressOne, TestAddressThree, 3, 0) {
t.Error("Failed to add new transfer to alok.")
@ -75,7 +75,7 @@ func TestAddNewUserTransfer(t *testing.T) {
func TestVerifyNewBlock(t *testing.T) {
bc := CreateBlockchain(TestAddressOne, 0)
utxoPool := CreateUTXOPoolFromGenesisBlockChain(bc)
utxoPool := CreateUTXOPoolFromGenesisBlock(bc.Blocks[0])
bc.AddNewUserTransfer(utxoPool, PriKeyOne, TestAddressOne, TestAddressThree, 3, 0)
bc.AddNewUserTransfer(utxoPool, PriKeyOne, TestAddressOne, TestAddressTwo, 100, 0)

@ -18,6 +18,9 @@ type MerkleNode struct {
// NewMerkleTree creates a new Merkle tree from a sequence of data
func NewMerkleTree(data [][]byte) *MerkleTree {
if len(data) == 0 {
return nil
}
var nodes []*MerkleNode
for _, datum := range data {
@ -49,15 +52,16 @@ func NewMerkleTree(data [][]byte) *MerkleTree {
func NewMerkleNode(left, right *MerkleNode, data []byte) *MerkleNode {
mNode := MerkleNode{}
if left == nil && right == nil {
hash := sha256.Sum256(data)
mNode.Data = hash[:]
} else {
prevHashes := append(left.Data, right.Data...)
hash := sha256.Sum256(prevHashes)
mNode.Data = hash[:]
prevHashes := []byte{}
if left != nil {
prevHashes = append(prevHashes, left.Data...)
}
if right != nil {
prevHashes = append(prevHashes, right.Data...)
}
prevHashes = append(prevHashes, data...)
hash := sha256.Sum256(prevHashes)
mNode.Data = hash[:]
mNode.Left = left
mNode.Right = right
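The rewritten NewMerkleNode no longer assumes both children are present: it concatenates whichever of left.Data, right.Data and data are non-nil before hashing, and NewMerkleTree now returns nil for empty input. A standalone illustration of the new hashing rule (the MerkleNode type below is a local stand-in for the one in the blockchain package, not the package's own code):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// Local stand-in for blockchain.MerkleNode, used only for this illustration.
type MerkleNode struct {
	Left, Right *MerkleNode
	Data        []byte
}

// newMerkleNode mirrors the rule in the diff above: hash the concatenation of
// whatever inputs exist, so a node with a single child no longer dereferences
// a nil pointer.
func newMerkleNode(left, right *MerkleNode, data []byte) *MerkleNode {
	prev := []byte{}
	if left != nil {
		prev = append(prev, left.Data...)
	}
	if right != nil {
		prev = append(prev, right.Data...)
	}
	prev = append(prev, data...)
	hash := sha256.Sum256(prev)
	return &MerkleNode{Left: left, Right: right, Data: hash[:]}
}

func main() {
	leaf := newMerkleNode(nil, nil, []byte("node1")) // leaf: hash of the data itself
	parent := newMerkleNode(leaf, nil, nil)          // nil right child is now safe
	fmt.Printf("leaf:   %x\nparent: %x\n", leaf.Data, parent.Data)
}
```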

@ -13,6 +13,7 @@ func TestNewMerkleNode(t *testing.T) {
[]byte("node3"),
}
fmt.Println("TEting")
// Level 1
n1 := NewMerkleNode(nil, nil, data[0])
@ -59,3 +60,23 @@ func TestNewMerkleTree(t *testing.T) {
t.Errorf("Merkle tree root hash is incorrect")
}
}
func TestNewMerkleTree2(t *testing.T) {
data := [][]byte{
[]byte("node1"),
[]byte("node2"),
}
// Level 1
n1 := NewMerkleNode(nil, nil, data[0])
n2 := NewMerkleNode(nil, nil, data[1])
// Level 2
n3 := NewMerkleNode(n1, n2, nil)
rootHash := fmt.Sprintf("%x", n3.Data)
mTree := NewMerkleTree(data)
if rootHash != fmt.Sprintf("%x", mTree.RootNode.Data) {
t.Errorf("Merkle tree root hash is incorrect")
}
}

@ -115,8 +115,9 @@ func (utxoPool *UTXOPool) VerifyStateBlock(stateBlock *Block) bool {
}
// VerifyOneTransaction verifies whether a single transaction is valid.
// Add another sanity check function (e.g. spending the same utxo) called before this one.
func (utxoPool *UTXOPool) VerifyOneTransaction(tx *Transaction, spentTXOs *map[[20]byte]map[string]map[uint32]bool) (err error, crossShard bool) {
if len(tx.Proofs) != 0 {
if len(tx.Proofs) > 1 {
return utxoPool.VerifyUnlockTransaction(tx)
}
@ -223,7 +224,7 @@ func (utxoPool *UTXOPool) Update(transactions []*Transaction) {
// UpdateOneTransaction updates utxoPool in respect to the new Transaction.
func (utxoPool *UTXOPool) UpdateOneTransaction(tx *Transaction) {
isUnlockTx := len(tx.Proofs) != 0
isUnlockTx := len(tx.Proofs) > 1
unlockToCommit := true
if isUnlockTx {
for _, proof := range tx.Proofs {
@ -286,7 +287,6 @@ func (utxoPool *UTXOPool) UpdateOneTransaction(tx *Transaction) {
inTxID := hex.EncodeToString(in.PreviousOutPoint.TxID[:])
if _, ok := utxoPool.LockedUtxoMap[in.Address]; !ok {
utxoPool.LockedUtxoMap[in.Address] = make(TXHash2Vout2AmountMap)
utxoPool.LockedUtxoMap[in.Address][inTxID] = make(Vout2AmountMap)
}
if _, ok := utxoPool.LockedUtxoMap[in.Address][inTxID]; !ok {
utxoPool.LockedUtxoMap[in.Address][inTxID] = make(Vout2AmountMap)
@ -300,16 +300,17 @@ func (utxoPool *UTXOPool) UpdateOneTransaction(tx *Transaction) {
// Update
if !isCrossShard || isUnlockTx {
if !unlockToCommit {
if isValidCrossShard {
// unlock-to-abort, bring back (unlock) the utxo input
for _, in := range tx.TxInput {
// Only unlock the input for my own shard.
if in.ShardID != utxoPool.ShardID {
continue
}
// unlock-to-abort, bring back (unlock) the utxo input
for _, in := range tx.TxInput {
// Only unlock the input for my own shard.
if in.ShardID != utxoPool.ShardID {
continue
}
// Simply bring back the locked (removed) utxo
inTxID := hex.EncodeToString(in.PreviousOutPoint.TxID[:])
inTxID := hex.EncodeToString(in.PreviousOutPoint.TxID[:])
if utxoPool.LockedUtxoExists(in.Address, inTxID, in.PreviousOutPoint.Index) {
// bring back the locked (removed) utxo
if _, ok := utxoPool.UtxoMap[in.Address]; !ok {
utxoPool.UtxoMap[in.Address] = make(TXHash2Vout2AmountMap)
utxoPool.UtxoMap[in.Address][inTxID] = make(Vout2AmountMap)
@ -339,6 +340,17 @@ func (utxoPool *UTXOPool) UpdateOneTransaction(tx *Transaction) {
}
utxoPool.UtxoMap[out.Address][txID][uint32(index)] = out.Amount
}
if isUnlockTx { // for unlock-to-commit transaction, also need to delete the locked utxo
for _, in := range tx.TxInput {
// Only unlock the input for my own shard.
if in.ShardID != utxoPool.ShardID {
continue
}
inTxID := hex.EncodeToString(in.PreviousOutPoint.TxID[:])
utxoPool.DeleteOneLockedUtxo(in.Address, inTxID, in.PreviousOutPoint.Index)
}
}
}
} // If it's a cross shard locking Tx, then don't update so the input UTXOs are locked (removed), and the money is not spendable until unlock-to-commit or unlock-to-abort
}
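The branches above distinguish four outcomes for a transaction's inputs. A rough, self-contained model of that decision, useful when reading the diff; this is a simplification, not the commit's code, and field names and the exact locking conditions are abstracted away:

```go
package main

import "fmt"

type action string

const (
	spendAndAdd  action = "regular: spend inputs, add outputs"
	lockInputs   action = "cross-shard lock: move inputs to LockedUtxoMap, wait for proofs"
	commitUnlock action = "unlock-to-commit: add outputs, delete the locked inputs"
	abortUnlock  action = "unlock-to-abort: restore the locked inputs to UtxoMap"
)

// classify sketches UpdateOneTransaction's top-level decision: a tx carrying
// proofs is an unlock tx (committed only if every proof was accepted), a
// cross-shard tx without proofs only locks its inputs, and everything else is
// a normal same-shard spend.
func classify(isUnlockTx, allProofsAccepted, isCrossShard bool) action {
	switch {
	case isUnlockTx && allProofsAccepted:
		return commitUnlock
	case isUnlockTx:
		return abortUnlock
	case isCrossShard:
		return lockInputs
	default:
		return spendAndAdd
	}
}

func main() {
	fmt.Println(classify(false, false, false)) // plain same-shard transfer
	fmt.Println(classify(false, false, true))  // first phase of a cross-shard tx
	fmt.Println(classify(true, true, true))    // every shard accepted
	fmt.Println(classify(true, false, true))   // at least one shard rejected
}
```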
@ -363,28 +375,31 @@ func (utxoPool *UTXOPool) VerifyAndUpdate(transactions []*Transaction) bool {
return false
}
// CreateUTXOPoolFromTransaction a Utxo pool from a genesis transaction.
func CreateUTXOPoolFromTransaction(tx *Transaction, shardId uint32) *UTXOPool {
// CreateUTXOPoolFromGenesisBlock creates a UTXO pool from a genesis block.
func CreateUTXOPoolFromGenesisBlock(block *Block) *UTXOPool {
shardId := block.ShardId
var utxoPool UTXOPool
txID := hex.EncodeToString(tx.ID[:])
utxoPool.UtxoMap = make(UtxoMap)
utxoPool.LockedUtxoMap = make(UtxoMap)
for index, out := range tx.TxOutput {
utxoPool.UtxoMap[out.Address] = make(TXHash2Vout2AmountMap)
utxoPool.UtxoMap[out.Address][txID] = make(Vout2AmountMap)
utxoPool.UtxoMap[out.Address][txID][uint32(index)] = out.Amount
for _, tx := range block.Transactions {
txID := hex.EncodeToString(tx.ID[:])
for index, out := range tx.TxOutput {
_, ok := utxoPool.UtxoMap[out.Address]
if !ok {
utxoPool.UtxoMap[out.Address] = make(TXHash2Vout2AmountMap)
}
_, ok = utxoPool.UtxoMap[out.Address][txID]
if !ok {
utxoPool.UtxoMap[out.Address][txID] = make(Vout2AmountMap)
}
utxoPool.UtxoMap[out.Address][txID][uint32(index)] = out.Amount
}
}
utxoPool.ShardID = shardId
return &utxoPool
}
// CreateUTXOPoolFromGenesisBlockChain a Utxo pool from a genesis blockchain.
func CreateUTXOPoolFromGenesisBlockChain(bc *Blockchain) *UTXOPool {
tx := bc.Blocks[0].Transactions[0]
shardId := bc.Blocks[0].ShardId
return CreateUTXOPoolFromTransaction(tx, shardId)
}
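The chain-based constructor is removed and every call site (node.New and the tests below) switches to the block-based one, which also covers genesis blocks that carry more than one transaction. A hedged sketch of the migration as it would appear inside the blockchain package's tests (CreateBlockchain, TestAddressOne and the pool type come from the existing test files):

```go
// Inside package blockchain (test context); identifiers come from the existing tests.
func ExampleCreateUTXOPoolFromGenesisBlock() {
	bc := CreateBlockchain(TestAddressOne, 0)

	// Before this commit:
	//   utxoPool := CreateUTXOPoolFromGenesisBlockChain(bc)
	// After this commit the pool is built from the genesis block itself,
	// covering every transaction in that block:
	utxoPool := CreateUTXOPoolFromGenesisBlock(bc.Blocks[0])
	_ = utxoPool
}
```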
// SelectTransactionsForNewBlock returns a list of index of valid transactions for the new block.
func (utxoPool *UTXOPool) SelectTransactionsForNewBlock(transactions []*Transaction, maxNumTxs int) ([]*Transaction, []*Transaction, []*Transaction, []*CrossShardTxAndProof) {
selected, unselected, invalid, crossShardTxs := []*Transaction{}, []*Transaction{}, []*Transaction{}, []*CrossShardTxAndProof{}
@ -394,12 +409,13 @@ func (utxoPool *UTXOPool) SelectTransactionsForNewBlock(transactions []*Transact
if len(selected) < maxNumTxs {
if err == nil || crossShard {
selected = append(selected, tx)
if crossShard {
proof := CrossShardTxProof{Accept: err == nil, TxID: tx.ID, TxInput: getShardTxInput(tx, utxoPool.ShardID)}
txAndProof := CrossShardTxAndProof{tx, &proof}
crossShardTxs = append(crossShardTxs, &txAndProof)
tx.Proofs = append(tx.Proofs, proof)
}
selected = append(selected, tx)
} else {
invalid = append(invalid, tx)
}
@ -431,6 +447,23 @@ func (utxoPool *UTXOPool) DeleteOneUtxo(address [20]byte, txID string, index uin
}
}
// LockedUtxoExists checks whether a locked utxo exists for the given address, txID, and index.
func (utxoPool *UTXOPool) LockedUtxoExists(address [20]byte, txID string, index uint32) bool {
_, ok := utxoPool.LockedUtxoMap[address]
if !ok {
return false
}
_, ok = utxoPool.LockedUtxoMap[address][txID]
if !ok {
return false
}
_, ok = utxoPool.LockedUtxoMap[address][txID][index]
if !ok {
return false
}
return true
}
// DeleteOneLockedUtxo deletes one locked utxo of UTXOPool and cleans up if possible.
func (utxoPool *UTXOPool) DeleteOneLockedUtxo(address [20]byte, txID string, index uint32) {
delete(utxoPool.LockedUtxoMap[address][txID], index)
@ -463,8 +496,17 @@ func (utxoPool *UTXOPool) CleanUp() {
// Used for debugging.
func (utxoPool *UTXOPool) String() string {
return printUtxos(&utxoPool.UtxoMap)
}
// Used for debugging.
func (utxoPool *UTXOPool) StringOfLockedUtxos() string {
return printUtxos(&utxoPool.LockedUtxoMap)
}
func printUtxos(utxos *UtxoMap) string {
res := ""
for address, v1 := range utxoPool.UtxoMap {
for address, v1 := range *utxos {
for txid, v2 := range v1 {
for index, value := range v2 {
res += fmt.Sprintf("address: %v, tx id: %v, index: %v, value: %v\n", address, txid, index, value)
@ -484,3 +526,28 @@ func (utxoPool *UTXOPool) GetSizeInByteOfUtxoMap() int {
encoder.Encode(utxoPool.UtxoMap)
return len(byteBuffer.Bytes())
}
// A utility func that counts the total number of utxos in a pool.
func (utxoPool *UTXOPool) CountNumOfUtxos() int {
return countNumOfUtxos(&utxoPool.UtxoMap)
}
// A utility func that counts the total number of locked utxos in a pool.
func (utxoPool *UTXOPool) CountNumOfLockedUtxos() int {
return countNumOfUtxos(&utxoPool.LockedUtxoMap)
}
func countNumOfUtxos(utxos *UtxoMap) int {
countAll := 0
for _, utxoMap := range *utxos {
for txIdStr, val := range utxoMap {
_, err := hex.DecodeString(txIdStr)
if err != nil {
continue
}
countAll += len(val)
}
}
return countAll
}

@ -6,7 +6,7 @@ import (
func TestVerifyOneTransactionAndUpdate(t *testing.T) {
bc := CreateBlockchain(TestAddressOne, 0)
utxoPool := CreateUTXOPoolFromGenesisBlockChain(bc)
utxoPool := CreateUTXOPoolFromGenesisBlock(bc.Blocks[0])
bc.AddNewUserTransfer(utxoPool, PriKeyOne, TestAddressOne, TestAddressThree, 3, 0)
bc.AddNewUserTransfer(utxoPool, PriKeyOne, TestAddressOne, TestAddressTwo, 100, 0)
@ -24,7 +24,7 @@ func TestVerifyOneTransactionAndUpdate(t *testing.T) {
func TestVerifyOneTransactionFail(t *testing.T) {
bc := CreateBlockchain(TestAddressOne, 0)
utxoPool := CreateUTXOPoolFromGenesisBlockChain(bc)
utxoPool := CreateUTXOPoolFromGenesisBlock(bc.Blocks[0])
bc.AddNewUserTransfer(utxoPool, PriKeyOne, TestAddressOne, TestAddressThree, 3, 0)
bc.AddNewUserTransfer(utxoPool, PriKeyOne, TestAddressOne, TestAddressTwo, 100, 0)
@ -42,7 +42,7 @@ func TestVerifyOneTransactionFail(t *testing.T) {
func TestDeleteOneBalanceItem(t *testing.T) {
bc := CreateBlockchain(TestAddressOne, 0)
utxoPool := CreateUTXOPoolFromGenesisBlockChain(bc)
utxoPool := CreateUTXOPoolFromGenesisBlock(bc.Blocks[0])
bc.AddNewUserTransfer(utxoPool, PriKeyOne, TestAddressOne, TestAddressThree, 3, 0)
bc.AddNewUserTransfer(utxoPool, PriKeyThree, TestAddressThree, TestAddressTwo, 3, 0)

@ -57,6 +57,7 @@ func (client *Client) TransactionMessageHandler(msgPayload []byte) {
func (client *Client) handleProofOfLockMessage(proofs *[]blockchain.CrossShardTxProof) {
txsToSend := []blockchain.Transaction{}
//fmt.Printf("PENDING CLIENT TX - %d\n", len(client.PendingCrossTxs))
// Loop through the newly received list of proofs
client.PendingCrossTxsMutex.Lock()
for _, proof := range *proofs {

@ -6,6 +6,8 @@ import (
"flag"
"fmt"
"math/rand"
"os"
"path"
"sync"
"time"
@ -20,10 +22,18 @@ import (
proto_node "github.com/simple-rules/harmony-benchmark/proto/node"
)
var (
version string
builtBy string
builtAt string
commit string
)
type txGenSettings struct {
numOfAddress int
crossShard bool
maxNumTxsPerBatch int
crossShardRatio int
}
var (
@ -64,7 +74,7 @@ type TxInfo struct {
// Returns:
// all single-shard txs
// all cross-shard txs
func generateSimulatedTransactions(subsetId, numSubset int, shardID int, dataNodes []*node.Node) ([]*blockchain.Transaction, []*blockchain.Transaction) {
func generateSimulatedTransactions(subsetId, numSubset int, shardId int, dataNodes []*node.Node) ([]*blockchain.Transaction, []*blockchain.Transaction) {
/*
UTXO map structure:
address - [
@ -81,13 +91,13 @@ func generateSimulatedTransactions(subsetId, numSubset int, shardID int, dataNod
utxoPoolMutex.Lock()
txInfo := TxInfo{}
txInfo.shardID = shardID
txInfo.shardID = shardId
txInfo.dataNodes = dataNodes
txInfo.txCount = 0
UTXOLOOP:
// Loop over all addresses
for address, txMap := range dataNodes[shardID].UtxoPool.UtxoMap {
for address, txMap := range dataNodes[shardId].UtxoPool.UtxoMap {
if int(binary.BigEndian.Uint32(address[:]))%numSubset == subsetId%numSubset { // Work on one subset of utxo at a time
txInfo.address = address
// Loop over all txIds for the address
@ -106,7 +116,7 @@ UTXOLOOP:
randNum := rand.Intn(100)
if setting.crossShard && randNum < 30 { // 1/3 cross shard transactions: add another txinput from another shard
if setting.crossShard && randNum < setting.crossShardRatio { // 30% cross shard transactions: add another txinput from another shard
generateCrossShardTx(&txInfo)
} else {
generateSingleShardTx(&txInfo)
@ -118,16 +128,23 @@ UTXOLOOP:
}
}
}
//fmt.Printf("UTXO CLIENT - %d\n", shardId)
//fmt.Println(dataNodes[shardId].UtxoPool.CountNumOfUtxos())
utxoPoolMutex.Unlock()
log.Debug("[Generator] generated transations", "single-shard", len(txInfo.txs), "cross-shard", len(txInfo.crossTxs))
return txInfo.txs, txInfo.crossTxs
}
func generateCrossShardTx(txInfo *TxInfo) {
nodeShardID := txInfo.dataNodes[txInfo.shardID].Consensus.ShardID
crossShardId := nodeShardID
// a random shard to spend money to
crossShardId := rand.Intn(len(txInfo.dataNodes))
for true {
crossShardId = uint32(rand.Intn(len(txInfo.dataNodes)))
if crossShardId != nodeShardID {
break
}
}
crossShardNode := txInfo.dataNodes[crossShardId]
crossShardUtxosMap := crossShardNode.UtxoPool.UtxoMap[txInfo.address]
@ -147,7 +164,7 @@ func generateCrossShardTx(txInfo *TxInfo) {
for crossShardIndex, crossShardValue := range crossShardUtxos {
crossUtxoValue = crossShardValue
crossTxin = blockchain.NewTXInput(blockchain.NewOutPoint(&crossTxId, crossShardIndex), txInfo.address, uint32(crossShardId))
crossTxin = blockchain.NewTXInput(blockchain.NewOutPoint(&crossTxId, crossShardIndex), txInfo.address, crossShardId)
break
}
if crossTxin != nil {
@ -171,7 +188,7 @@ func generateCrossShardTx(txInfo *TxInfo) {
// Spend the utxo from the other shard, if any, to a random address in [0 - N)
if crossTxin != nil {
crossTxout := blockchain.TXOutput{Amount: crossUtxoValue, Address: pki.GetAddressFromInt(rand.Intn(setting.numOfAddress) + 1), ShardID: uint32(crossShardId)}
crossTxout := blockchain.TXOutput{Amount: crossUtxoValue, Address: pki.GetAddressFromInt(rand.Intn(setting.numOfAddress) + 1), ShardID: crossShardId}
txOutputs = append(txOutputs, crossTxout)
}
@ -216,36 +233,25 @@ func generateSingleShardTx(txInfo *TxInfo) {
txInfo.txCount++
}
// A utility func that counts the total number of utxos in a pool.
func countNumOfUtxos(utxoPool *blockchain.UTXOPool) int {
countAll := 0
for _, utxoMap := range utxoPool.UtxoMap {
for txIdStr, val := range utxoMap {
_ = val
id, err := hex.DecodeString(txIdStr)
if err != nil {
continue
}
txId := [32]byte{}
copy(txId[:], id[:])
for _, utxo := range val {
_ = utxo
countAll++
}
}
}
return countAll
func printVersion(me string) {
fmt.Fprintf(os.Stderr, "Harmony (C) 2018. %v, version %v-%v (%v %v)\n", path.Base(me), version, commit, builtBy, builtAt)
os.Exit(0)
}
func main() {
configFile := flag.String("config_file", "local_config.txt", "file containing all ip addresses and config")
maxNumTxsPerBatch := flag.Int("max_num_txs_per_batch", 100000, "number of transactions to send per message")
maxNumTxsPerBatch := flag.Int("max_num_txs_per_batch", 10000, "number of transactions to send per message")
logFolder := flag.String("log_folder", "latest", "the folder collecting the logs of this execution")
numSubset := flag.Int("numSubset", 3, "the number of subsets of utxos to process separately")
duration := flag.Int("duration", 60, "duration of the tx generation in second")
duration := flag.Int("duration", 60, "duration of the tx generation in seconds. If negative, the experiment runs forever.")
versionFlag := flag.Bool("version", false, "Output version info")
crossShardRatio := flag.Int("cross_shard_ratio", 30, "The percentage of cross shard transactions.")
flag.Parse()
if *versionFlag {
printVersion(os.Args[0])
}
// Read the configs
config := client_config.NewConfig()
config.ReadConfigFile(*configFile)
@ -255,6 +261,7 @@ func main() {
// Do cross shard tx if there are more than one shard
setting.crossShard = len(shardIds) > 1
setting.maxNumTxsPerBatch = *maxNumTxsPerBatch
setting.crossShardRatio = *crossShardRatio
// TODO(Richard): refactor this chunk to a single method
// Setup a logger to stdout and log file.
@ -290,8 +297,8 @@ func main() {
if node.Consensus.ShardID == block.ShardId {
log.Debug("Adding block from leader", "shardId", block.ShardId)
// Add it to blockchain
utxoPoolMutex.Lock()
node.AddNewBlock(block)
utxoPoolMutex.Lock()
node.UpdateUtxoAndState(block)
utxoPoolMutex.Unlock()
} else {
@ -316,7 +323,7 @@ func main() {
batchCounter := 0
for true {
t := time.Now()
if t.Sub(start).Seconds() >= totalTime {
if totalTime > 0 && t.Sub(start).Seconds() >= totalTime {
log.Debug("Generator timer ended.", "duration", (int(t.Sub(start))), "startTime", start, "totalTime", totalTime)
break
}
@ -324,7 +331,7 @@ func main() {
constructAndSendTransaction(batchCounter, *numSubset, shardId, leaders, nodes, clientNode, clientPort)
}
batchCounter++
time.Sleep(1000 * time.Millisecond)
time.Sleep(5000 * time.Millisecond)
}
// Send a stop message to stop the nodes at the end
@ -341,13 +348,15 @@ func constructAndSendTransaction(subsetId, numSubset, shardId int, leaders []p2p
txs, crossTxs := generateSimulatedTransactions(subsetId, numSubset, shardId, nodes)
allCrossTxs = append(allCrossTxs, crossTxs...)
log.Debug("[Generator] Sending single-shard txs ...", "leader", leader, "numTxs", len(txs), "numCrossTxs", len(crossTxs))
log.Debug("[Generator] Sending single-shard txs ...", "leader", leader, "numTxs", len(txs))
msg := proto_node.ConstructTransactionListMessage(txs)
p2p.SendMessage(leader, msg)
// Note cross shard txs are later sent in batch
if len(allCrossTxs) > 0 {
log.Debug("[Generator] Broadcasting cross-shard txs ...", "allCrossTxs", len(allCrossTxs))
//fmt.Printf("SENDING CLIENT TXS: %d\n", shardId)
//fmt.Println(allCrossTxs)
msg := proto_node.ConstructTransactionListMessage(allCrossTxs)
p2p.BroadcastMessage(leaders, msg)

@ -1,14 +1,100 @@
#!/usr/bin/env bash
#!/bin/bash
# this script is used to build the benchmark and txgen binaries
# TODO: add error and parameter checking
declare -A SRC
SRC[benchmark]=benchmark.go
SRC[txgen]=client/txgen/main.go
BINDIR=bin
BUCKET=unique-bucket-bin
GOOS=linux
GOARCH=amd64
env GOOS=$GOOS GOARCH=$GOARCH go build -o bin/benchmark benchmark.go
env GOOS=$GOOS GOARCH=$GOARCH go build -o bin/txgen client/txgen/main.go
FOLDER=
function usage
{
ME=$(basename $0)
cat<<EOF
Usage: $ME [OPTIONS] ACTION
OPTIONS:
-h print this help message
-p profile aws profile name
-a arch set build arch (default: $GOARCH)
-o os set build OS (default: $GOOS, windows is supported)
-b bucket set the upload bucket name (default: $BUCKET)
-f folder set the upload folder name in the bucket (default: $FOLDER)
ACTION:
build build binaries only (default action)
upload upload binaries to s3
EXAMPLES:
# build linux binaries only by default
$ME
# build windows binaries
$ME -o windows
# upload binaries to my s3 bucket, 0908 folder
$ME -b mybucket -f 0908 upload
EOF
exit 1
}
function build_only
{
VERSION=$(git rev-list --all --count)
COMMIT=$(git describe --always --long --dirty)
BUILTAT=$(date +%FT%T%z)
BUILTBY=${USER}@
for bin in "${!SRC[@]}"; do
env GOOS=$GOOS GOARCH=$GOARCH go build -ldflags="-X main.version=v${VERSION} -X main.commit=${COMMIT} -X main.builtAt=${BUILTAT} -X main.builtBy=${BUILTBY}" -o $BINDIR/$bin ${SRC[$bin]}
$BINDIR/$bin -version
done
md5sum $BINDIR/* > $BINDIR/md5sum.txt
}
function upload
{
AWSCLI=aws
if [ -n "$PROFILE" ]; then
AWSCLI+=" --profile $PROFILE"
fi
for bin in "${!SRC[@]}"; do
[ -e $BINDIR/$bin ] && $AWSCLI s3 cp $BINDIR/$bin s3://${BUCKET}$FOLDER/$bin --acl public-read
done
[ -e $BINDIR/md5sum.txt ] && $AWSCLI s3 cp $BINDIR/md5sum.txt s3://${BUCKET}$FOLDER/md5sum.txt --acl public-read
}
################################ MAIN FUNCTION ##############################
while getopts "hp:a:o:b:f:" option; do
case $option in
h) usage ;;
p) PROFILE=$OPTARG ;;
a) GOARCH=$OPTARG ;;
o) GOOS=$OPTARG ;;
b) BUCKET=$OPTARG/ ;;
f) FOLDER=$OPTARG ;;
esac
done
mkdir -p $BINDIR
shift $(($OPTIND-1))
AWSCLI=aws
if [ "$1" != "" ]; then
AWSCLI+=" --profile $1"
fi
ACTION=${1:-build}
$AWSCLI s3 cp bin/benchmark s3://unique-bucket-bin/benchmark --acl public-read-write
$AWSCLI s3 cp bin/txgen s3://unique-bucket-bin/txgen --acl public-read-write
case "$ACTION" in
"build") build_only ;;
"upload") upload ;;
*) usage ;;
esac

@ -7,8 +7,8 @@ import (
"log"
"time"
"github.com/simple-rules/harmony-benchmark/node"
"github.com/simple-rules/harmony-benchmark/utils"
"github.com/simple-rules/harmony-benchmark/waitnode"
)
// IdentityBlock has the information of one node
@ -16,7 +16,7 @@ type IdentityBlock struct {
Timestamp int64
PrevBlockHash [32]byte
NumIdentities int32
Identities []*waitnode.WaitNode
Identities []*node.Node
}
// Serialize serializes the block
@ -31,7 +31,7 @@ func (b *IdentityBlock) Serialize() []byte {
}
//Get Identities
func (b *IdentityBlock) GetIdentities() []*waitnode.WaitNode {
func (b *IdentityBlock) GetIdentities() []*node.Node {
return b.Identities
}
@ -47,7 +47,7 @@ func DeserializeBlock(d []byte) *IdentityBlock {
}
// NewBlock creates and returns a new block.
func NewBlock(Identities []*waitnode.WaitNode, prevBlockHash [32]byte) *IdentityBlock {
func NewBlock(Identities []*node.Node, prevBlockHash [32]byte) *IdentityBlock {
block := &IdentityBlock{Timestamp: time.Now().Unix(), PrevBlockHash: prevBlockHash, NumIdentities: int32(len(Identities)), Identities: Identities}
return block
}
@ -59,7 +59,7 @@ func (b *IdentityBlock) CalculateBlockHash() [32]byte {
hashes = append(hashes, utils.ConvertFixedDataIntoByteArray(b.Timestamp))
hashes = append(hashes, b.PrevBlockHash[:])
for _, id := range b.Identities {
hashes = append(hashes, utils.ConvertFixedDataIntoByteArray(id.ID))
hashes = append(hashes, utils.ConvertFixedDataIntoByteArray(id))
}
hashes = append(hashes, utils.ConvertFixedDataIntoByteArray(b.NumIdentities))
blockHash = sha256.Sum256(bytes.Join(hashes, []byte{}))
@ -68,7 +68,7 @@ func (b *IdentityBlock) CalculateBlockHash() [32]byte {
// NewGenesisBlock creates and returns genesis Block.
func NewGenesisBlock() *IdentityBlock {
var Ids []*waitnode.WaitNode
var Ids []*node.Node
block := &IdentityBlock{Timestamp: time.Now().Unix(), PrevBlockHash: [32]byte{}, NumIdentities: 1, Identities: Ids}
return block
}

@ -9,8 +9,8 @@ import (
"sync"
"github.com/simple-rules/harmony-benchmark/log"
"github.com/simple-rules/harmony-benchmark/node"
"github.com/simple-rules/harmony-benchmark/p2p"
"github.com/simple-rules/harmony-benchmark/waitnode"
)
var mutex sync.Mutex
@ -18,21 +18,23 @@ var identityPerBlock = 100000
// IdentityChain (Blockchain) keeps Identities per epoch, currently centralized!
type IdentityChain struct {
Identities []*IdentityBlock
PendingIdentities []*waitnode.WaitNode
//Identities []*IdentityBlock //No need to have the identity block as of now
Identities []*node.Node
PendingIdentities []*node.Node
log log.Logger
Peer p2p.Peer
SelectedIdentitites []*waitnode.WaitNode
SelectedIdentitites []*node.Node
EpochNum int
PeerToShardMap map[*waitnode.WaitNode]int
ShardLeaderMap map[int]*waitnode.WaitNode
PeerToShardMap map[*node.Node]int
ShardLeaderMap map[int]*node.Node
PubKey string
CurrentEpochStartTime int64
NumberOfShards int
NumberOfNodesInShard int
PowMap map[p2p.Peer]string
}
func seekRandomNumber(EpochNum int, SelectedIdentitites []*waitnode.WaitNode) int {
func seekRandomNumber(EpochNum int, SelectedIdentitites []*node.Node) int {
// Broadcast message to all nodes and collect back their numbers, do consensus and get a leader.
// Use leader to generate a random number.
//all here mocked
@ -50,46 +52,63 @@ type GlobalBlockchainConfig struct {
//Shard
func (IDC *IdentityChain) Shard() {
num := seekRandomNumber(IDC.EpochNum, IDC.SelectedIdentitites)
IDC.CreateShardAssignment(num)
IDC.SelectIds()
IDC.CreateShardAssignment()
IDC.ElectLeaders()
IDC.BroadCastNewConfiguration()
}
//
//ElectLeaders
func (IDC *IdentityChain) ElectLeaders() {
return
}
//BroadCastNewConfiguration
func (IDC *IdentityChain) BroadCastNewConfiguration() {
fmt.Println("Broadcasting leader and shard info to everyone!")
// allPeers := make([]p2p.Peer, len(IDC.SelectedIdentitites))
// msgToSend := proto.
// p2p.BroadCastMessage(allPeers, msgToSend)
}
//CreateShardAssignment
func (IDC *IdentityChain) CreateShardAssignment(num int) {
func (IDC *IdentityChain) CreateShardAssignment() {
num := seekRandomNumber(IDC.EpochNum, IDC.SelectedIdentitites)
IDC.NumberOfShards = IDC.NumberOfShards + needNewShards()
IDC.SelectedIdentitites = generateRandomPermutations(num, IDC.SelectedIdentitites)
IDC.PeerToShardMap = make(map[*waitnode.WaitNode]int)
IDC.generateRandomPermutations(num)
IDC.PeerToShardMap = make(map[*node.Node]int)
numberInOneShard := len(IDC.SelectedIdentitites) / IDC.NumberOfShards
for peerNum := 1; peerNum <= len(IDC.SelectedIdentitites); peerNum++ {
fmt.Println(len(IDC.SelectedIdentitites))
for peerNum := 0; peerNum < len(IDC.SelectedIdentitites); peerNum++ {
IDC.PeerToShardMap[IDC.SelectedIdentitites[peerNum]] = peerNum / numberInOneShard
}
}
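A tiny worked example (not part of the commit) of the peerNum / numberInOneShard rule used above: with 10 selected identities and 2 shards, peers 0 through 4 map to shard 0 and peers 5 through 9 to shard 1.

```go
package main

import "fmt"

func main() {
	selected := 10 // stands in for len(IDC.SelectedIdentitites) after the permutation
	shards := 2    // stands in for IDC.NumberOfShards
	numberInOneShard := selected / shards

	for peerNum := 0; peerNum < selected; peerNum++ {
		fmt.Printf("peer %2d -> shard %d\n", peerNum, peerNum/numberInOneShard)
	}
}
```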
func generateRandomPermutations(num int, SelectedIdentitites []*waitnode.WaitNode) []*waitnode.WaitNode {
func (IDC *IdentityChain) generateRandomPermutations(num int) {
src := rand.NewSource(int64(num))
rnd := rand.New(src)
perm := rnd.Perm(len(SelectedIdentitites))
SelectedIdentititesCopy := make([]*waitnode.WaitNode, len(SelectedIdentitites))
perm := rnd.Perm(len(IDC.SelectedIdentitites))
SelectedIdentititesCopy := make([]*node.Node, len(IDC.SelectedIdentitites))
for j, i := range perm {
SelectedIdentititesCopy[j] = SelectedIdentitites[i]
SelectedIdentititesCopy[j] = IDC.SelectedIdentitites[i]
}
return SelectedIdentititesCopy
IDC.SelectedIdentitites = SelectedIdentititesCopy
}
// SelectIds
// SelectIds selects the identities to be used in the next epoch
func (IDC *IdentityChain) SelectIds() {
selectNumber := IDC.NumberOfNodesInShard - len(IDC.Identities)
IB := IDC.GetLatestBlock()
currentIDS := IB.GetIdentities()
// Insert the lines below once you have an identity block
// IB := IDC.GetLatestBlock()
// currentIDS := IB.GetIdentities()
currentIDS := IDC.Identities
selectNumber = int(math.Min(float64(len(IDC.PendingIdentities)), float64(selectNumber)))
pending := IDC.PendingIdentities[:selectNumber]
IDC.SelectedIdentitites = append(currentIDS, pending...)
IDC.PendingIdentities = []*waitnode.WaitNode{}
IDC.PendingIdentities = []*node.Node{}
}
//Checks how many new shards we need. Currently we say 0.
@ -97,37 +116,6 @@ func needNewShards() int {
return 0
}
// GetLatestBlock gets the latest block at the end of the chain
func (IDC *IdentityChain) GetLatestBlock() *IdentityBlock {
if len(IDC.Identities) == 0 {
return nil
}
return IDC.Identities[len(IDC.Identities)-1]
}
//UpdateIdentityChain is to create the Blocks to be added to the chain
func (IDC *IdentityChain) UpdateIdentityChain() {
//If there are no more Identities registering, the blockchain is dead
if len(IDC.PendingIdentities) == 0 {
// This is bad, because the previous block might not be alive
return
}
if len(IDC.Identities) == 0 {
block := NewGenesisBlock()
IDC.Identities = append(IDC.Identities, block)
} else {
prevBlock := IDC.GetLatestBlock()
prevBlockHash := prevBlock.CalculateBlockHash()
NewIdentities := IDC.PendingIdentities[:identityPerBlock]
IDC.PendingIdentities = []*waitnode.WaitNode{}
//All other blocks are dropped, we need to inform them that they are dropped?
IDBlock := NewBlock(NewIdentities, prevBlockHash)
IDC.Identities = append(IDC.Identities, IDBlock)
}
}
//StartServer a server and process the request by a handler.
func (IDC *IdentityChain) StartServer() {
fmt.Println("Starting server...")
@ -162,5 +150,47 @@ func New(Peer p2p.Peer) *IdentityChain {
IDC := IdentityChain{}
IDC.Peer = Peer
IDC.log = log.New()
IDC.NumberOfShards = 1 //to be filled via global config
IDC.NumberOfNodesInShard = 500 //to be filled via global config
IDC.Identities = make([]*node.Node, 0)
IDC.PendingIdentities = make([]*node.Node, 0)
IDC.SelectedIdentitites = make([]*node.Node, 0)
IDC.PowMap = make(map[p2p.Peer]string)
return &IDC
}
// -------------------------------------------------------------
// The code below is needed when we have an actual identity block
// GetLatestBlock gets the latest block at the end of the chain
// func (IDC *IdentityChain) GetLatestBlock() *IdentityBlock {
// if len(IDC.Identities) == 0 {
// return nil
// }
// return IDC.Identities[len(IDC.Identities)-1]
// }
//UpdateIdentityChain is to create the Blocks to be added to the chain
// func (IDC *IdentityChain) UpdateIdentityChain() {
// //If there are no more Identities registering, the blockchain is dead
// if len(IDC.PendingIdentities) == 0 {
// // This is bad, because the previous block might not be alive
// return
// }
// if len(IDC.Identities) == 0 {
// block := NewGenesisBlock()
// IDC.Identities = append(IDC.Identities, block)
// } else {
// prevBlock := IDC.GetLatestBlock()
// prevBlockHash := prevBlock.CalculateBlockHash()
// NewIdentities := IDC.PendingIdentities[:identityPerBlock]
// IDC.PendingIdentities = []*node.Node{}
// //All other blocks are dropped, we need to inform them that they are dropped?
// IDBlock := NewBlock(NewIdentities, prevBlockHash)
// IDC.Identities = append(IDC.Identities, IDBlock)
// }
// }
// -------------------------------------------------------------

@ -1,14 +1,19 @@
package identitychain
import (
"bytes"
"fmt"
"math/rand"
"net"
"os"
"strconv"
"time"
"github.com/simple-rules/harmony-benchmark/node"
"github.com/simple-rules/harmony-benchmark/p2p"
"github.com/simple-rules/harmony-benchmark/pow"
"github.com/simple-rules/harmony-benchmark/proto"
proto_identity "github.com/simple-rules/harmony-benchmark/proto/identity"
"github.com/simple-rules/harmony-benchmark/waitnode"
)
//IdentityChainHandler handles registration of new Identities
@ -27,6 +32,8 @@ func (IDC *IdentityChain) IdentityChainHandler(conn net.Conn) {
if msgCategory != proto.IDENTITY {
IDC.log.Error("Identity Chain Recieved incorrect protocol message")
os.Exit(1)
} else {
fmt.Println("Message category is correct")
}
msgType, err := proto.GetMessageType(content)
if err != nil {
@ -43,20 +50,73 @@ func (IDC *IdentityChain) IdentityChainHandler(conn net.Conn) {
actionType := proto_identity.IdentityMessageType(msgType)
switch actionType {
case proto_identity.IDENTITY:
IDC.registerIdentity(msgPayload)
idMsgType, err := proto_identity.GetIdentityMessageType(msgPayload)
if err != nil {
fmt.Println("Error finding the identity message type")
}
switch idMsgType {
case proto_identity.REGISTER:
IDC.registerIdentity(msgPayload)
case proto_identity.ANNOUNCE:
IDC.acceptNewConnection(msgPayload)
}
}
}
}
func (IDC *IdentityChain) registerIdentity(msgPayload []byte) {
identityPayload, err := proto_identity.GetIdentityMessagePayload(msgPayload)
payload, err := proto_identity.GetIdentityMessagePayload(msgPayload)
if err != nil {
IDC.log.Error("identity payload not read")
} else {
fmt.Println("identity payload read")
}
NewWaitNode := waitnode.DeserializeWaitNode(identityPayload)
IDC.PendingIdentities = append(IDC.PendingIdentities, NewWaitNode)
fmt.Println(len(IDC.PendingIdentities))
fmt.Println("we are now registering identities")
offset := 0
proof := payload[offset : offset+32]
offset = offset + 32
Node := node.DeserializeWaitNode(payload[offset:])
req := IDC.PowMap[Node.Self]
ok, err := pow.Check(req, string(proof), []byte(""))
fmt.Println(err)
if ok {
fmt.Println("Proof of work accepted")
IDC.PendingIdentities = append(IDC.PendingIdentities, Node)
fmt.Println(len(IDC.PendingIdentities)) //Fix why IDC does not have log working.
} else {
fmt.Println("identity proof of work not accepted")
}
}
func (IDC *IdentityChain) acceptNewConnection(msgPayload []byte) {
identityPayload, err := proto_identity.GetIdentityMessagePayload(msgPayload)
if err != nil {
fmt.Println("There was a error in reading the identity payload")
} else {
fmt.Println("accepted new connection")
}
fmt.Println("Sleeping for 2 secs ...")
time.Sleep(2 * time.Second)
Node := node.DeserializeWaitNode(identityPayload)
buffer := bytes.NewBuffer([]byte{})
src := rand.NewSource(time.Now().UnixNano())
rnd := rand.New(src)
challengeNonce := int((rnd.Int31()))
req := pow.NewRequest(5, []byte(strconv.Itoa(challengeNonce)))
IDC.PowMap[Node.Self] = req
fmt.Println(Node.Self)
fmt.Println(req)
buffer.Write([]byte(req))
// 32 byte block hash
// buffer.Write(prevBlockHash)
// The message is missing the previous BlockHash because we don't actively maintain an identitychain
// This can be included in the fulfill request.
// Message should be encrypted and then signed to follow PKE.
//IDC should accept node publickey, encrypt the nonce and blockhash
// Then sign the message by own private key and send the message back.
msgToSend := proto_identity.ConstructIdentityMessage(proto_identity.REGISTER, buffer.Bytes())
p2p.SendMessage(Node.Self, msgToSend)
}
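Taken together, acceptNewConnection, processPOWMessage (in node/node.go below) and registerIdentity form a challenge-response handshake around the pow package added in this commit. A self-contained sketch of just the proof-of-work part, with the networking and gob serialization omitted; the difficulty, nonce construction and empty bound data mirror the handlers above:

```go
package main

import (
	"fmt"
	"math/rand"
	"strconv"
	"time"

	"github.com/simple-rules/harmony-benchmark/pow"
)

func main() {
	// Identity chain, on ANNOUNCE: issue a difficulty-5 challenge keyed by a random nonce.
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	challengeNonce := int(rnd.Int31())
	req := pow.NewRequest(5, []byte(strconv.Itoa(challengeNonce)))

	// Waiting node, in processPOWMessage: solve the challenge.
	proof, err := pow.Fulfil(req, []byte(""))
	if err != nil {
		fmt.Println("could not fulfil request:", err)
		return
	}

	// Identity chain, in registerIdentity: verify before adding to PendingIdentities.
	ok, err := pow.Check(req, proof, []byte(""))
	fmt.Println("proof accepted:", ok, "err:", err)
}
```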

@ -1,20 +0,0 @@
package identitychain
import (
"fmt"
"os"
"testing"
"github.com/simple-rules/harmony-benchmark/p2p"
)
func TestIDCFormed(test *testing.T) {
peer := p2p.Peer{Ip: "127.0.0.1", Port: "8080"}
IDC := New(peer)
if IDC == nil {
fmt.Println("IDC not formed.")
os.Exit(1)
}
}
//TODO Mock netconnection to test whether identitychain is listening.

@ -1,43 +0,0 @@
package identitymanage
// import (
// "bytes"
// "encoding/binary"
// "log"
// "github.com/dedis/kyber"
// )
// // Consensus data containing all info related to one round of consensus process
// type Identity struct {
// priKey kyber.Scalar
// pubKey kyber.Point
// Log log.Logger
// }
// // Construct the response message to send to leader (assumption the consensus data is already verified)
// func (identity *Identity) registerIdentity(msgType proto_consensus.MessageType, response kyber.Scalar) []byte {
// buffer := bytes.NewBuffer([]byte{})
// // 4 byte consensus id
// fourBytes := make([]byte, 4)
// binary.BigEndian.PutUint32(fourBytes, consensus.consensusId)
// buffer.Write(fourBytes)
// // 32 byte block hash
// buffer.Write(consensus.blockHash[:32])
// // 2 byte validator id
// twoBytes := make([]byte, 2)
// binary.BigEndian.PutUint16(twoBytes, consensus.nodeId)
// buffer.Write(twoBytes)
// // 32 byte of response
// response.MarshalTo(buffer)
// // 64 byte of signature on previous data
// signature := consensus.signMessage(buffer.Bytes())
// buffer.Write(signature)
// return proto_identity.ConstructIdentityMessage(msgType, buffer.Bytes())
// }

@ -1,10 +1,15 @@
package node
import (
"bytes"
"encoding/gob"
"fmt"
"net"
"sync"
"github.com/simple-rules/harmony-benchmark/crypto/pki"
"github.com/simple-rules/harmony-benchmark/pow"
"github.com/simple-rules/harmony-benchmark/proto/identity"
"github.com/simple-rules/harmony-benchmark/blockchain"
"github.com/simple-rules/harmony-benchmark/client"
@ -33,6 +38,9 @@ type Node struct {
doneSyncing chan struct{}
ClientPeer *p2p.Peer // The peer for the benchmark tx generator client, used for leaders to return proof-of-accept
Client *client.Client // The presence of a client object means this node will also act as a client
IsWaiting bool
Self p2p.Peer
IDCPeer p2p.Peer
}
// Add new crossTx and proofs to the list of crossTx that needs to be sent back to client
@ -64,6 +72,7 @@ func (node *Node) getTransactionsForNewBlock(maxNumTxs int) ([]*blockchain.Trans
// Start a server and process the request by a handler.
func (node *Node) StartServer(port string) {
fmt.Println("Hello in server now")
node.log.Debug("Starting server", "node", node, "port", port)
node.listenOnPort(port)
@ -110,6 +119,102 @@ func (node *Node) countNumTransactionsInBlockchain() int {
return count
}
//ConnectIdentityChain connects to identity chain
func (node *Node) ConnectIdentityChain() {
IDCPeer := node.IDCPeer
p2p.SendMessage(IDCPeer, identity.ConstructIdentityMessage(identity.ANNOUNCE, node.SerializeWaitNode()))
return
}
//NewWaitNode is a way to initiate a waiting node
func NewWaitNode(peer, IDCPeer p2p.Peer) Node {
node := Node{}
node.Self = peer
node.IDCPeer = IDCPeer
node.log = log.New()
return node
}
//NewNodefromIDC
func NewNodefromIDC(node Node, consensus *consensus.Consensus, db *db.LDBDatabase) *Node {
if consensus != nil {
// Consensus and associated channel to communicate blocks
node.Consensus = consensus
node.BlockChannel = make(chan blockchain.Block)
// Genesis Block
// TODO(minh): Use or implement new function in blockchain package for this.
genesisBlock := &blockchain.Blockchain{}
genesisBlock.Blocks = make([]*blockchain.Block, 0)
// TODO(RJ): use miner's address as coinbase address
coinbaseTx := blockchain.NewCoinbaseTX(pki.GetAddressFromInt(1), "0", node.Consensus.ShardID)
genesisBlock.Blocks = append(genesisBlock.Blocks, blockchain.NewGenesisBlock(coinbaseTx, node.Consensus.ShardID))
node.blockchain = genesisBlock
// UTXO pool from Genesis block
//node.UtxoPool = blockchain.CreateUTXOPoolFromGenesisBlockChain(node.blockchain)
// Initialize level db.
node.db = db
}
// Logger
node.log = log.New()
return &node
}
func (node *Node) processPOWMessage(message []byte) {
payload, err := identity.GetIdentityMessagePayload(message)
if err != nil {
fmt.Println("Could not read payload")
}
IDCPeer := node.IDCPeer
// 4 byte challengeNonce id
req := string(payload)
proof, _ := pow.Fulfil(req, []byte("")) //"This could be block hash data"
buffer := bytes.NewBuffer([]byte{})
proofBytes := make([]byte, 32) //proof seems to be 32 byte here
copy(proofBytes[:], proof)
buffer.Write(proofBytes)
buffer.Write(node.SerializeWaitNode())
msgPayload := buffer.Bytes()
p2p.SendMessage(IDCPeer, identity.ConstructIdentityMessage(identity.REGISTER, msgPayload))
}
//https://stackoverflow.com/questions/12854125/how-do-i-dump-the-struct-into-the-byte-array-without-reflection/12854659#12854659
//SerializeWaitNode serializes the node
func (node *Node) SerializeWaitNode() []byte {
//Needs to escape the serialization of unexported fields
result := new(bytes.Buffer)
encoder := gob.NewEncoder(result)
err := encoder.Encode(node.Self)
if err != nil {
fmt.Println("Could not serialize node")
fmt.Println("ERROR", err)
//node.log.Error("Could not serialize node")
}
err = encoder.Encode(node.IDCPeer)
return result.Bytes()
}
// DeserializeWaitNode deserializes the node
func DeserializeWaitNode(d []byte) *Node {
var wn Node
r := bytes.NewBuffer(d)
decoder := gob.NewDecoder(r)
err := decoder.Decode(&wn.Self)
if err != nil {
log.Error("Could not de-serialize node")
}
err = decoder.Decode(&wn.IDCPeer)
if err != nil {
log.Error("Could not de-serialize node")
}
return &wn
}
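SerializeWaitNode and DeserializeWaitNode only round-trip the exported Self and IDCPeer peers through gob, which is what the identity chain handler relies on when it decodes the announcing node. A hedged usage sketch, assuming the exported node and p2p identifiers shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/simple-rules/harmony-benchmark/node"
	"github.com/simple-rules/harmony-benchmark/p2p"
)

func main() {
	self := p2p.Peer{Ip: "127.0.0.1", Port: "8080"}
	idc := p2p.Peer{Ip: "localhost", Port: "9000"}

	// Build a waiting node, serialize it, and decode it back.
	wn := node.NewWaitNode(self, idc)
	buf := wn.SerializeWaitNode()
	back := node.DeserializeWaitNode(buf)

	fmt.Println(back.Self, back.IDCPeer)
}
```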
// Create a new Node
func New(consensus *consensus.Consensus, db *db.LDBDatabase) *Node {
node := Node{}
@ -129,7 +234,7 @@ func New(consensus *consensus.Consensus, db *db.LDBDatabase) *Node {
node.blockchain = genesisBlock
// UTXO pool from Genesis block
node.UtxoPool = blockchain.CreateUTXOPoolFromGenesisBlockChain(node.blockchain)
node.UtxoPool = blockchain.CreateUTXOPoolFromGenesisBlock(node.blockchain.Blocks[0])
// Initialize level db.
node.db = db

@ -3,6 +3,7 @@ package node
import (
"bytes"
"encoding/gob"
"fmt"
"net"
"os"
"strconv"
@ -13,6 +14,7 @@ import (
"github.com/simple-rules/harmony-benchmark/proto"
"github.com/simple-rules/harmony-benchmark/proto/client"
"github.com/simple-rules/harmony-benchmark/proto/consensus"
proto_identity "github.com/simple-rules/harmony-benchmark/proto/identity"
proto_node "github.com/simple-rules/harmony-benchmark/proto/node"
)
@ -20,7 +22,7 @@ const (
// The max number of transaction per a block.
MaxNumberOfTransactionsPerBlock = 10000
// The number of blocks allowed before generating state block
NumBlocksBeforeStateBlock = 100
NumBlocksBeforeStateBlock = 1000
)
// NodeHandler handles a new incoming connection.
@ -55,6 +57,19 @@ func (node *Node) NodeHandler(conn net.Conn) {
}
switch msgCategory {
case proto.IDENTITY:
actionType := proto_identity.IdentityMessageType(msgType)
switch actionType {
case proto_identity.IDENTITY:
messageType := proto_identity.MessageType(msgPayload[0])
switch messageType {
case proto_identity.REGISTER:
fmt.Println("received a identity message")
node.processPOWMessage(msgPayload)
case proto_identity.ANNOUNCE:
node.log.Error("Announce message should be sent to IdentityChain")
}
}
case proto.CONSENSUS:
actionType := consensus.ConsensusMessageType(msgType)
switch actionType {
@ -304,27 +319,25 @@ func (node *Node) PostConsensusProcessing(newBlock *blockchain.Block) {
}
}
node.blockchain.Blocks = []*blockchain.Block{}
node.AddNewBlock(newBlock)
} else {
node.AddNewBlock(newBlock)
node.UpdateUtxoAndState(newBlock)
if node.Consensus.IsLeader {
// Move crossTx-in-consensus into the list to be returned to client
for _, crossTxAndProof := range node.CrossTxsInConsensus {
crossTxAndProof.Proof.BlockHash = newBlock.Hash
// TODO: fill in the signature proofs
}
if len(node.CrossTxsInConsensus) != 0 {
node.addCrossTxsToReturn(node.CrossTxsInConsensus)
node.CrossTxsInConsensus = []*blockchain.CrossShardTxAndProof{}
}
}
node.SendBackProofOfAcceptOrReject()
node.BroadcastNewBlock(newBlock)
if node.Consensus.IsLeader {
// Move crossTx-in-consensus into the list to be returned to client
for _, crossTxAndProof := range node.CrossTxsInConsensus {
crossTxAndProof.Proof.BlockHash = newBlock.Hash
// TODO: fill in the signature proofs
}
if len(node.CrossTxsInConsensus) != 0 {
node.addCrossTxsToReturn(node.CrossTxsInConsensus)
node.CrossTxsInConsensus = []*blockchain.CrossShardTxAndProof{}
}
node.SendBackProofOfAcceptOrReject()
node.BroadcastNewBlock(newBlock)
}
node.AddNewBlock(newBlock)
node.UpdateUtxoAndState(newBlock)
}
func (node *Node) AddNewBlock(newBlock *blockchain.Block) {
@ -339,7 +352,22 @@ func (node *Node) AddNewBlock(newBlock *blockchain.Block) {
func (node *Node) UpdateUtxoAndState(newBlock *blockchain.Block) {
// Update UTXO pool
node.UtxoPool.Update(newBlock.Transactions)
if newBlock.IsStateBlock() {
newUtxoPool := blockchain.CreateUTXOPoolFromGenesisBlock(newBlock)
node.UtxoPool.UtxoMap = newUtxoPool.UtxoMap
} else {
node.UtxoPool.Update(newBlock.Transactions)
}
// Clear transaction-in-Consensus list
node.transactionInConsensus = []*blockchain.Transaction{}
//if node.Consensus.IsLeader {
// fmt.Printf("TX in New BLOCK - %d %s\n", node.UtxoPool.ShardID, newBlock.IsStateBlock())
// //fmt.Println(newBlock.Transactions)
// fmt.Printf("LEADER CURRENT UTXO - %d\n", node.UtxoPool.ShardID)
// fmt.Println(node.UtxoPool.CountNumOfUtxos())
// //fmt.Println(node.UtxoPool)
// fmt.Printf("LEADER LOCKED UTXO - %d\n", node.UtxoPool.ShardID)
// fmt.Println(node.UtxoPool.CountNumOfLockedUtxos())
// //fmt.Println(node.UtxoPool.StringOfLockedUtxos())
//}
}

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2018 Bas Westerbaan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@ -0,0 +1,57 @@
go-pow
======
`go-pow` is a simple Go package to add (asymmetric) *Proof of Work* to your service.
To create a Proof-of-Work request (with difficulty 5), use `pow.NewRequest`:
```go
req := pow.NewRequest(5, someRandomNonce)
```
This returns a string like `sha2bday-5-c29tZSByYW5kb20gbm9uY2U`,
which can be passed on to the client.
The client fulfils the proof of work by running `pow.Fulfil`:
```go
proof, _ := pow.Fulfil(req, []byte("some bound data"))
```
The client returns the proof (in this case `AAAAAAAAAAMAAAAAAAAADgAAAAAAAAAb`)
to the server, which can check it is indeed a valid proof of work, by running:
``` go
ok, _ := pow.Check(req, proof, []byte("some bound data"))
```
Notes
-----
1. There should be at least sufficient randomness in either the `nonce` passed to
`NewRequest` or the `data` passed to `Fulfil` and `Check`.
Thus it is fine to use the same bound `data` for every client, if every client
gets a different `nonce` in its proof-of-work request.
It is also fine to use the same `nonce` in the proof-of-work request,
if every client is (by the encapsulating protocol) forced to use
different bound `data`.
2. The work to fulfil a request scales exponentially in the difficulty parameter.
The work to check that a proof is correct remains constant:
```
Check on Difficulty=5 500000 2544 ns/op
Check on Difficulty=10 500000 2561 ns/op
Check on Difficulty=15 500000 2549 ns/op
Check on Difficulty=20 500000 2525 ns/op
Fulfil on Difficulty=5 100000 15725 ns/op
Fulfil on Difficulty=10 30000 46808 ns/op
Fulfil on Difficulty=15 2000 955606 ns/op
Fulfil on Difficulty=20 200 6887722 ns/op
```
To do
-----
- Support for [equihash](https://www.cryptolux.org/index.php/Equihash) would be nice.
- Port to Python, Java, Javascript, ...
- Parallelize.

@ -0,0 +1,131 @@
// Create and fulfill proof of work requests.
package pow
import (
"encoding/base64"
"fmt"
"strconv"
"strings"
)
type Algorithm string
const (
Sha2BDay Algorithm = "sha2bday"
)
// Represents a proof-of-work request.
type Request struct {
// The requested algorithm
Alg Algorithm
// The requested difficulty
Difficulty uint32
// Nonce to diversify the request
Nonce []byte
}
// Represents a completed proof-of-work
type Proof struct {
buf []byte
}
// Convenience function to create a new sha2bday proof-of-work request
// as a string
func NewRequest(difficulty uint32, nonce []byte) string {
req := Request{
Difficulty: difficulty,
Nonce: nonce,
Alg: Sha2BDay,
}
s, _ := req.MarshalText()
return string(s)
}
func (proof Proof) MarshalText() ([]byte, error) {
return []byte(base64.RawStdEncoding.EncodeToString(proof.buf)), nil
}
func (proof *Proof) UnmarshalText(buf []byte) error {
var err error
proof.buf, err = base64.RawStdEncoding.DecodeString(string(buf))
return err
}
func (req Request) MarshalText() ([]byte, error) {
return []byte(fmt.Sprintf("%s-%d-%s",
req.Alg,
req.Difficulty,
string(base64.RawStdEncoding.EncodeToString(req.Nonce)))), nil
}
func (req *Request) UnmarshalText(buf []byte) error {
bits := strings.SplitN(string(buf), "-", 3)
if len(bits) != 3 {
return fmt.Errorf("There should be two dashes in a PoW request")
}
alg := Algorithm(bits[0])
if alg != Sha2BDay {
return fmt.Errorf("%s: unsupported algorithm", bits[0])
}
req.Alg = alg
diff, err := strconv.Atoi(bits[1])
if err != nil {
return err
}
req.Difficulty = uint32(diff)
req.Nonce, err = base64.RawStdEncoding.DecodeString(bits[2])
if err != nil {
return err
}
return nil
}
// Convenience function to check whether a proof of work is fulfilled
func Check(request, proof string, data []byte) (bool, error) {
var req Request
var prf Proof
err := req.UnmarshalText([]byte(request))
if err != nil {
return false, err
}
err = prf.UnmarshalText([]byte(proof))
if err != nil {
return false, err
}
return prf.Check(req, data), nil
}
// Fulfil the proof-of-work request.
func (req *Request) Fulfil(data []byte) Proof {
switch req.Alg {
case Sha2BDay:
return Proof{fulfilSha2BDay(req.Nonce, req.Difficulty, data)}
default:
panic("No such algorithm")
}
}
// Convenience function to fulfil the proof of work request
func Fulfil(request string, data []byte) (string, error) {
var req Request
err := req.UnmarshalText([]byte(request))
if err != nil {
return "", err
}
proof := req.Fulfil(data)
s, _ := proof.MarshalText()
return string(s), nil
}
// Check whether the proof is ok
func (proof *Proof) Check(req Request, data []byte) bool {
switch req.Alg {
case Sha2BDay:
return checkSha2BDay(proof.buf, req.Nonce, data, req.Difficulty)
default:
panic("No such algorithm")
}
}

@ -0,0 +1,56 @@
package pow
import (
"testing"
)
func TestSha2BDay(t *testing.T) {
nonce := []byte{1, 2, 3, 4, 5}
data := []byte{2, 2, 3, 4, 5}
r := NewRequest(5, nonce)
proof, err := Fulfil(r, data)
if err != nil {
t.Fatalf("Fulfil: %v", err)
}
ok, err := Check(r, proof, data)
if err != nil {
t.Fatalf("Check: %v", err)
}
if !ok {
t.Fatalf("Proof of work should be ok")
}
ok, err = Check(r, proof, nonce)
if err != nil {
t.Fatalf("Check: %v", err)
}
if ok {
t.Fatalf("Proof of work should not be ok")
}
}
func BenchmarkCheck5(b *testing.B) { benchmarkCheck(5, b) }
func BenchmarkCheck10(b *testing.B) { benchmarkCheck(10, b) }
func BenchmarkCheck15(b *testing.B) { benchmarkCheck(15, b) }
func BenchmarkCheck20(b *testing.B) { benchmarkCheck(20, b) }
func benchmarkCheck(diff uint32, b *testing.B) {
req := NewRequest(diff, []byte{1, 2, 3, 4, 5})
prf, _ := Fulfil(req, []byte{6, 7, 8, 9})
b.ResetTimer()
for n := 0; n < b.N; n++ {
Check(req, prf, []byte{6, 7, 8, 9})
}
}
func BenchmarkFulfil5(b *testing.B) { benchmarkFulfil(5, b) }
func BenchmarkFulfil10(b *testing.B) { benchmarkFulfil(10, b) }
func BenchmarkFulfil15(b *testing.B) { benchmarkFulfil(15, b) }
func BenchmarkFulfil20(b *testing.B) { benchmarkFulfil(20, b) }
func benchmarkFulfil(diff uint32, b *testing.B) {
req := NewRequest(diff, []byte{1, 2, 3, 4, 5})
b.ResetTimer()
for n := 0; n < b.N; n++ {
Fulfil(req, []byte{6, 7, 8, 9})
}
}

@ -0,0 +1,25 @@
package pow_test
import (
"fmt" // imported as pow
"github.com/simple-rules/harmony-benchmark/pow"
)
func Example() {
// Create a proof of work request with difficulty 5
req := pow.NewRequest(5, []byte("some random nonce"))
fmt.Printf("req: %s\n", req)
// Fulfil the proof of work
proof, _ := pow.Fulfil(req, []byte("some bound data"))
fmt.Printf("proof: %s\n", proof)
// Check if the proof is correct
ok, _ := pow.Check(req, proof, []byte("some bound data"))
fmt.Printf("check: %v", ok)
// Output: req: sha2bday-5-c29tZSByYW5kb20gbm9uY2U
// proof: AAAAAAAAAAMAAAAAAAAADgAAAAAAAAAb
// check: true
}

@ -1,81 +0,0 @@
package pow
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"fmt"
"math"
"math/big"
)
var (
maxNonce = math.MaxUint32
)
const targetBits = 24
// ProofOfWork represents a proof-of-work
type ProofOfWork struct {
Challenge uint32
target *big.Int
FinalNonce uint32
}
// NewProofOfWork builds and returns a ProofOfWork
func NewProofOfWork(c uint32) *ProofOfWork {
target := big.NewInt(1)
target.Lsh(target, uint(256-targetBits))
pow := &ProofOfWork{Challenge: c, target: target, FinalNonce: 0}
return pow
}
func (pow *ProofOfWork) prepareData(nonce uint32) []byte {
challenge := make([]byte, 4)
binary.LittleEndian.PutUint32(challenge, pow.Challenge)
nonceB := make([]byte, 4)
binary.LittleEndian.PutUint32(nonceB, nonce)
data := bytes.Join(
[][]byte{
challenge,
nonceB,
},
[]byte{},
)
return data
}
// Run performs a proof-of-work
func (pow *ProofOfWork) Run() int {
var hashInt big.Int
var hash [32]byte
nonce := 0
for nonce < maxNonce {
data := pow.prepareData(uint32(nonce))
hash = sha256.Sum256(data)
fmt.Printf("\r%x", hash)
hashInt.SetBytes(hash[:])
if hashInt.Cmp(pow.target) == -1 {
pow.FinalNonce = uint32(nonce)
break
} else {
nonce++
}
}
fmt.Print("\n\n")
return nonce
}
// Validate validates block's PoW
func (pow *ProofOfWork) Validate(nonce uint32) bool {
var hashInt big.Int
data := pow.prepareData(nonce)
hash := sha256.Sum256(data)
hashInt.SetBytes(hash[:])
isValid := hashInt.Cmp(pow.target) == -1
return isValid
}

@ -0,0 +1,78 @@
package pow
import (
"bytes"
"crypto/sha256"
"encoding/binary"
)
func checkSha2BDay(proof []byte, nonce, data []byte, diff uint32) bool {
if len(proof) != 24 {
return false
}
prefix1 := proof[:8]
prefix2 := proof[8:16]
prefix3 := proof[16:]
if bytes.Equal(prefix1, prefix2) || bytes.Equal(prefix2, prefix3) ||
bytes.Equal(prefix1, prefix3) {
return false
}
resBuf := make([]byte, 32)
h := sha256.New()
h.Write(prefix1)
h.Write(data)
h.Write(nonce)
h.Sum(resBuf[:0])
res1 := binary.BigEndian.Uint64(resBuf) & ((1 << diff) - 1)
h.Reset()
h.Write(prefix2)
h.Write(data)
h.Write(nonce)
h.Sum(resBuf[:0])
res2 := binary.BigEndian.Uint64(resBuf) & ((1 << diff) - 1)
h.Reset()
h.Write(prefix3)
h.Write(data)
h.Write(nonce)
h.Sum(resBuf[:0])
res3 := binary.BigEndian.Uint64(resBuf) & ((1 << diff) - 1)
return res1 == res2 && res2 == res3
}
func fulfilSha2BDay(nonce []byte, diff uint32, data []byte) []byte {
// TODO make multithreaded if the difficulty is high enough.
// For light proof-of-work requests, the overhead of parallelizing is
// not worth it.
type Pair struct {
First, Second uint64
}
var i uint64 = 1
prefix := make([]byte, 8)
resBuf := make([]byte, 32)
lut := make(map[uint64]Pair)
h := sha256.New()
for {
binary.BigEndian.PutUint64(prefix, i)
h.Write(prefix)
h.Write(data)
h.Write(nonce)
h.Sum(resBuf[:0])
res := binary.BigEndian.Uint64(resBuf) & ((1 << diff) - 1)
pair, ok := lut[res]
if ok {
if pair.Second != 0 {
ret := make([]byte, 24)
binary.BigEndian.PutUint64(ret, pair.First)
binary.BigEndian.PutUint64(ret[8:], pair.Second)
copy(ret[16:], prefix)
return ret
}
lut[res] = Pair{First: pair.First, Second: i}
} else {
lut[res] = Pair{First: i}
}
h.Reset()
i++
}
}
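The proof produced by fulfilSha2BDay is always 24 bytes: three distinct 8-byte big-endian counters whose SHA-256 digests over prefix||data||nonce agree on the low `diff` bits, which is exactly what checkSha2BDay re-verifies. A hedged sketch that decodes a proof back into those three prefixes (the base64 alphabet matches Proof.MarshalText above):

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"log"

	"github.com/simple-rules/harmony-benchmark/pow"
)

func main() {
	req := pow.NewRequest(5, []byte("some random nonce"))
	proof, err := pow.Fulfil(req, []byte("some bound data"))
	if err != nil {
		log.Fatal(err)
	}

	// Proof.MarshalText emits raw (unpadded) standard base64 over the 24-byte buffer.
	raw, err := base64.RawStdEncoding.DecodeString(proof)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("prefix1:", binary.BigEndian.Uint64(raw[0:8]))
	fmt.Println("prefix2:", binary.BigEndian.Uint64(raw[8:16]))
	fmt.Println("prefix3:", binary.BigEndian.Uint64(raw[16:24]))
}
```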

@ -21,21 +21,25 @@ type MessageType int
const (
REGISTER MessageType = iota
ANNOUNCE
CONFIG
)
// Returns string name for the MessageType enum
func (msgType MessageType) String() string {
names := [...]string{
"REGISTER",
"ANNOUNCE",
"CONFIG",
}
if msgType < REGISTER || msgType > REGISTER {
if msgType < REGISTER || msgType > CONFIG {
return "Unknown"
}
return names[msgType]
}
// GetIdentityMessageType Get the consensus message type from the identity message
// GetIdentityMessageType Get the identity message type from the identity message
func GetIdentityMessageType(message []byte) (MessageType, error) {
if len(message) < 1 {
return 0, errors.New("Failed to get identity message type: no data available.")

@ -3,6 +3,7 @@ package main
import (
"flag"
"fmt"
"time"
"github.com/simple-rules/harmony-benchmark/identitychain"
"github.com/simple-rules/harmony-benchmark/p2p"
@ -15,5 +16,15 @@ func main() {
peer := p2p.Peer{Ip: *ip, Port: *port}
IDC := identitychain.New(peer)
fmt.Println(IDC)
epochTimer := time.NewTicker(10 * time.Second)
go func() {
for t := range epochTimer.C {
fmt.Println("Changing epoch at ", t)
IDC.Shard()
}
}()
IDC.StartServer()
}

@ -1,21 +1,30 @@
package main
import "fmt"
import (
"flag"
"fmt"
"time"
// import (
// "flag"
// "github.com/simple-rules/harmony-benchmark/p2p"
// "github.com/simple-rules/harmony-benchmark/waitnode"
// )
"github.com/simple-rules/harmony-benchmark/node"
"github.com/simple-rules/harmony-benchmark/p2p"
)
func main() {
fmt.Println("hello")
// ip := flag.String("ip", "127.0.0.0", "IP of the node")
// port := flag.String("port", "8080", "port of the node")
// flag.Parse()
// peer := p2p.Peer{Ip: *ip, Port: *port}
// idcpeer := p2p.Peer{Ip: "localhost", Port: "9000"} //Hardcoded here.
// node := waitnode.New(peer)
// node.ConnectIdentityChain(idcpeer)
ip := flag.String("ip", "localhost", "IP of the node")
port := flag.String("port", "8080", "port of the node")
flag.Parse()
i := 0
peer := p2p.Peer{Ip: *ip, Port: *port}
fmt.Println("Now onto node i", i)
idcpeer := p2p.Peer{Ip: "localhost", Port: "9000"} //Hardcoded here.
node := node.NewWaitNode(peer, idcpeer)
go func() {
node.ConnectIdentityChain()
}()
node.StartServer(*port)
time.Sleep(5 * time.Second)
//}
}

@ -148,6 +148,9 @@ func (config *DistributionConfig) GetConfigEntries() []ConfigEntry {
}
func (config *DistributionConfig) GetMyConfigEntry(ip string, port string) *ConfigEntry {
if config.config == nil {
return nil
}
for _, entry := range config.config {
if entry.IP == ip && entry.Port == port {
return &entry

@ -1,82 +0,0 @@
package waitnode
import (
"bytes"
"crypto/sha256"
"fmt"
"math"
"math/big"
)
var (
maxNonce = math.MaxInt64
)
const targetBits = 24
// ProofOfWork represents a proof-of-work
type ProofOfWork struct {
Challenge int32
target *big.Int
}
// NewProofOfWork builds and returns a ProofOfWork
func NewProofOfWork(c int32) *ProofOfWork {
target := big.NewInt(1)
target.Lsh(target, uint(256-targetBits))
pow := &ProofOfWork{c, target}
return pow
}
func (pow *ProofOfWork) prepareData(nonce int) []byte {
data := bytes.Join(
[][]byte{
pow.Challenge.,
IntToHex(int64(targetBits)),
IntToHex(int64(nonce)),
},
[]byte{},
)
return data
}
// Run performs a proof-of-work
func (pow *ProofOfWork) Run() (int, []byte) {
var hashInt big.Int
var hash [32]byte
nonce := 0
fmt.Printf("Mining the block containing \"%s\"\n", pow.block.Data)
for nonce < maxNonce {
data := pow.prepareData(nonce)
hash = sha256.Sum256(data)
fmt.Printf("\r%x", hash)
hashInt.SetBytes(hash[:])
if hashInt.Cmp(pow.target) == -1 {
break
} else {
nonce++
}
}
fmt.Print("\n\n")
return nonce, hash[:]
}
// Validate validates block's PoW
func (pow *ProofOfWork) Validate() bool {
var hashInt big.Int
data := pow.prepareData(pow.challenge.Nonce)
hash := sha256.Sum256(data)
hashInt.SetBytes(hash[:])
isValid := hashInt.Cmp(pow.target) == -1
return isValid
}

@ -1,82 +0,0 @@
package waitnode
import (
"bytes"
"crypto/sha256"
"encoding/gob"
"log"
"github.com/simple-rules/harmony-benchmark/p2p"
"github.com/simple-rules/harmony-benchmark/utils"
)
//WaitNode is for nodes waiting to join consensus
type WaitNode struct {
Peer p2p.Peer
ID uint16
SeedPeers p2p.Peer
}
// StartServer a server and process the request by a handler.
func (node *WaitNode) StartServer() {
log.Printf("Starting waitnode on server %s and port %s", node.Peer.Ip, node.Peer.Port)
}
// //ConnectIdentityChain connects to identity chain
// func (node *WaitNode) ConnectIdentityChain(peer p2p.Peer) {
// pow := NewProofOfWork(10)
// nonce := pow.Run()
// if pow.FinalNonce != uint32(nonce) {
// fmt.Println("Something wrong with POW")
// }
// p2p.SendMessage(peer, identity.ConstructIdentityMessage(identity.REGISTER, node.SerializeWaitNode()))
// }
//Constructs node-id by hashing the IP.
func calculateHash(num string) []byte {
var hashes [][]byte
hashes = append(hashes, utils.ConvertFixedDataIntoByteArray(num))
hash := sha256.Sum256(bytes.Join(hashes, []byte{}))
return hash[:]
}
// //SerializePOW serializes the node
// func SerializePOW(pow ProofOfWork) []byte {
// var result bytes.Buffer
// encoder := gob.NewEncoder(&pow)
// err := encoder.Encode(pow)
// if err != nil {
// log.Panic(err.Error())
// }
// return pow.Bytes()
// }
//SerializeWaitNode serializes the node
func (node *WaitNode) SerializeWaitNode() []byte {
var result bytes.Buffer
encoder := gob.NewEncoder(&result)
err := encoder.Encode(node)
if err != nil {
log.Panic(err.Error())
}
return result.Bytes()
}
// DeserializeWaitNode deserializes the node
func DeserializeWaitNode(d []byte) *WaitNode {
var wn WaitNode
decoder := gob.NewDecoder(bytes.NewReader(d))
err := decoder.Decode(&wn)
if err != nil {
log.Panic(err)
}
return &wn
}
// New Create a new Node
func New(Peer p2p.Peer) *WaitNode {
node := WaitNode{}
node.Peer = Peer
node.ID = utils.GetUniqueIdFromPeer(Peer)
return &node
}

@ -1,18 +0,0 @@
package waitnode
import (
"testing"
"github.com/simple-rules/harmony-benchmark/p2p"
)
func TestNewNode(test *testing.T) {
p := p2p.Peer{Ip: "127.0.0.1", Port: "8080"}
wn := New(p)
b := wn.SerializeWaitNode()
wnd := DeserializeWaitNode(b)
if *wn != *wnd {
test.Error("Serialization is not working")
}
}