Merge branch 'master' into minh

pull/76/head
Minh Doan 6 years ago
commit 1323877d61
  1. client/btctxgen/main.go (293 lines changed)
  2. client/btctxiter/btctxiter.go (111 lines changed)
  3. identitychain/identitychain.go (24 lines changed)
  4. identitychain/identitychain_handler.go (111 lines changed)
  5. node/node.go (36 lines changed)
  6. node/node_handler.go (3 lines changed)
  7. pow/LICENSE (21 lines changed)
  8. pow/README.md (57 lines changed)
  9. pow/api.go (128 lines changed)
  10. pow/api_test.go (56 lines changed)
  11. pow/example_test.go (25 lines changed)
  12. pow/sha2bday.go (78 lines changed)

@@ -1,293 +0,0 @@
/*
The btctxgen iterates over the btc tx history block by block, transaction by transaction.
The btctxiter provides a simple API called `NextTx` for us to move through TXs one by one.
Same as txgen, iterate on each shard to generate simulated TXs (GenerateSimulatedTransactions):
1. Get a new btc tx
2. If it's a coinbase tx, create a corresponding coinbase tx in our blockchain
3. Otherwise, create a normal TX, which may or may not be cross-shard, depending on whether all the TX inputs belong to the current shard.
Same as txgen, send single-shard txs shard by shard, then broadcast cross-shard txs.
TODO
Some todos for ricl
* correct the logic to output to one of the input shards, rather than the current shard
*/
package main
import (
"flag"
"fmt"
"sync"
"time"
"github.com/simple-rules/harmony-benchmark/blockchain"
"github.com/simple-rules/harmony-benchmark/client"
"github.com/simple-rules/harmony-benchmark/client/btctxiter"
client_config "github.com/simple-rules/harmony-benchmark/client/config"
"github.com/simple-rules/harmony-benchmark/consensus"
"github.com/simple-rules/harmony-benchmark/crypto/pki"
"github.com/simple-rules/harmony-benchmark/log"
"github.com/simple-rules/harmony-benchmark/node"
"github.com/simple-rules/harmony-benchmark/p2p"
proto_node "github.com/simple-rules/harmony-benchmark/proto/node"
)
type txGenSettings struct {
crossShard bool
maxNumTxsPerBatch int
}
type TXRef struct {
txID [32]byte
shardID uint32
toAddress [20]byte // we use the same toAddress in btc and hmy
}
var (
utxoPoolMutex sync.Mutex
setting txGenSettings
iter btctxiter.BTCTXIterator
utxoMapping map[string]TXRef // btcTXID to { txID, shardID }
// map from bitcoin address to a int value (the privKey in hmy)
addressMapping map[[20]byte]int
currentInt int
)
func getHmyInt(btcAddr [20]byte) int {
privKey, ok := addressMapping[btcAddr]
if !ok { // If the btc address is not mapped yet, assign it the next hmy int key
privKey = currentInt
addressMapping[btcAddr] = privKey
currentInt++
}
return privKey
}
// Generates at most "maxNumTxs" simulated transactions based on the current UtxoPools of all shards.
// The transactions are generated by going through the existing utxos and
// randomly selecting a subset of them as the input for each new transaction. The output
// addresses of the new transaction are randomly selected from [0 - N), where N is the total number of fake addresses.
//
// When crossShard=true, besides the selected utxo input, select another valid utxo as input from the same address in a second shard.
// Similarly, generate another utxo output in that second shard.
//
// NOTE: the genesis block should contain N coinbase transactions which add
// tokens (1000) to each address in [0 - N). See node.AddTestingAddresses()
//
// Params:
// shardID - the shardID for the current shard
// dataNodes - nodes containing the utxopools of all shards
// Returns:
// all single-shard txs
// all cross-shard txs
func generateSimulatedTransactions(shardID int, dataNodes []*node.Node) ([]*blockchain.Transaction, []*blockchain.Transaction) {
/*
UTXO map structure:
{
address: {
txID: {
outputIndex: value
}
}
}
*/
utxoPoolMutex.Lock()
txs := []*blockchain.Transaction{}
crossTxs := []*blockchain.Transaction{}
nodeShardID := dataNodes[shardID].Consensus.ShardID
cnt := 0
LOOP:
for {
btcTx := iter.NextTx()
if btcTx == nil {
log.Error("Failed to parse tx", "height", iter.GetBlockIndex())
continue
}
tx := blockchain.Transaction{}
isCrossShardTx := false
if btctxiter.IsCoinBaseTx(btcTx) {
// ricl: coinbase tx should just have one txo
btcTXO := btcTx.Vout[0]
btcTXOAddr := btcTXO.ScriptPubKey.Addresses[0]
var toAddress [20]byte
copy(toAddress[:], btcTXOAddr) // TODO(ricl): string to [20]byte
hmyInt := getHmyInt(toAddress)
tx = *blockchain.NewCoinbaseTX(pki.GetAddressFromInt(hmyInt), "", nodeShardID)
utxoMapping[btcTx.Hash] = TXRef{tx.ID, nodeShardID, toAddress}
} else {
var btcFromAddresses [][20]byte
for _, btcTXI := range btcTx.Vin {
btcTXIDStr := btcTXI.Txid
txRef := utxoMapping[btcTXIDStr] // find the corresponding harmony tx info
if txRef.shardID != nodeShardID {
isCrossShardTx = true
}
tx.TxInput = append(tx.TxInput, *blockchain.NewTXInput(blockchain.NewOutPoint(&txRef.txID, btcTXI.Vout), [20]byte{}, txRef.shardID))
// Add the from address to the array, so that we can later use it to sign the tx.
btcFromAddresses = append(btcFromAddresses, txRef.toAddress)
}
for _, btcTXO := range btcTx.Vout {
for _, btcTXOAddr := range btcTXO.ScriptPubKey.Addresses {
var toAddress [20]byte
copy(toAddress[:], btcTXOAddr) //TODO(ricl): string to [20]byte
txo := blockchain.TXOutput{Amount: int(btcTXO.Value), Address: toAddress, ShardID: nodeShardID}
tx.TxOutput = append(tx.TxOutput, txo)
utxoMapping[btcTx.Txid] = TXRef{tx.ID, nodeShardID, toAddress}
}
}
// get private key and sign the tx
for _, btcFromAddress := range btcFromAddresses {
hmyInt := getHmyInt(btcFromAddress)
tx.SetID() // TODO(RJ): figure out the correct way to set Tx ID.
tx.Sign(pki.GetPrivateKeyScalarFromInt(hmyInt))
}
}
if isCrossShardTx {
crossTxs = append(crossTxs, &tx)
} else {
txs = append(txs, &tx)
}
// log.Debug("[Generator] transformed btc tx", "block height", iter.GetBlockIndex(), "block tx count", iter.GetBlock().TxCount, "block tx cnt", len(iter.GetBlock().Txs), "txi", len(tx.TxInput), "txo", len(tx.TxOutput), "txCount", cnt)
cnt++
if cnt >= setting.maxNumTxsPerBatch {
break LOOP
}
}
utxoPoolMutex.Unlock()
log.Debug("[Generator] generated transations", "single-shard", len(txs), "cross-shard", len(crossTxs))
return txs, crossTxs
}
func initClient(clientNode *node.Node, clientPort string, shardIDLeaderMap *map[uint32]p2p.Peer, nodes *[]*node.Node) {
if clientPort == "" {
return
}
clientNode.Client = client.NewClient(shardIDLeaderMap)
// This func is used to update the client's utxopool when new blocks are received from the leaders
updateBlocksFunc := func(blocks []*blockchain.Block) {
log.Debug("Received new block from leader", "len", len(blocks))
for _, block := range blocks {
for _, node := range *nodes {
if node.Consensus.ShardID == block.ShardID {
log.Debug("Adding block from leader", "shardID", block.ShardID)
// Add it to blockchain
utxoPoolMutex.Lock()
node.AddNewBlock(block)
utxoPoolMutex.Unlock()
} else {
continue
}
}
}
}
clientNode.Client.UpdateBlocks = updateBlocksFunc
// Start the client server to listen to leader's message
go func() {
clientNode.StartServer(clientPort)
}()
}
func main() {
configFile := flag.String("config_file", "local_config.txt", "file containing all ip addresses and config")
maxNumTxsPerBatch := flag.Int("max_num_txs_per_batch", 10000, "number of transactions to send per message")
logFolder := flag.String("log_folder", "latest", "the folder collecting the logs of this execution")
flag.Parse()
// Read the configs
config := client_config.NewConfig()
config.ReadConfigFile(*configFile)
shardIDLeaderMap := config.GetShardIDToLeaderMap()
// Do cross-shard txs if there is more than one shard
setting.crossShard = len(shardIDLeaderMap) > 1
setting.maxNumTxsPerBatch = *maxNumTxsPerBatch
// TODO(Richard): refactor this chunk into a single method
// Setup a logger to stdout and log file.
logFileName := fmt.Sprintf("./%v/txgen.log", *logFolder)
h := log.MultiHandler(
log.StdoutHandler,
log.Must.FileHandler(logFileName, log.LogfmtFormat()), // Log to file
// log.Must.NetHandler("tcp", ":3000", log.JSONFormat()) // Log to remote
)
log.Root().SetHandler(h)
iter.Init()
utxoMapping = make(map[string]TXRef)
addressMapping = make(map[[20]byte]int)
currentInt = 1 // start from address 1
// Nodes containing utxopools to mirror the shards' data in the network
nodes := []*node.Node{}
for shardID := range shardIDLeaderMap {
node := node.New(&consensus.Consensus{ShardID: shardID}, nil)
// Assign many fake addresses so we have enough addresses to play with at first
node.AddTestingAddresses(10000)
nodes = append(nodes, node)
}
// Client/txgenerator server node setup
clientPort := config.GetClientPort()
consensusObj := consensus.NewConsensus("0", clientPort, "0", nil, p2p.Peer{})
clientNode := node.New(consensusObj, nil)
initClient(clientNode, clientPort, &shardIDLeaderMap, &nodes)
// Transaction generation process
time.Sleep(3 * time.Second) // wait for nodes to be ready
leaders := []p2p.Peer{}
for _, leader := range shardIDLeaderMap {
leaders = append(leaders, leader)
}
for {
allCrossTxs := []*blockchain.Transaction{}
// Generate simulated transactions
for shardID, leader := range shardIDLeaderMap {
txs, crossTxs := generateSimulatedTransactions(int(shardID), nodes)
allCrossTxs = append(allCrossTxs, crossTxs...)
log.Debug("[Generator] Sending single-shard txs ...", "leader", leader, "numTxs", len(txs), "numCrossTxs", len(crossTxs), "block height", iter.GetBlockIndex())
msg := proto_node.ConstructTransactionListMessage(txs)
p2p.SendMessage(leader, msg)
// Note cross shard txs are later sent in batch
}
if len(allCrossTxs) > 0 {
log.Debug("[Generator] Broadcasting cross-shard txs ...", "allCrossTxs", len(allCrossTxs))
msg := proto_node.ConstructTransactionListMessage(allCrossTxs)
p2p.BroadcastMessage(leaders, msg)
// Put cross shard tx into a pending list waiting for proofs from leaders
if clientPort != "" {
clientNode.Client.PendingCrossTxsMutex.Lock()
for _, tx := range allCrossTxs {
clientNode.Client.PendingCrossTxs[tx.ID] = tx
}
clientNode.Client.PendingCrossTxsMutex.Unlock()
}
}
time.Sleep(500 * time.Millisecond) // Send a batch of transactions periodically
}
// Send a stop message to stop the nodes at the end
msg := proto_node.ConstructStopMessage()
peers := append(config.GetValidators(), leaders...)
p2p.BroadcastMessage(peers, msg)
}
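The cross-shard rule described in the file comment (a tx stays single-shard only if all of its inputs belong to the current shard) reduces to a one-line check over the recorded TXRef shard IDs. A minimal sketch of that rule, with a hypothetical helper name that is not part of this commit:

```go
package main

import "fmt"

// isCrossShard is a hypothetical helper capturing the rule used by
// generateSimulatedTransactions: a tx is treated as cross-shard as soon as
// any input's recorded shard differs from the shard being generated for.
func isCrossShard(inputShardIDs []uint32, currentShardID uint32) bool {
	for _, sid := range inputShardIDs {
		if sid != currentShardID {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isCrossShard([]uint32{0, 1}, 0)) // true: one input lives on another shard
	fmt.Println(isCrossShard([]uint32{0, 0}, 0)) // false: all inputs are local
}
```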

@@ -1,111 +0,0 @@
// Uses btcd node.
// Use `GetBlockVerboseTx` to get the block and its transactions at once.
// This way is faster than fetching each transaction separately.
package btctxiter
import (
"io/ioutil"
"log"
"path/filepath"
"github.com/btcsuite/btcd/btcjson"
"github.com/btcsuite/btcd/rpcclient"
"github.com/btcsuite/btcutil"
)
// BTCTXIterator is a btc transaction iterator.
type BTCTXIterator struct {
blockIndex int64
block *btcjson.GetBlockVerboseResult
txIndex int
tx *btcjson.TxRawResult
client *rpcclient.Client
}
// Init initializes the BTCTXIterator by connecting to a local btcd node over RPC.
func (iter *BTCTXIterator) Init() {
btcdHomeDir := btcutil.AppDataDir("btcd", false)
certs, err := ioutil.ReadFile(filepath.Join(btcdHomeDir, "rpc.cert"))
if err != nil {
log.Fatal(err)
}
connCfg := &rpcclient.ConnConfig{
Host: "localhost:8334", // This goes to btcd
Endpoint: "ws",
User: "",
Pass: "",
Certificates: certs,
}
iter.client, err = rpcclient.New(connCfg, nil)
if err != nil {
log.Fatal(err)
}
iter.blockIndex = 0 // the genesis block cannot be retrieved; skip it intentionally.
iter.block = nil
iter.nextBlock()
// defer iter.client.Shutdown()
}
// NextTx advances to the next transaction and returns it.
func (iter *BTCTXIterator) NextTx() *btcjson.TxRawResult {
iter.txIndex++
if iter.txIndex >= len(iter.block.RawTx) {
iter.nextBlock()
iter.txIndex++
}
iter.tx = &iter.block.RawTx[iter.txIndex]
// log.Println(iter.blockIndex, iter.txIndex, hashes[iter.txIndex])
return iter.tx
}
// GetBlockIndex gets the index/height of the current block
func (iter *BTCTXIterator) GetBlockIndex() int64 {
return iter.blockIndex
}
// GetBlock gets the current block
func (iter *BTCTXIterator) GetBlock() *btcjson.GetBlockVerboseResult {
return iter.block
}
// GetTxIndex gets the index of the current transaction
func (iter *BTCTXIterator) GetTxIndex() int {
return iter.txIndex
}
// GetTx gets the current transaction
func (iter *BTCTXIterator) GetTx() *btcjson.TxRawResult {
return iter.tx
}
func (iter *BTCTXIterator) resetTx() {
iter.txIndex = -1
iter.tx = nil
}
// Move to the next block
func (iter *BTCTXIterator) nextBlock() *btcjson.GetBlockVerboseResult {
iter.blockIndex++
hash, err := iter.client.GetBlockHash(iter.blockIndex)
if err != nil {
log.Panic("Failed to get block hash at", iter.blockIndex, err)
}
iter.block, err = iter.client.GetBlockVerboseTx(hash)
if err != nil {
log.Panic("Failed to get block", iter.blockIndex, err)
}
iter.resetTx()
return iter.block
}
// IsCoinBaseTx returns true if tx is a coinbase tx.
func IsCoinBaseTx(tx *btcjson.TxRawResult) bool {
// A coin base must only have one transaction input.
if len(tx.Vin) != 1 {
return false
}
return tx.Vin[0].IsCoinBase()
}
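A short usage sketch of this iterator may help readers of the generator above. It assumes a btcd node is reachable at localhost:8334 with its rpc.cert in the default btcd data directory (the setup Init expects), walks the first few transactions, and flags coinbases:

```go
package main

import (
	"fmt"

	"github.com/simple-rules/harmony-benchmark/client/btctxiter"
)

func main() {
	var iter btctxiter.BTCTXIterator
	iter.Init() // connects to the local btcd node and loads the first block
	for i := 0; i < 10; i++ {
		tx := iter.NextTx()
		fmt.Printf("block %d tx %s coinbase=%v\n",
			iter.GetBlockIndex(), tx.Txid, btctxiter.IsCoinBaseTx(tx))
	}
}
```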

@@ -2,7 +2,6 @@ package identitychain
import (
"fmt"
"math"
"math/rand"
"net"
"os"
@@ -20,7 +19,6 @@ var identityPerBlock = 100000
type IdentityChain struct {
//Identities []*IdentityBlock //No need to have the identity block as of now
Identities []*node.Node
PendingIdentities []*node.Node
log log.Logger
Peer p2p.Peer
SelectedIdentitites []*node.Node
@@ -31,7 +29,6 @@ type IdentityChain struct {
CurrentEpochStartTime int64
NumberOfShards int
NumberOfNodesInShard int
PowMap map[p2p.Peer]string
}
func seekRandomNumber(EpochNum int, SelectedIdentitites []*node.Node) int {
@@ -72,6 +69,11 @@ func (IDC *IdentityChain) BroadCastNewConfiguration() {
}
//BroadCast Peer Info to Node
// func (IDC *IdentityChain) SendPeerInfo(Node node) {
// return
// }
//CreateShardAssignment
func (IDC *IdentityChain) CreateShardAssignment() {
num := seekRandomNumber(IDC.EpochNum, IDC.SelectedIdentitites)
@@ -98,15 +100,15 @@ func (IDC *IdentityChain) generateRandomPermutations(num int) {
// SelectIds selects identities from the pending list.
func (IDC *IdentityChain) SelectIds() {
selectNumber := IDC.NumberOfNodesInShard - len(IDC.Identities)
// selectNumber := IDC.NumberOfNodesInShard - len(IDC.Identities)
// Insert the lines below once you have a identity block
// IB := IDC.GetLatestBlock()
// currentIDS := IB.GetIdentities()
currentIDS := IDC.Identities
selectNumber = int(math.Min(float64(len(IDC.PendingIdentities)), float64(selectNumber)))
pending := IDC.PendingIdentities[:selectNumber]
IDC.SelectedIdentitites = append(currentIDS, pending...)
IDC.PendingIdentities = []*node.Node{}
// currentIDS := IDC.Identities
// selectNumber = int(math.Min(float64(len(IDC.PendingIdentities)), float64(selectNumber)))
// pending := IDC.PendingIdentities[:selectNumber]
// IDC.SelectedIdentitites = append(currentIDS, pending...)
// IDC.PendingIdentities = []*node.Node{}
}
@@ -153,9 +155,9 @@ func New(Peer p2p.Peer) *IdentityChain {
IDC.NumberOfShards = 1 //to be filled via global config
IDC.NumberOfNodesInShard = 500 //to be filled via global config
IDC.Identities = make([]*node.Node, 0)
IDC.PendingIdentities = make([]*node.Node, 0)
// IDC.PendingIdentities = make([]*node.Node, 0)
IDC.SelectedIdentitites = make([]*node.Node, 0)
IDC.PowMap = make(map[p2p.Peer]string)
// IDC.PowMap = make(map[p2p.Peer]string)
return &IDC
}

@@ -1,17 +1,11 @@
package identitychain
import (
"bytes"
"fmt"
"math/rand"
"net"
"os"
"strconv"
"time"
"github.com/simple-rules/harmony-benchmark/node"
"github.com/simple-rules/harmony-benchmark/p2p"
"github.com/simple-rules/harmony-benchmark/pow"
"github.com/simple-rules/harmony-benchmark/proto"
proto_identity "github.com/simple-rules/harmony-benchmark/proto/identity"
)
@@ -56,9 +50,9 @@ func (IDC *IdentityChain) IdentityChainHandler(conn net.Conn) {
}
switch idMsgType {
case proto_identity.Register:
IDC.registerIdentity(msgPayload)
// IDC.registerIdentity(msgPayload)
case proto_identity.Announce:
IDC.acceptNewConnection(msgPayload)
// IDC.acceptNewConnection(msgPayload)
}
}
@@ -66,55 +60,56 @@ func (IDC *IdentityChain) IdentityChainHandler(conn net.Conn) {
}
}
func (IDC *IdentityChain) registerIdentity(msgPayload []byte) {
payload, err := proto_identity.GetIdentityMessagePayload(msgPayload)
if err != nil {
IDC.log.Error("identity payload not read")
} else {
fmt.Println("identity payload read")
}
fmt.Println("we are now registering identities")
offset := 0
proof := payload[offset : offset+32]
offset = offset + 32
Node := node.DeserializeWaitNode(payload[offset:])
req := IDC.PowMap[Node.Self]
ok, err := pow.Check(req, string(proof), []byte(""))
fmt.Println(err)
if ok {
fmt.Println("Proof of work accepted")
IDC.PendingIdentities = append(IDC.PendingIdentities, Node)
fmt.Println(len(IDC.PendingIdentities)) //Fix why IDC does not have log working.
} else {
fmt.Println("identity proof of work not accepted")
}
}
// TODO(alok): You removed pow package.
// func (IDC *IdentityChain) registerIdentity(msgPayload []byte) {
// payload, err := proto_identity.GetIdentityMessagePayload(msgPayload)
// if err != nil {
// IDC.log.Error("identity payload not read")
// } else {
// fmt.Println("identity payload read")
// }
// fmt.Println("we are now registering identities")
// offset := 0
// proof := payload[offset : offset+32]
// offset = offset + 32
// Node := node.DeserializeWaitNode(payload[offset:])
// req := IDC.PowMap[Node.Self]
// ok, err := pow.Check(req, string(proof), []byte(""))
// fmt.Println(err)
// if ok {
// fmt.Println("Proof of work accepted")
// IDC.PendingIdentities = append(IDC.PendingIdentities, Node)
// fmt.Println(len(IDC.PendingIdentities)) //Fix why IDC does not have log working.
// } else {
// fmt.Println("identity proof of work not accepted")
// }
// }
func (IDC *IdentityChain) acceptNewConnection(msgPayload []byte) {
// func (IDC *IdentityChain) acceptNewConnection(msgPayload []byte) {
identityPayload, err := proto_identity.GetIdentityMessagePayload(msgPayload)
if err != nil {
fmt.Println("There was a error in reading the identity payload")
} else {
fmt.Println("accepted new connection")
}
fmt.Println("Sleeping for 2 secs ...")
time.Sleep(2 * time.Second)
Node := node.DeserializeWaitNode(identityPayload)
buffer := bytes.NewBuffer([]byte{})
src := rand.NewSource(time.Now().UnixNano())
rnd := rand.New(src)
challengeNonce := int((rnd.Int31()))
req := pow.NewRequest(5, []byte(strconv.Itoa(challengeNonce)))
IDC.PowMap[Node.Self] = req
buffer.Write([]byte(req))
// 32 byte block hash
// buffer.Write(prevBlockHash)
// The message is missing the previous BlockHash because we don't actively maintain an identitychain.
// This can be included in the fulfill request.
// Message should be encrypted and then signed to follow PKE.
//IDC should accept node publickey, encrypt the nonce and blockhash
// Then sign the message by own private key and send the message back.
msgToSend := proto_identity.ConstructIdentityMessage(proto_identity.Register, buffer.Bytes())
p2p.SendMessage(Node.Self, msgToSend)
}
// identityPayload, err := proto_identity.GetIdentityMessagePayload(msgPayload)
// if err != nil {
// fmt.Println("There was a error in reading the identity payload")
// } else {
// fmt.Println("accepted new connection")
// }
// fmt.Println("Sleeping for 2 secs ...")
// time.Sleep(2 * time.Second)
// Node := node.DeserializeWaitNode(identityPayload)
// buffer := bytes.NewBuffer([]byte{})
// src := rand.NewSource(time.Now().UnixNano())
// rnd := rand.New(src)
// challengeNonce := int((rnd.Int31()))
// req := pow.NewRequest(5, []byte(strconv.Itoa(challengeNonce)))
// IDC.PowMap[Node.Self] = req
// buffer.Write([]byte(req))
// // 32 byte block hash
// // buffer.Write(prevBlockHash)
// // The message is missing the previous BlockHash because we don't actively maintain an identitychain.
// // This can be included in the fulfill request.
// // Message should be encrypted and then signed to follow PKE.
// //IDC should accept node publickey, encrypt the nonce and blockhash
// // Then sign the message by own private key and send the message back.
// msgToSend := proto_identity.ConstructIdentityMessage(proto_identity.Register, buffer.Bytes())
// p2p.SendMessage(Node.Self, msgToSend)
// }

@@ -15,7 +15,6 @@ import (
"github.com/simple-rules/harmony-benchmark/db"
"github.com/simple-rules/harmony-benchmark/log"
"github.com/simple-rules/harmony-benchmark/p2p"
"github.com/simple-rules/harmony-benchmark/pow"
"github.com/simple-rules/harmony-benchmark/proto/identity"
"github.com/simple-rules/harmony-benchmark/syncing"
)
@@ -168,23 +167,24 @@ func NewNodefromIDC(node *Node, consensus *consensus.Consensus, db *db.LDBDataba
return node
}
func (node *Node) processPOWMessage(message []byte) {
payload, err := identity.GetIdentityMessagePayload(message)
if err != nil {
fmt.Println("Could not read payload")
}
IDCPeer := node.IDCPeer
// 4 byte challengeNonce id
req := string(payload)
proof, _ := pow.Fulfil(req, []byte("")) //"This could be blockhasdata"
buffer := bytes.NewBuffer([]byte{})
proofBytes := make([]byte, 32) //proof seems to be 32 byte here
copy(proofBytes[:], proof)
buffer.Write(proofBytes)
buffer.Write(node.SerializeWaitNode())
msgPayload := buffer.Bytes()
p2p.SendMessage(IDCPeer, identity.ConstructIdentityMessage(identity.Register, msgPayload))
}
// TODO(alok): Fix it or revert the cl on 11/8.
// func (node *Node) processPOWMessage(message []byte) {
// payload, err := identity.GetIdentityMessagePayload(message)
// if err != nil {
// fmt.Println("Could not read payload")
// }
// IDCPeer := node.IDCPeer
// // 4 byte challengeNonce id
// req := string(payload)
// proof, _ := pow.Fulfil(req, []byte("")) //"This could be blockhasdata"
// buffer := bytes.NewBuffer([]byte{})
// proofBytes := make([]byte, 32) //proof seems to be 32 byte here
// copy(proofBytes[:], proof)
// buffer.Write(proofBytes)
// buffer.Write(node.SerializeWaitNode())
// msgPayload := buffer.Bytes()
// p2p.SendMessage(IDCPeer, identity.ConstructIdentityMessage(identity.Register, msgPayload))
// }
// SerializeWaitNode serializes the node
// https://stackoverflow.com/questions/12854125/how-do-i-dump-the-struct-into-the-byte-array-without-reflection/12854659#12854659
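The commented-out registration flow above frames its Register payload as a fixed 32-byte proof-of-work field followed by the serialized wait node. A small sketch of that framing; buildRegisterPayload is a hypothetical name, and the real code uses node.SerializeWaitNode for the second part:

```go
package main

import (
	"bytes"
	"fmt"
)

// buildRegisterPayload mirrors the framing in the commented-out
// processPOWMessage: a 32-byte proof field, then the serialized node bytes
// (the identity chain handler reads exactly 32 bytes of proof).
func buildRegisterPayload(proof string, serializedNode []byte) []byte {
	proofBytes := make([]byte, 32)
	copy(proofBytes, proof)
	buf := bytes.NewBuffer(proofBytes)
	buf.Write(serializedNode)
	return buf.Bytes()
}

func main() {
	payload := buildRegisterPayload("AAAAAAAAAAMAAAAAAAAADgAAAAAAAAAb", []byte("serialized-wait-node"))
	fmt.Println(len(payload)) // 32 + len(node bytes)
}
```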

@@ -77,7 +77,8 @@ func (node *Node) NodeHandler(conn net.Conn) {
switch messageType {
case proto_identity.Register:
fmt.Println("received a identity message")
node.processPOWMessage(msgPayload)
// TODO(ak): fix it.
// node.processPOWMessage(msgPayload)
case proto_identity.Announce:
node.log.Error("Announce message should be sent to IdentityChain")
}

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2018 Bas Westerbaan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -1,57 +0,0 @@
go-pow
======
`go-pow` is a simple Go package to add (asymmetric) *Proof of Work* to your service.
To create a Proof-of-Work request (with difficulty 5), use `pow.NewRequest`:
```go
req := pow.NewRequest(5, someRandomNonce)
```
This returns a string like `sha2bday-5-c29tZSByYW5kb20gbm9uY2U`,
which can be passed on to the client.
The client fulfils the proof of work by running `pow.Fulfil`:
```go
proof, _ := pow.Fulfil(req, []byte("some bound data"))
```
The client returns the proof (in this case `AAAAAAAAAAMAAAAAAAAADgAAAAAAAAAb`)
to the server, which can check that it is indeed a valid proof of work by running:
```go
ok, _ := pow.Check(req, proof, []byte("some bound data"))
```
Notes
-----
1. There should be sufficient randomness in either the `nonce` passed to
`NewRequest` or the `data` passed to `Fulfil` and `Check`.
Thus it is fine to use the same bound `data` for every client, if every client
gets a different `nonce` in its proof-of-work request (see the sketch after these notes).
It is also fine to use the same `nonce` in the proof-of-work request,
if every client is (by the encapsulating protocol) forced to use
different bound `data`.
2. The work to fulfil a request scales exponentially in the difficulty parameter.
The work to check that a proof is correct remains constant:
```
Check on Difficulty=5 500000 2544 ns/op
Check on Difficulty=10 500000 2561 ns/op
Check on Difficulty=15 500000 2549 ns/op
Check on Difficulty=20 500000 2525 ns/op
Fulfil on Difficulty=5 100000 15725 ns/op
Fulfil on Difficulty=10 30000 46808 ns/op
Fulfil on Difficulty=15 2000 955606 ns/op
Fulfil on Difficulty=20 200 6887722 ns/op
```
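Below is a small sketch of the pattern from note 1, using the import path under which this package is vendored in the repository: the server draws a fresh random nonce per client, while the bound data is the same for everyone. The 16-byte nonce size and the "v1" bound string are illustrative choices, not part of go-pow.

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/simple-rules/harmony-benchmark/pow"
)

func main() {
	// Per-client randomness lives in the nonce; the bound data can be shared.
	nonce := make([]byte, 16)
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}

	req := pow.NewRequest(5, nonce)              // server -> client
	proof, _ := pow.Fulfil(req, []byte("v1"))    // client does the work
	ok, _ := pow.Check(req, proof, []byte("v1")) // server verifies cheaply
	fmt.Println(ok)
}
```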
To do
-----
- Support for [equihash](https://www.cryptolux.org/index.php/Equihash) would be nice.
- Port to Python, Java, Javascript, ...
- Parallelize.

@@ -1,128 +0,0 @@
// Create and fulfill proof of work requests.
package pow
import (
"encoding/base64"
"fmt"
"strconv"
"strings"
)
type Algorithm string
const (
Sha2BDay Algorithm = "sha2bday"
)
// Represents a proof-of-work request.
type Request struct {
// The requested algorithm
Alg Algorithm
// The requested difficulty
Difficulty uint32
// Nonce to diversify the request
Nonce []byte
}
// Represents a completed proof-of-work
type Proof struct {
buf []byte
}
// Convenience function to create a new sha2bday proof-of-work request
// as a string
func NewRequest(difficulty uint32, nonce []byte) string {
req := Request{
Difficulty: difficulty,
Nonce: nonce,
Alg: Sha2BDay,
}
s, _ := req.MarshalText()
return string(s)
}
func (proof Proof) MarshalText() ([]byte, error) {
return []byte(base64.RawStdEncoding.EncodeToString(proof.buf)), nil
}
func (proof *Proof) UnmarshalText(buf []byte) error {
var err error
proof.buf, err = base64.RawStdEncoding.DecodeString(string(buf))
return err
}
func (req Request) MarshalText() ([]byte, error) {
return []byte(fmt.Sprintf("%s-%d-%s",
req.Alg,
req.Difficulty,
string(base64.RawStdEncoding.EncodeToString(req.Nonce)))), nil
}
func (req *Request) UnmarshalText(buf []byte) error {
bits := strings.SplitN(string(buf), "-", 3)
if len(bits) != 3 {
return fmt.Errorf("There should be two dashes in a PoW request")
}
alg := Algorithm(bits[0])
if alg != Sha2BDay {
return fmt.Errorf("%s: unsupported algorithm", bits[0])
}
req.Alg = alg
diff, err := strconv.Atoi(bits[1])
if err != nil {
return err
}
req.Difficulty = uint32(diff)
req.Nonce, err = base64.RawStdEncoding.DecodeString(bits[2])
return err
}
// Convenience function to check whether a proof of work is fulfilled
func Check(request, proof string, data []byte) (bool, error) {
var req Request
var prf Proof
err := req.UnmarshalText([]byte(request))
if err != nil {
return false, err
}
err = prf.UnmarshalText([]byte(proof))
if err != nil {
return false, err
}
return prf.Check(req, data), nil
}
// Fulfil the proof-of-work request.
func (req *Request) Fulfil(data []byte) Proof {
switch req.Alg {
case Sha2BDay:
return Proof{fulfilSha2BDay(req.Nonce, req.Difficulty, data)}
default:
panic("No such algorithm")
}
}
// Convenience function to fulfil the proof of work request
func Fulfil(request string, data []byte) (string, error) {
var req Request
err := req.UnmarshalText([]byte(request))
if err != nil {
return "", err
}
proof := req.Fulfil(data)
s, _ := proof.MarshalText()
return string(s), nil
}
// Check whether the proof is ok
func (proof *Proof) Check(req Request, data []byte) bool {
switch req.Alg {
case Sha2BDay:
return checkSha2BDay(proof.buf, req.Nonce, data, req.Difficulty)
default:
panic("No such algorithm")
}
}

@@ -1,56 +0,0 @@
package pow
import (
"testing"
)
func TestSha2BDay(t *testing.T) {
nonce := []byte{1, 2, 3, 4, 5}
data := []byte{2, 2, 3, 4, 5}
r := NewRequest(5, nonce)
proof, err := Fulfil(r, data)
if err != nil {
t.Fatalf("Fulfil: %v", err)
}
ok, err := Check(r, proof, data)
if err != nil {
t.Fatalf("Check: %v", err)
}
if !ok {
t.Fatalf("Proof of work should be ok")
}
ok, err = Check(r, proof, nonce)
if err != nil {
t.Fatalf("Check: %v", err)
}
if ok {
t.Fatalf("Proof of work should not be ok")
}
}
func BenchmarkCheck5(b *testing.B) { benchmarkCheck(5, b) }
func BenchmarkCheck10(b *testing.B) { benchmarkCheck(10, b) }
func BenchmarkCheck15(b *testing.B) { benchmarkCheck(15, b) }
func BenchmarkCheck20(b *testing.B) { benchmarkCheck(20, b) }
func benchmarkCheck(diff uint32, b *testing.B) {
req := NewRequest(diff, []byte{1, 2, 3, 4, 5})
prf, _ := Fulfil(req, []byte{6, 7, 8, 9})
b.ResetTimer()
for n := 0; n < b.N; n++ {
Check(req, prf, []byte{6, 7, 8, 9})
}
}
func BenchmarkFulfil5(b *testing.B) { benchmarkFulfil(5, b) }
func BenchmarkFulfil10(b *testing.B) { benchmarkFulfil(10, b) }
func BenchmarkFulfil15(b *testing.B) { benchmarkFulfil(15, b) }
func BenchmarkFulfil20(b *testing.B) { benchmarkFulfil(20, b) }
func benchmarkFulfil(diff uint32, b *testing.B) {
req := NewRequest(diff, []byte{1, 2, 3, 4, 5})
b.ResetTimer()
for n := 0; n < b.N; n++ {
Fulfil(req, []byte{6, 7, 8, 9})
}
}

@@ -1,25 +0,0 @@
package pow_test
import (
"fmt" // imported as pow
"github.com/simple-rules/harmony-benchmark/pow"
)
func Example() {
// Create a proof of work request with difficulty 5
req := pow.NewRequest(5, []byte("some random nonce"))
fmt.Printf("req: %s\n", req)
// Fulfil the proof of work
proof, _ := pow.Fulfil(req, []byte("some bound data"))
fmt.Printf("proof: %s\n", proof)
// Check if the proof is correct
ok, _ := pow.Check(req, proof, []byte("some bound data"))
fmt.Printf("check: %v", ok)
// Output: req: sha2bday-5-c29tZSByYW5kb20gbm9uY2U
// proof: AAAAAAAAAAMAAAAAAAAADgAAAAAAAAAb
// check: true
}

@@ -1,78 +0,0 @@
package pow
import (
"bytes"
"crypto/sha256"
"encoding/binary"
)
func checkSha2BDay(proof []byte, nonce, data []byte, diff uint32) bool {
if len(proof) != 24 {
return false
}
prefix1 := proof[:8]
prefix2 := proof[8:16]
prefix3 := proof[16:]
if bytes.Equal(prefix1, prefix2) || bytes.Equal(prefix2, prefix3) ||
bytes.Equal(prefix1, prefix3) {
return false
}
resBuf := make([]byte, 32)
h := sha256.New()
h.Write(prefix1)
h.Write(data)
h.Write(nonce)
h.Sum(resBuf[:0])
res1 := binary.BigEndian.Uint64(resBuf) & ((1 << diff) - 1)
h.Reset()
h.Write(prefix2)
h.Write(data)
h.Write(nonce)
h.Sum(resBuf[:0])
res2 := binary.BigEndian.Uint64(resBuf) & ((1 << diff) - 1)
h.Reset()
h.Write(prefix3)
h.Write(data)
h.Write(nonce)
h.Sum(resBuf[:0])
res3 := binary.BigEndian.Uint64(resBuf) & ((1 << diff) - 1)
return res1 == res2 && res2 == res3
}
func fulfilSha2BDay(nonce []byte, diff uint32, data []byte) []byte {
// TODO make multithreaded if the difficulty is high enough.
// For light proof-of-work requests, the overhead of parallelizing is
// not worth it.
type Pair struct {
First, Second uint64
}
var i uint64 = 1
prefix := make([]byte, 8)
resBuf := make([]byte, 32)
lut := make(map[uint64]Pair)
h := sha256.New()
for {
binary.BigEndian.PutUint64(prefix, i)
h.Write(prefix)
h.Write(data)
h.Write(nonce)
h.Sum(resBuf[:0])
res := binary.BigEndian.Uint64(resBuf) & ((1 << diff) - 1)
pair, ok := lut[res]
if ok {
if pair.Second != 0 {
ret := make([]byte, 24)
binary.BigEndian.PutUint64(ret, pair.First)
binary.BigEndian.PutUint64(ret[8:], pair.Second)
copy(ret[16:], prefix)
return ret
}
lut[res] = Pair{First: pair.First, Second: i}
} else {
lut[res] = Pair{First: i}
}
h.Reset()
i++
}
}
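To make the proof format concrete: a sha2bday proof is three distinct 8-byte big-endian counters whose hashes of counter||data||nonce, reduced to the low `diff` bits, all collide; that three-way collision is what fulfilSha2BDay searches for and checkSha2BDay verifies. As a quick sanity check, the example proof from the pow README above decodes to the counters 3, 14, and 27:

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

func main() {
	// The example proof from the README: three 8-byte big-endian counters.
	buf, _ := base64.RawStdEncoding.DecodeString("AAAAAAAAAAMAAAAAAAAADgAAAAAAAAAb")
	for i := 0; i < 3; i++ {
		fmt.Println(binary.BigEndian.Uint64(buf[8*i : 8*i+8]))
	}
	// Prints 3, 14, 27: the counters found for the example request.
}
```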